X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lnet%2Fklnds%2Fsocklnd%2Fsocklnd_cb.c;h=d73314c5d08c3cb8ad837d070c4f72c446cbb39a;hb=0a9c9e444635dcf35a74bfb2f46efb3040ca17a0;hp=024c3069e73fcd72698b4f7a232e6ff8d2b68a17;hpb=b26f39067bd87ad8a9840e03acbf75f3a47f6352;p=fs%2Flustre-release.git diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index 024c306..d73314c 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -27,11 +27,13 @@ #include "socklnd.h" ksock_tx_t * -ksocknal_alloc_tx (int size) +ksocknal_alloc_tx(int type, int size) { ksock_tx_t *tx = NULL; - if (size == KSOCK_NOOP_TX_SIZE) { + if (type == KSOCK_MSG_NOOP) { + LASSERT (size == KSOCK_NOOP_TX_SIZE); + /* searching for a noop tx in free list */ cfs_spin_lock(&ksocknal_data.ksnd_tx_lock); @@ -52,18 +54,47 @@ ksocknal_alloc_tx (int size) return NULL; cfs_atomic_set(&tx->tx_refcount, 1); - tx->tx_desc_size = size; + tx->tx_zc_capable = 0; + tx->tx_zc_checked = 0; + tx->tx_desc_size = size; + cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs); return tx; } +ksock_tx_t * +ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) +{ + ksock_tx_t *tx; + + tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE); + if (tx == NULL) { + CERROR("Can't allocate noop tx desc\n"); + return NULL; + } + + tx->tx_conn = NULL; + tx->tx_lnetmsg = NULL; + tx->tx_kiov = NULL; + tx->tx_nkiov = 0; + tx->tx_iov = tx->tx_frags.virt.iov; + tx->tx_niov = 1; + tx->tx_nonblk = nonblk; + + socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); + tx->tx_msg.ksm_zc_cookies[1] = cookie; + + return tx; +} + + void ksocknal_free_tx (ksock_tx_t *tx) { cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs); - if (tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { + if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { /* it's a noop tx */ cfs_spin_lock(&ksocknal_data.ksnd_tx_lock); @@ -75,15 +106,6 @@ ksocknal_free_tx (ksock_tx_t *tx) } } -void -ksocknal_init_msg(ksock_msg_t *msg, int type) -{ - msg->ksm_type = type; - msg->ksm_csum = 0; - msg->ksm_zc_req_cookie = 0; - msg->ksm_zc_ack_cookie = 0; -} - int ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) { @@ -396,7 +418,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type), le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length), libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)), - libcfs_nid2str(le64_to_cpu (tx->tx_lnetmsg->msg_hdr.dest_nid))); + libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid))); } else if (error) { CDEBUG (D_NETERROR, "Deleting noop packet\n"); } @@ -413,32 +435,24 @@ ksocknal_check_zc_req(ksock_tx_t *tx) { ksock_conn_t *conn = tx->tx_conn; ksock_peer_t *peer = conn->ksnc_peer; - lnet_kiov_t *kiov = tx->tx_kiov; - int nkiov = tx->tx_nkiov; - /* Set tx_msg.ksm_zc_req_cookie to a unique non-zero cookie and add tx + /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx * to ksnp_zc_req_list if some fragment of this message should be sent * zero-copy. Our peer will send an ACK containing this cookie when * she has received this message to tell us we can signal completion. - * tx_msg.ksm_zc_req_cookie remains non-zero while tx is on + * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on * ksnp_zc_req_list. 
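
A zero cookie means "no ACK expected", and with the V3 additions in this patch cookie 1 (SOCKNAL_KEEPALIVE_PING) is reserved for keepalive pings, so the allocator must skip both values when the 64-bit counter wraps. A minimal user-space model of the guard this patch adds around ksnp_zc_next_cookie (the function name is illustrative, not the LND's):

#include <stdint.h>

#define SOCKNAL_KEEPALIVE_PING 1   /* cookie reserved for keepalive pings */

/*
 * Allocate the next ZC-request cookie for a peer.  Zero means "no ZC
 * completion pending" and the keepalive cookie is reserved, so on 64-bit
 * wrap the counter restarts just above the reserved value -- the same
 * post-increment guard the patch adds in ksocknal_check_zc_req() below.
 * Assumes *next was initialised to SOCKNAL_KEEPALIVE_PING + 1 or above.
 */
static uint64_t
zc_next_cookie(uint64_t *next)
{
        uint64_t cookie = (*next)++;

        if (*next == 0)                          /* wrapped */
                *next = SOCKNAL_KEEPALIVE_PING + 1;
        return cookie;
}
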
*/ + LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); + LASSERT (tx->tx_zc_capable); - if (conn->ksnc_proto != &ksocknal_protocol_v2x || - !conn->ksnc_zc_capable) - return; + tx->tx_zc_checked = 1; - while (nkiov > 0) { - if (kiov->kiov_len >= *ksocknal_tunables.ksnd_zc_min_frag) - break; - --nkiov; - ++kiov; - } - - if (nkiov == 0) + if (conn->ksnc_proto == &ksocknal_protocol_v1x || + !conn->ksnc_zc_capable) return; /* assign cookie and queue tx to pending list, it will be released when - * a matching ack is received. See ksocknal_handle_zc_ack() */ + * a matching ack is received. See ksocknal_handle_zcack() */ ksocknal_tx_addref(tx); @@ -448,27 +462,37 @@ ksocknal_check_zc_req(ksock_tx_t *tx) tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - LASSERT (tx->tx_msg.ksm_zc_req_cookie == 0); - tx->tx_msg.ksm_zc_req_cookie = peer->ksnp_zc_next_cookie++; + LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); + + tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; + + if (peer->ksnp_zc_next_cookie == 0) + peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; + list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); cfs_spin_unlock(&peer->ksnp_lock); } static void -ksocknal_unzc_req(ksock_tx_t *tx) +ksocknal_uncheck_zc_req(ksock_tx_t *tx) { ksock_peer_t *peer = tx->tx_conn->ksnc_peer; + LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); + LASSERT (tx->tx_zc_capable); + + tx->tx_zc_checked = 0; + cfs_spin_lock(&peer->ksnp_lock); - if (tx->tx_msg.ksm_zc_req_cookie == 0) { + if (tx->tx_msg.ksm_zc_cookies[0] == 0) { /* Not waiting for an ACK */ cfs_spin_unlock(&peer->ksnp_lock); return; } - tx->tx_msg.ksm_zc_req_cookie = 0; + tx->tx_msg.ksm_zc_cookies[0] = 0; list_del(&tx->tx_zc_list); cfs_spin_unlock(&peer->ksnp_lock); @@ -481,10 +505,8 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { int rc; - if (!tx->tx_checked_zc) { - tx->tx_checked_zc = 1; + if (tx->tx_zc_capable && !tx->tx_zc_checked) ksocknal_check_zc_req(tx); - } rc = ksocknal_transmit (conn, tx); @@ -548,7 +570,8 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) conn->ksnc_port); } - ksocknal_unzc_req(tx); + if (tx->tx_zc_checked) + ksocknal_uncheck_zc_req(tx); /* it's not an error if conn is being closed */ ksocknal_close_conn_and_siblings (conn, @@ -580,123 +603,70 @@ ksocknal_launch_connection_locked (ksock_route_t *route) } ksock_conn_t * -ksocknal_find_conn_locked (int payload_nob, ksock_peer_t *peer) +ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) { struct list_head *tmp; + ksock_conn_t *conn; ksock_conn_t *typed = NULL; - int tnob = 0; ksock_conn_t *fallback = NULL; + int tnob = 0; int fnob = 0; - ksock_conn_t *conn; list_for_each (tmp, &peer->ksnp_conns) { - ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); - int hdr_nob = 0; -#if SOCKNAL_ROUND_ROBIN - const int nob = 0; -#else + ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); int nob = cfs_atomic_read(&c->ksnc_tx_nob) + - libcfs_sock_wmem_queued(c->ksnc_sock); -#endif - LASSERT (!c->ksnc_closing); - LASSERT (c->ksnc_proto != NULL); + libcfs_sock_wmem_queued(c->ksnc_sock); + int rc; - if (fallback == NULL || nob < fnob) { - fallback = c; - fnob = nob; - } - - if (!*ksocknal_tunables.ksnd_typed_conns) - continue; + LASSERT (!c->ksnc_closing); + LASSERT (c->ksnc_proto != NULL && + c->ksnc_proto->pro_match_tx != NULL); - if (payload_nob == 0) { - /* noop packet */ - hdr_nob = offsetof(ksock_msg_t, ksm_u); - } else { - /* lnet packet */ - hdr_nob = (c->ksnc_proto == 
&ksocknal_protocol_v2x)? - sizeof(ksock_msg_t) : sizeof(lnet_hdr_t); - } + rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); - switch (c->ksnc_type) { + switch (rc) { default: - CERROR("ksnc_type bad: %u\n", c->ksnc_type); LBUG(); - case SOCKLND_CONN_ANY: - break; - case SOCKLND_CONN_BULK_IN: + case SOCKNAL_MATCH_NO: /* protocol rejected the tx */ continue; - case SOCKLND_CONN_BULK_OUT: - if ((hdr_nob + payload_nob) < *ksocknal_tunables.ksnd_min_bulk) - continue; - break; - case SOCKLND_CONN_CONTROL: - if ((hdr_nob + payload_nob) >= *ksocknal_tunables.ksnd_min_bulk) - continue; + + case SOCKNAL_MATCH_YES: /* typed connection */ + if (typed == NULL || tnob > nob || + (tnob == nob && *ksocknal_tunables.ksnd_round_robin && + cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) { + typed = c; + tnob = nob; + } break; - } - if (typed == NULL || nob < tnob) { - typed = c; - tnob = nob; + case SOCKNAL_MATCH_MAY: /* fallback connection */ + if (fallback == NULL || fnob > nob || + (fnob == nob && *ksocknal_tunables.ksnd_round_robin && + cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) { + fallback = c; + fnob = nob; + } + break; } } /* prefer the typed selection */ conn = (typed != NULL) ? typed : fallback; -#if SOCKNAL_ROUND_ROBIN - if (conn != NULL) { - /* round-robin all else being equal */ - list_del (&conn->ksnc_list); - list_add_tail (&conn->ksnc_list, &peer->ksnp_conns); - } -#endif + if (conn != NULL) + conn->ksnc_tx_last_post = cfs_time_current(); + return conn; } void -ksocknal_next_mono_tx(ksock_conn_t *conn) -{ - ksock_tx_t *tx = conn->ksnc_tx_mono; - - /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ - LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x); - LASSERT(!list_empty(&conn->ksnc_tx_queue)); - LASSERT(tx != NULL); - - if (tx->tx_list.next == &conn->ksnc_tx_queue) { - /* no more packets queued */ - conn->ksnc_tx_mono = NULL; - } else { - conn->ksnc_tx_mono = list_entry(tx->tx_list.next, ksock_tx_t, tx_list); - LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == tx->tx_msg.ksm_type); - } -} - -int -ksocknal_piggyback_zcack(ksock_conn_t *conn, __u64 cookie) +ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx) { - ksock_tx_t *tx = conn->ksnc_tx_mono; - - /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ - - if (tx == NULL) - return 0; - - if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { - /* tx is noop zc-ack, can't piggyback zc-ack cookie */ - return 0; - } - - LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); - LASSERT(tx->tx_msg.ksm_zc_ack_cookie == 0); - - /* piggyback the zc-ack cookie */ - tx->tx_msg.ksm_zc_ack_cookie = cookie; - ksocknal_next_mono_tx(conn); + conn->ksnc_proto->pro_pack(tx); - return 1; + cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob); + ksocknal_conn_addref(conn); /* +1 ref for tx */ + tx->tx_conn = conn; } void @@ -704,7 +674,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) { ksock_sched_t *sched = conn->ksnc_scheduler; ksock_msg_t *msg = &tx->tx_msg; - ksock_tx_t *ztx; + ksock_tx_t *ztx = NULL; int bufnob = 0; /* called holding global lock (read or irq-write) and caller may @@ -713,13 +683,12 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) * ksnc_sock... 
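
The rewritten ksocknal_find_conn_locked() above delegates connection typing to the protocol's pro_match_tx() callback and keeps, per class, the connection with the fewest queued bytes, optionally breaking ties round-robin on the oldest ksnc_tx_last_post. A stand-alone sketch of that selection policy, with illustrative types that are not the LND's:

#include <stddef.h>
#include <time.h>

enum match { MATCH_NO, MATCH_YES, MATCH_MAY };

struct conn {
        enum match  verdict;      /* what pro_match_tx() would return */
        int         queued_nob;   /* bytes already queued on this conn */
        time_t      last_post;    /* when a tx was last posted on it */
};

/* Keep the least-loaded YES (typed) and MAY (fallback) candidates; with
 * round_robin set, equal queue depths go to the longest-idle connection.
 * Preferring the emptiest queue spreads load across an open bundle. */
static struct conn *
pick_conn(struct conn *conns, size_t n, int round_robin)
{
        struct conn *typed = NULL, *fallback = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                struct conn *c = &conns[i];
                struct conn **slot = (c->verdict == MATCH_YES) ? &typed :
                                     (c->verdict == MATCH_MAY) ? &fallback :
                                     NULL;

                if (slot == NULL)                 /* MATCH_NO: rejected */
                        continue;
                if (*slot == NULL ||
                    (*slot)->queued_nob > c->queued_nob ||
                    ((*slot)->queued_nob == c->queued_nob && round_robin &&
                     (*slot)->last_post > c->last_post))
                        *slot = c;
        }
        return typed != NULL ? typed : fallback;  /* prefer typed */
}
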
*/ LASSERT(!conn->ksnc_closing); - CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n", + CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - tx->tx_checked_zc = 0; - conn->ksnc_proto->pro_pack(tx); + ksocknal_tx_prep(conn, tx); /* Ensure the frags we've been given EXACTLY match the number of * bytes we want to send. Many TCP/IP stacks disregard any total @@ -734,14 +703,10 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) LASSERT (tx->tx_resid == tx->tx_nob); CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", - tx, (tx->tx_lnetmsg != NULL)? tx->tx_lnetmsg->msg_hdr.type: - KSOCK_MSG_NOOP, + tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type: + KSOCK_MSG_NOOP, tx->tx_nob, tx->tx_niov, tx->tx_nkiov); - cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob); - tx->tx_conn = conn; - ksocknal_conn_addref(conn); /* +1 ref for tx */ - /* * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__ * but they're used inside spinlocks a lot. @@ -757,67 +722,29 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) cfs_mb(); /* order with adding to tx_queue */ } - ztx = NULL; - if (msg->ksm_type == KSOCK_MSG_NOOP) { /* The packet is noop ZC ACK, try to piggyback the ack_cookie * on a normal packet so I don't need to send it */ - LASSERT(msg->ksm_zc_req_cookie == 0); - LASSERT(msg->ksm_zc_ack_cookie != 0); - - if (conn->ksnc_tx_mono != NULL) { - if (ksocknal_piggyback_zcack(conn, msg->ksm_zc_ack_cookie)) { - /* zc-ack cookie is piggybacked */ - cfs_atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob); - ztx = tx; /* Put to freelist later */ - } else { - /* no packet can piggyback zc-ack cookie */ - list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); - } - } else { - /* It's the first mono-packet */ - conn->ksnc_tx_mono = tx; - list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); - } + LASSERT (msg->ksm_zc_cookies[1] != 0); + LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL); + + if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) + ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ } else { /* It's a normal packet - can it piggback a noop zc-ack that * has been queued already? 
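
When a normal packet is queued, pro_queue_tx_msg() (called just below) may return a previously queued noop ZC-ACK whose cookie it folded into the new message; ksocknal_queue_tx_locked() then un-accounts that "ztx" and puts it on the scheduler's zombie list for freeing. A toy model of the hand-off, with illustrative structures:

#include <stdint.h>
#include <stddef.h>

struct msg { uint64_t zc_cookies[2]; };  /* [0] ZC request, [1] ZC ack */
struct tx  { struct msg msg; int nob; };

/* Fold a pending noop's ACK cookie into the outgoing message "out".
 * Returns the displaced noop (the "ztx"); the caller subtracts its bytes
 * from the connection's tx accounting and frees it, as the hunk below
 * does via kss_zombie_noop_txs. */
static struct tx *
queue_tx_msg(struct tx *pending_noop, struct tx *out)
{
        if (pending_noop == NULL)
                return NULL;               /* nothing to absorb */

        /* steal the ACK cookie: the noop no longer needs the wire */
        out->msg.zc_cookies[1] = pending_noop->msg.zc_cookies[1];
        return pending_noop;
}
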
*/ - LASSERT(msg->ksm_zc_ack_cookie == 0); - - if (conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x packet */ - conn->ksnc_tx_mono != NULL) { - if (conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_NOOP) { - /* There is a noop zc-ack can be piggybacked */ - ztx = conn->ksnc_tx_mono; - - msg->ksm_zc_ack_cookie = ztx->tx_msg.ksm_zc_ack_cookie; - ksocknal_next_mono_tx(conn); - - /* use tx to replace the noop zc-ack packet, ztx will - * be put to freelist later */ - list_add(&tx->tx_list, &ztx->tx_list); - list_del(&ztx->tx_list); - - cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob); - } else { - /* no noop zc-ack packet, just enqueue it */ - LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_LNET); - list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); - } + LASSERT (msg->ksm_zc_cookies[1] == 0); + LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL); - } else if (conn->ksnc_proto == &ksocknal_protocol_v2x) { - /* it's the first mono-packet, enqueue it */ - conn->ksnc_tx_mono = tx; - list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); - } else { - /* V1.x packet, just enqueue it */ - list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); - } + ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); + /* ztx will be released later */ } - if (ztx != NULL) + if (ztx != NULL) { + cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob); list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); + } if (conn->ksnc_tx_ready && /* able to send */ !conn->ksnc_tx_scheduled) { /* not scheduled to send */ @@ -832,6 +759,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) cfs_spin_unlock_bh (&sched->kss_lock); } + ksock_route_t * ksocknal_find_connectable_route_locked (ksock_peer_t *peer) { @@ -891,17 +819,15 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) int rc; LASSERT (tx->tx_conn == NULL); - LASSERT (tx->tx_lnetmsg != NULL); g_lock = &ksocknal_data.ksnd_global_lock; for (retry = 0;; retry = 1) { -#if !SOCKNAL_ROUND_ROBIN cfs_read_lock (g_lock); peer = ksocknal_find_peer_locked(ni, id); if (peer != NULL) { if (ksocknal_find_connectable_route_locked(peer) == NULL) { - conn = ksocknal_find_conn_locked (tx->tx_lnetmsg->msg_len, peer); + conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); if (conn != NULL) { /* I've got no routes that need to be * connecting and I do have an actual @@ -915,7 +841,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) /* I'll need a write lock... 
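
The pattern here is a read-to-write lock upgrade: the fast path runs under the shared lock, and when state must change the lock is dropped and re-taken exclusively, after which the peer must be looked up again because it may have been added or removed in the unlocked window. A sketch of the idiom with pthreads; lookup(), fast_path() and slow_path() stand in for the socklnd specifics:

#include <pthread.h>

static int
upgrade_and_retry(pthread_rwlock_t *g_lock,
                  void *(*lookup)(void), int (*fast_path)(void *),
                  int (*slow_path)(void *))
{
        void *obj;
        int   rc;

        pthread_rwlock_rdlock(g_lock);
        obj = lookup();
        if (obj != NULL && fast_path(obj) == 0) {
                pthread_rwlock_unlock(g_lock);
                return 0;
        }
        pthread_rwlock_unlock(g_lock);   /* "I'll need a write lock..." */

        pthread_rwlock_wrlock(g_lock);
        obj = lookup();                  /* state may have changed: re-find */
        rc = (obj != NULL) ? slow_path(obj) : -1;
        pthread_rwlock_unlock(g_lock);
        return rc;
}
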
*/ cfs_read_unlock (g_lock); -#endif + cfs_write_lock_bh (g_lock); peer = ksocknal_find_peer_locked(ni, id); @@ -954,7 +880,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) ksocknal_launch_connection_locked (route); } - conn = ksocknal_find_conn_locked (tx->tx_lnetmsg->msg_len, peer); + conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); if (conn != NULL) { /* Connection exists; queue message on it */ ksocknal_queue_tx_locked (tx, conn); @@ -967,7 +893,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) /* the message is going to be pinned to the peer */ tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - + /* Queue the message until a connection is established */ list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); cfs_write_unlock_bh (g_lock); @@ -1014,7 +940,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) desc_size = offsetof(ksock_tx_t, tx_frags.paged.kiov[payload_niov]); - tx = ksocknal_alloc_tx(desc_size); + tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); if (tx == NULL) { CERROR("Can't allocate tx desc type %d size %d\n", type, desc_size); @@ -1039,9 +965,12 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov, payload_niov, payload_kiov, payload_offset, payload_nob); + + if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload) + tx->tx_zc_capable = 1; } - ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_LNET); + socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET); /* The first fragment will be set later in pro_pack */ rc = ksocknal_launch_packet(ni, tx, target); @@ -1096,6 +1025,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) switch (conn->ksnc_proto->pro_version) { case KSOCK_PROTO_V2: + case KSOCK_PROTO_V3: conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER; conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space; conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg; @@ -1155,99 +1085,12 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) return (0); } -/* (Sink) handle incoming ZC request from sender */ -static int -ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie) -{ - ksock_conn_t *conn; - ksock_tx_t *tx; - ksock_sched_t *sched; - int rc; - - cfs_read_lock (&ksocknal_data.ksnd_global_lock); - - conn = ksocknal_find_conn_locked (0, peer); - if (conn == NULL) { - cfs_read_unlock (&ksocknal_data.ksnd_global_lock); - CERROR("Can't find connection to send zcack.\n"); - return -ECONNRESET; - } - - sched = conn->ksnc_scheduler; - - cfs_spin_lock_bh (&sched->kss_lock); - rc = ksocknal_piggyback_zcack(conn, cookie); - cfs_spin_unlock_bh (&sched->kss_lock); - - cfs_read_unlock (&ksocknal_data.ksnd_global_lock); - if (rc) { - /* Ack cookie is piggybacked */ - return 0; - } - - tx = ksocknal_alloc_tx(KSOCK_NOOP_TX_SIZE); - if (tx == NULL) { - CERROR("Can't allocate noop tx desc\n"); - return -ENOMEM; - } - - tx->tx_conn = NULL; - tx->tx_lnetmsg = NULL; - tx->tx_kiov = NULL; - tx->tx_nkiov = 0; - tx->tx_iov = tx->tx_frags.virt.iov; - tx->tx_niov = 1; - - ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); - tx->tx_msg.ksm_zc_ack_cookie = cookie; /* incoming cookie */ - - cfs_read_lock (&ksocknal_data.ksnd_global_lock); - - conn = ksocknal_find_conn_locked (0, peer); - if (conn == NULL) { - cfs_read_unlock (&ksocknal_data.ksnd_global_lock); - ksocknal_free_tx(tx); - CERROR("Can't find connection to send zcack.\n"); - return -ECONNRESET; - } - ksocknal_queue_tx_locked(tx, conn); - - 
cfs_read_unlock (&ksocknal_data.ksnd_global_lock); - - return 0; -} - -/* (Sender) handle ZC_ACK from sink */ -static int -ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie) -{ - ksock_tx_t *tx; - struct list_head *ctmp; - - cfs_spin_lock(&peer->ksnp_lock); - - list_for_each(ctmp, &peer->ksnp_zc_req_list) { - tx = list_entry (ctmp, ksock_tx_t, tx_zc_list); - if (tx->tx_msg.ksm_zc_req_cookie != cookie) - continue; - - tx->tx_msg.ksm_zc_req_cookie = 0; - list_del(&tx->tx_zc_list); - - cfs_spin_unlock(&peer->ksnp_lock); - - ksocknal_tx_decref(tx); - return 0; - } - cfs_spin_unlock(&peer->ksnp_lock); - - return -EPROTO; -} - int ksocknal_process_receive (ksock_conn_t *conn) { - int rc; + lnet_hdr_t *lhdr; + lnet_process_id_t *id; + int rc; LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0); @@ -1294,8 +1137,8 @@ ksocknal_process_receive (ksock_conn_t *conn) if (conn->ksnc_flip) { __swab32s(&conn->ksnc_msg.ksm_type); __swab32s(&conn->ksnc_msg.ksm_csum); - __swab64s(&conn->ksnc_msg.ksm_zc_req_cookie); - __swab64s(&conn->ksnc_msg.ksm_zc_ack_cookie); + __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]); + __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]); } if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP && @@ -1320,15 +1163,21 @@ ksocknal_process_receive (ksock_conn_t *conn) return (-EIO); } - if (conn->ksnc_msg.ksm_zc_ack_cookie != 0) { - LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x); + if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) { + __u64 cookie = 0; + + LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x); + + if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) + cookie = conn->ksnc_msg.ksm_zc_cookies[0]; + + rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie, + conn->ksnc_msg.ksm_zc_cookies[1]); - rc = ksocknal_handle_zc_ack(conn->ksnc_peer, - conn->ksnc_msg.ksm_zc_ack_cookie); if (rc != 0) { - CERROR("%s: Unknown zero copy ACK cookie: "LPU64"\n", + CERROR("%s: Unknown ZC-ACK cookie: "LPU64", "LPU64"\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), - conn->ksnc_msg.ksm_zc_ack_cookie); + cookie, conn->ksnc_msg.ksm_zc_cookies[1]); ksocknal_new_packet(conn, 0); ksocknal_close_conn_and_siblings(conn, -EPROTO); return (rc); @@ -1360,8 +1209,8 @@ ksocknal_process_receive (ksock_conn_t *conn) if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { /* Userspace peer */ - lnet_process_id_t *id = &conn->ksnc_peer->ksnp_id; - lnet_hdr_t *lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; + lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; + id = &conn->ksnc_peer->ksnp_id; /* Substitute process ID assigned at connection time */ lhdr->src_pid = cpu_to_le32(id->pid); @@ -1405,14 +1254,19 @@ ksocknal_process_receive (ksock_conn_t *conn) rc = -EIO; } - lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc); + if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) { + LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); + + lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; + id = &conn->ksnc_peer->ksnp_id; - if (rc == 0 && conn->ksnc_msg.ksm_zc_req_cookie != 0) { - LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x); - rc = ksocknal_handle_zc_req(conn->ksnc_peer, - conn->ksnc_msg.ksm_zc_req_cookie); + rc = conn->ksnc_proto->pro_handle_zcreq(conn, + conn->ksnc_msg.ksm_zc_cookies[0], + le64_to_cpu(lhdr->src_nid) != id->nid); } + lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc); + if (rc != 0) { ksocknal_new_packet(conn, 0); ksocknal_close_conn_and_siblings (conn, rc); @@ -1595,8 +1449,8 @@ int ksocknal_scheduler (void *arg) tx = list_entry(conn->ksnc_tx_queue.next, ksock_tx_t, tx_list); - if 
(conn->ksnc_tx_mono == tx) - ksocknal_next_mono_tx(conn); + if (conn->ksnc_tx_carrier == tx) + ksocknal_next_tx_carrier(conn); /* dequeue now so empty list => more to send */ list_del(&tx->tx_list); @@ -1732,15 +1586,29 @@ void ksocknal_write_callback (ksock_conn_t *conn) ksock_proto_t * ksocknal_parse_proto_version (ksock_hello_msg_t *hello) { - if ((hello->kshm_magic == LNET_PROTO_MAGIC && - hello->kshm_version == KSOCK_PROTO_V2) || - (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC) && - hello->kshm_version == __swab32(KSOCK_PROTO_V2))) { + __u32 version = 0; + + if (hello->kshm_magic == LNET_PROTO_MAGIC) + version = hello->kshm_version; + else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC)) + version = __swab32(hello->kshm_version); + + if (version != 0) { #if SOCKNAL_VERSION_DEBUG - if (*ksocknal_tunables.ksnd_protocol != 2) + if (*ksocknal_tunables.ksnd_protocol == 1) + return NULL; + + if (*ksocknal_tunables.ksnd_protocol == 2 && + version == KSOCK_PROTO_V3) return NULL; #endif - return &ksocknal_protocol_v2x; + if (version == KSOCK_PROTO_V2) + return &ksocknal_protocol_v2x; + + if (version == KSOCK_PROTO_V3) + return &ksocknal_protocol_v3x; + + return NULL; } if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { @@ -1757,332 +1625,6 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) return NULL; } -static int -ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello) -{ - cfs_socket_t *sock = conn->ksnc_sock; - lnet_hdr_t *hdr; - lnet_magicversion_t *hmv; - int rc; - int i; - - CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); - - LIBCFS_ALLOC(hdr, sizeof(*hdr)); - if (hdr == NULL) { - CERROR("Can't allocate lnet_hdr_t\n"); - return -ENOMEM; - } - - hmv = (lnet_magicversion_t *)&hdr->dest_nid; - - /* Re-organize V2.x message header to V1.x (lnet_hdr_t) - * header and send out */ - hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); - hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); - hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); - - if (the_lnet.ln_testprotocompat != 0) { - /* single-shot proto check */ - LNET_LOCK(); - if ((the_lnet.ln_testprotocompat & 1) != 0) { - hmv->version_major++; /* just different! 
*/ - the_lnet.ln_testprotocompat &= ~1; - } - if ((the_lnet.ln_testprotocompat & 2) != 0) { - hmv->magic = LNET_PROTO_MAGIC; - the_lnet.ln_testprotocompat &= ~2; - } - LNET_UNLOCK(); - } - - hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); - hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); - hdr->type = cpu_to_le32 (LNET_MSG_HELLO); - hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); - hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); - hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); - - rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); - - if (rc != 0) { - CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n", - rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - goto out; - } - - if (hello->kshm_nips == 0) - goto out; - - for (i = 0; i < (int) hello->kshm_nips; i++) { - hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); - } - - rc = libcfs_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); - if (rc != 0) { - CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)" - " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips, - HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - } -out: - LIBCFS_FREE(hdr, sizeof(*hdr)); - - return rc; -} - -static int -ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello) -{ - cfs_socket_t *sock = conn->ksnc_sock; - int rc; - - hello->kshm_magic = LNET_PROTO_MAGIC; - hello->kshm_version = KSOCK_PROTO_V2; - - if (the_lnet.ln_testprotocompat != 0) { - /* single-shot proto check */ - LNET_LOCK(); - if ((the_lnet.ln_testprotocompat & 1) != 0) { - hello->kshm_version++; /* just different! */ - the_lnet.ln_testprotocompat &= ~1; - } - LNET_UNLOCK(); - } - - rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), - lnet_acceptor_timeout()); - - if (rc != 0) { - CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n", - rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - return rc; - } - - if (hello->kshm_nips == 0) - return 0; - - rc = libcfs_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); - if (rc != 0) { - CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)" - " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips, - HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - } - - return rc; -} - -static int -ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout) -{ - cfs_socket_t *sock = conn->ksnc_sock; - lnet_hdr_t *hdr; - int rc; - int i; - - LIBCFS_ALLOC(hdr, sizeof(*hdr)); - if (hdr == NULL) { - CERROR("Can't allocate lnet_hdr_t\n"); - return -ENOMEM; - } - - rc = libcfs_sock_read(sock, &hdr->src_nid, - sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid), - timeout); - if (rc != 0) { - CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n", - rc, HIPQUAD(conn->ksnc_ipaddr)); - LASSERT (rc < 0 && rc != -EALREADY); - goto out; - } - - /* ...and check we got what we expected */ - if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) { - CERROR ("Expecting a HELLO hdr," - " but got type %d from %u.%u.%u.%u\n", - le32_to_cpu (hdr->type), - HIPQUAD(conn->ksnc_ipaddr)); - rc = -EPROTO; - goto out; - } - - hello->kshm_src_nid = le64_to_cpu (hdr->src_nid); - hello->kshm_src_pid = le32_to_cpu (hdr->src_pid); - hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation); - hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type); - hello->kshm_nips = le32_to_cpu (hdr->payload_length) / - sizeof (__u32); - - if (hello->kshm_nips > 
LNET_MAX_INTERFACES) { - CERROR("Bad nips %d from ip %u.%u.%u.%u\n", - hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr)); - rc = -EPROTO; - goto out; - } - - if (hello->kshm_nips == 0) - goto out; - - rc = libcfs_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); - if (rc != 0) { - CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n", - rc, HIPQUAD(conn->ksnc_ipaddr)); - LASSERT (rc < 0 && rc != -EALREADY); - goto out; - } - - for (i = 0; i < (int) hello->kshm_nips; i++) { - hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]); - - if (hello->kshm_ips[i] == 0) { - CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n", - i, HIPQUAD(conn->ksnc_ipaddr)); - rc = -EPROTO; - break; - } - } -out: - LIBCFS_FREE(hdr, sizeof(*hdr)); - - return rc; -} - -static int -ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) -{ - cfs_socket_t *sock = conn->ksnc_sock; - int rc; - int i; - - if (hello->kshm_magic == LNET_PROTO_MAGIC) - conn->ksnc_flip = 0; - else - conn->ksnc_flip = 1; - - rc = libcfs_sock_read(sock, &hello->kshm_src_nid, - offsetof(ksock_hello_msg_t, kshm_ips) - - offsetof(ksock_hello_msg_t, kshm_src_nid), - timeout); - if (rc != 0) { - CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n", - rc, HIPQUAD(conn->ksnc_ipaddr)); - LASSERT (rc < 0 && rc != -EALREADY); - return rc; - } - - if (conn->ksnc_flip) { - __swab32s(&hello->kshm_src_pid); - __swab64s(&hello->kshm_src_nid); - __swab32s(&hello->kshm_dst_pid); - __swab64s(&hello->kshm_dst_nid); - __swab64s(&hello->kshm_src_incarnation); - __swab64s(&hello->kshm_dst_incarnation); - __swab32s(&hello->kshm_ctype); - __swab32s(&hello->kshm_nips); - } - - if (hello->kshm_nips > LNET_MAX_INTERFACES) { - CERROR("Bad nips %d from ip %u.%u.%u.%u\n", - hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr)); - return -EPROTO; - } - - if (hello->kshm_nips == 0) - return 0; - - rc = libcfs_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); - if (rc != 0) { - CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n", - rc, HIPQUAD(conn->ksnc_ipaddr)); - LASSERT (rc < 0 && rc != -EALREADY); - return rc; - } - - for (i = 0; i < (int) hello->kshm_nips; i++) { - if (conn->ksnc_flip) - __swab32s(&hello->kshm_ips[i]); - - if (hello->kshm_ips[i] == 0) { - CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n", - i, HIPQUAD(conn->ksnc_ipaddr)); - return -EPROTO; - } - } - - return 0; -} - -static void -ksocknal_pack_msg_v1(ksock_tx_t *tx) -{ - /* V1.x has no KSOCK_MSG_NOOP */ - LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - LASSERT(tx->tx_lnetmsg != NULL); - - tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr; - tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t); - - tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); -} - -static void -ksocknal_pack_msg_v2(ksock_tx_t *tx) -{ - tx->tx_iov[0].iov_base = (void *)&tx->tx_msg; - - if (tx->tx_lnetmsg != NULL) { - LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - - tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; - tx->tx_iov[0].iov_len = sizeof(ksock_msg_t); - tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; - } else { - LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); - - tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); - tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); - } - /* Don't checksum before start sending, because packet can be piggybacked with ACK */ -} - -static void -ksocknal_unpack_msg_v1(ksock_msg_t *msg) -{ - msg->ksm_type = KSOCK_MSG_LNET; - 
msg->ksm_csum = 0; - msg->ksm_zc_req_cookie = 0; - msg->ksm_zc_ack_cookie = 0; -} - -static void -ksocknal_unpack_msg_v2(ksock_msg_t *msg) -{ - return; /* Do nothing */ -} - -ksock_proto_t ksocknal_protocol_v1x = -{ - KSOCK_PROTO_V1, - ksocknal_send_hello_v1, - ksocknal_recv_hello_v1, - ksocknal_pack_msg_v1, - ksocknal_unpack_msg_v1 -}; - -ksock_proto_t ksocknal_protocol_v2x = -{ - KSOCK_PROTO_V2, - ksocknal_send_hello_v2, - ksocknal_recv_hello_v2, - ksocknal_pack_msg_v2, - ksocknal_unpack_msg_v2 -}; - int ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, lnet_nid_t peer_nid, ksock_hello_msg_t *hello) @@ -2178,9 +1720,11 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, if (proto == NULL) { if (!active) { /* unknown protocol from peer, tell peer my protocol */ - conn->ksnc_proto = &ksocknal_protocol_v2x; + conn->ksnc_proto = &ksocknal_protocol_v3x; #if SOCKNAL_VERSION_DEBUG - if (*ksocknal_tunables.ksnd_protocol != 2) + if (*ksocknal_tunables.ksnd_protocol == 2) + conn->ksnc_proto = &ksocknal_protocol_v2x; + else if (*ksocknal_tunables.ksnd_protocol == 1) conn->ksnc_proto = &ksocknal_protocol_v1x; #endif hello->kshm_nips = 0; @@ -2391,14 +1935,18 @@ ksocknal_connect (ksock_route_t *route) if (!list_empty(&peer->ksnp_tx_queue) && peer->ksnp_accepting == 0 && ksocknal_find_connecting_route_locked(peer) == NULL) { + ksock_conn_t *conn; + /* ksnp_tx_queue is queued on a conn on successful - * connection */ - LASSERT (list_empty (&peer->ksnp_conns)); + * connection for V1.x and V2.x */ + if (!list_empty (&peer->ksnp_conns)) { + conn = list_entry(peer->ksnp_conns.next, ksock_conn_t, ksnc_list); + LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x); + } /* take all the blocked packets while I've got the lock and * complete below... */ - list_add(&zombies, &peer->ksnp_tx_queue); - list_del_init(&peer->ksnp_tx_queue); + list_splice_init(&peer->ksnp_tx_queue, &zombies); } #if 0 /* irrelevent with only eager routes */ @@ -2615,6 +2163,68 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1); } +int +ksocknal_send_keepalive_locked(ksock_peer_t *peer) +{ + ksock_sched_t *sched; + ksock_conn_t *conn; + ksock_tx_t *tx; + + if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */ + return 0; + + if (peer->ksnp_proto != &ksocknal_protocol_v3x) + return 0; + + if (*ksocknal_tunables.ksnd_keepalive <= 0 || + cfs_time_before(cfs_time_current(), + cfs_time_add(peer->ksnp_last_alive, + cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive)))) + return 0; + + if (cfs_time_before(cfs_time_current(), + peer->ksnp_send_keepalive)) + return 0; + + /* retry 10 secs later, so we wouldn't put pressure + * on this peer if we failed to send keepalive this time */ + peer->ksnp_send_keepalive = cfs_time_shift(10); + + conn = ksocknal_find_conn_locked(peer, NULL, 1); + if (conn != NULL) { + sched = conn->ksnc_scheduler; + + spin_lock_bh (&sched->kss_lock); + if (!list_empty(&conn->ksnc_tx_queue)) { + spin_unlock_bh(&sched->kss_lock); + /* there is an queued ACK, don't need keepalive */ + return 0; + } + + spin_unlock_bh(&sched->kss_lock); + } + + read_unlock(&ksocknal_data.ksnd_global_lock); + + /* cookie = 1 is reserved for keepalive PING */ + tx = ksocknal_alloc_tx_noop(1, 1); + if (tx == NULL) { + read_lock(&ksocknal_data.ksnd_global_lock); + return -ENOMEM; + } + + if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) { + read_lock(&ksocknal_data.ksnd_global_lock); + return 1; + } + + ksocknal_free_tx(tx); + 
read_lock(&ksocknal_data.ksnd_global_lock); + + return -EIO; +} + + void ksocknal_check_peer_timeouts (int idx) { @@ -2631,6 +2241,11 @@ ksocknal_check_peer_timeouts (int idx) list_for_each (ptmp, peers) { peer = list_entry (ptmp, ksock_peer_t, ksnp_list); + if (ksocknal_send_keepalive_locked(peer) != 0) { + read_unlock (&ksocknal_data.ksnd_global_lock); + goto again; + } + conn = ksocknal_find_timed_out_conn (peer); if (conn != NULL) {
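
ksocknal_send_keepalive_locked() above gates the V3 keepalive PING on three conditions: the keepalive tunable is enabled, the peer has been silent for a full interval, and a 10-second retry backoff has expired. A compact user-space model of that gate, with time_t standing in for cfs_time_t and illustrative names:

#include <time.h>

struct peer_state {
        time_t last_alive;       /* last traffic seen from the peer */
        time_t send_keepalive;   /* do not ping again before this time */
};

/* Returns 1 when a PING (reserved cookie 1) should be queued, arming a
 * 10-second backoff so a failed send does not hammer the peer. */
static int
keepalive_due(struct peer_state *p, int keepalive_secs, time_t now)
{
        if (keepalive_secs <= 0)
                return 0;                         /* feature disabled */
        if (now < p->last_alive + keepalive_secs)
                return 0;                         /* peer recently alive */
        if (now < p->send_keepalive)
                return 0;                         /* still backing off */

        p->send_keepalive = now + 10;             /* retry 10s later */
        return 1;
}
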
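ksocknal_parse_proto_version() now maps a (magic, version) pair, possibly byte-swapped by a peer of the other endianness, onto one of three protocol tables instead of testing for V2 alone. A stand-alone sketch of the dispatch; the magic constant is an illustrative stand-in for the real LNET wire value:

#include <stdint.h>

#define PROTO_MAGIC   0x45726963u                 /* illustrative only */
#define SWAB32(v)     ((((v) & 0xffu) << 24) | (((v) & 0xff00u) << 8) | \
                       (((v) >> 8) & 0xff00u) | (((v) >> 24) & 0xffu))

/* PROTO_V1 is selected by the legacy TCP-magic path in the code above,
 * never by the versioned-hello path modelled here. */
enum proto { PROTO_NONE, PROTO_V1, PROTO_V2, PROTO_V3 };

static enum proto
parse_proto(uint32_t magic, uint32_t version)
{
        if (magic == SWAB32(PROTO_MAGIC))         /* other-endian peer */
                version = SWAB32(version);
        else if (magic != PROTO_MAGIC)
                return PROTO_NONE;                /* not a versioned hello */

        switch (version) {
        case 2:  return PROTO_V2;
        case 3:  return PROTO_V3;                 /* new in this patch */
        default: return PROTO_NONE;
        }
}
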
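On the receive side, pro_handle_zcack() is now handed two cookies, so a single NOOP may acknowledge more than one outstanding ZC request. A sketch of a matcher under that assumption: it completes every pending request whose cookie falls in the inclusive range, treating a zero first cookie as a single ACK. The real per-protocol handlers live in socklnd_proto.c and remain authoritative:

#include <stdint.h>
#include <stddef.h>

/* One entry on a peer's pending-ZC list (illustrative, not the LND's). */
struct zc_req {
        struct zc_req *next;
        uint64_t       cookie;    /* what went into ksm_zc_cookies[0] */
};

/* Complete every request matched by the (first, last) cookie pair carried
 * in a ZC-ACK.  Returns the number completed; 0 lets the caller report an
 * unknown-cookie protocol error, as the hunk above does. */
static int
handle_zcack(struct zc_req **list, uint64_t first, uint64_t last,
             void (*complete)(struct zc_req *))
{
        struct zc_req **pp = list;
        int matched = 0;

        if (first == 0)
                first = last;             /* single-cookie ACK */

        while (*pp != NULL) {
                struct zc_req *req = *pp;

                if (req->cookie < first || req->cookie > last) {
                        pp = &req->next;
                        continue;
                }
                *pp = req->next;          /* unlink */
                complete(req);            /* e.g. ksocknal_tx_decref() */
                matched++;
        }
        return matched;
}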