X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fsocklnd%2Fsocklnd_cb.c;h=2da283add83af704acb6e2105c03fbe87f8ecd2a;hb=61aa09e1ed8463ccda1f5d83d2c5aff8080a6116;hp=c2f7c5767cb26adbcd31046357da8f5458b82efa;hpb=3e753d381565875c685342e28e76476bee75a4ca;p=fs%2Flustre-release.git

diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index c2f7c57..2da283a 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -389,61 +389,60 @@ ksocknal_receive (ksock_conn_t *conn)
 }
 
 void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(struct lnet_ni *ni, ksock_tx_t *tx, int rc)
 {
-        lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
-        int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+	struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
 	ENTRY;
 
-        LASSERT(ni != NULL || tx->tx_conn != NULL);
+	LASSERT(ni != NULL || tx->tx_conn != NULL);
 
-        if (tx->tx_conn != NULL)
-                ksocknal_conn_decref(tx->tx_conn);
+	if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted))
+		rc = -EIO;
 
-        if (ni == NULL && tx->tx_conn != NULL)
-                ni = tx->tx_conn->ksnc_peer->ksnp_ni;
+	if (tx->tx_conn != NULL)
+		ksocknal_conn_decref(tx->tx_conn);
 
-        ksocknal_free_tx (tx);
-        if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
-                lnet_finalize (ni, lnetmsg, rc);
+	ksocknal_free_tx(tx);
+	if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
+		lnet_finalize(lnetmsg, rc);
 
-        EXIT;
+	EXIT;
 }
 
 void
-ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
 {
-        ksock_tx_t *tx;
+	ksock_tx_t *tx;
 
 	while (!list_empty(txlist)) {
 		tx = list_entry(txlist->next, ksock_tx_t, tx_list);
 
-                if (error && tx->tx_lnetmsg != NULL) {
-                        CNETERR("Deleting packet type %d len %d %s->%s\n",
-                                le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
-                                le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
-                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
-                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
-                } else if (error) {
-                        CNETERR("Deleting noop packet\n");
-                }
+		if (error && tx->tx_lnetmsg != NULL) {
+			CNETERR("Deleting packet type %d len %d %s->%s\n",
+				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
+				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
+				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
+		} else if (error) {
+			CNETERR("Deleting noop packet\n");
+		}
 
 		list_del(&tx->tx_list);
-                LASSERT (atomic_read(&tx->tx_refcount) == 1);
-                ksocknal_tx_done (ni, tx);
-        }
+		LASSERT(atomic_read(&tx->tx_refcount) == 1);
+		ksocknal_tx_done(ni, tx, error);
+	}
 }
 
 static void
 ksocknal_check_zc_req(ksock_tx_t *tx)
 {
 	ksock_conn_t *conn = tx->tx_conn;
-        ksock_peer_t *peer = conn->ksnc_peer;
+	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
 
 	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
 	 * to ksnp_zc_req_list if some fragment of this message should be sent
-	 * zero-copy.  Our peer will send an ACK containing this cookie when
+	 * zero-copy.  Our peer_ni will send an ACK containing this cookie when
 	 * she has received this message to tell us we can signal completion.
 	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
 	 * ksnp_zc_req_list.
 	 */
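The first hunk inverts the error-reporting flow: ksocknal_tx_done() now takes the completion status from its caller instead of deriving it locally, and -EIO is only synthesized when the caller reported success but the tx still has unsent bytes (tx_resid) or an aborted zero-copy fragment; ksocknal_txlist_done() forwards its 'error' argument the same way. A minimal userspace model of that precedence rule (types, test values and the helper name are illustrative, not socklnd code):

/* Model of the new completion-status precedence: a caller-supplied
 * error always wins; -EIO is only synthesized when the caller saw
 * success but the tx is demonstrably incomplete. */
#include <errno.h>
#include <stdio.h>

static int tx_completion_status(int rc, int tx_resid, int tx_zc_aborted)
{
	if (!rc && (tx_resid != 0 || tx_zc_aborted))
		rc = -EIO;	/* success reported, but bytes remain */
	return rc;
}

int main(void)
{
	printf("%d\n", tx_completion_status(0, 0, 0));            /* 0: clean */
	printf("%d\n", tx_completion_status(0, 512, 0));          /* -EIO */
	printf("%d\n", tx_completion_status(-ETIMEDOUT, 512, 0)); /* caller's rc kept */
	return 0;
}

The point of the ordering is that a specific errno from the caller, such as -ETIMEDOUT, is never masked by the generic -EIO.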
@@ -461,46 +460,46 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 	ksocknal_tx_addref(tx);
 
-	spin_lock(&peer->ksnp_lock);
+	spin_lock(&peer_ni->ksnp_lock);
 
-	/* ZC_REQ is going to be pinned to the peer */
+	/* ZC_REQ is going to be pinned to the peer_ni */
 	tx->tx_deadline =
 		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
 	LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
 
-	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
+	tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;
 
-	if (peer->ksnp_zc_next_cookie == 0)
-		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+	if (peer_ni->ksnp_zc_next_cookie == 0)
+		peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 
-	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+	list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);
 
-	spin_unlock(&peer->ksnp_lock);
+	spin_unlock(&peer_ni->ksnp_lock);
 }
 
 static void
 ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 {
-	ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+	ksock_peer_ni_t *peer_ni = tx->tx_conn->ksnc_peer;
 
 	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
 	LASSERT(tx->tx_zc_capable);
 
 	tx->tx_zc_checked = 0;
 
-	spin_lock(&peer->ksnp_lock);
+	spin_lock(&peer_ni->ksnp_lock);
 
 	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
 		/* Not waiting for an ACK */
-		spin_unlock(&peer->ksnp_lock);
+		spin_unlock(&peer_ni->ksnp_lock);
 		return;
 	}
 
 	tx->tx_msg.ksm_zc_cookies[0] = 0;
 	list_del(&tx->tx_zc_list);
 
-	spin_unlock(&peer->ksnp_lock);
+	spin_unlock(&peer_ni->ksnp_lock);
 
 	ksocknal_tx_decref(tx);
 }
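A standalone sketch of the cookie-wrap rule in ksocknal_check_zc_req() above: cookie 0 is reserved to mean "no ACK expected", and the wrap restarts past the keepalive cookie so neither reserved value is ever allocated. SOCKNAL_KEEPALIVE_PING == 1 is an assumption of this sketch, chosen to match socklnd.h:

#include <stdint.h>
#include <stdio.h>

#define SOCKNAL_KEEPALIVE_PING 1	/* assumed reserved cookie value */

static uint64_t zc_next_cookie = UINT64_MAX;	/* start at the edge to force a wrap */

static uint64_t zc_alloc_cookie(void)
{
	uint64_t cookie = zc_next_cookie++;

	if (zc_next_cookie == 0)	/* wrapped: skip 0 and the ping cookie */
		zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
	return cookie;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("cookie %llu\n", (unsigned long long)zc_alloc_cookie());
	/* prints 18446744073709551615, then 2, then 3 - never 0 or 1 */
	return 0;
}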
@@ -606,14 +605,14 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 }
 
 void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni)
 {
 	ksock_route_t *route;
 
 	/* called holding write lock on ksnd_global_lock */
 	for (;;) {
 		/* launch any/all connections that need it */
-		route = ksocknal_find_connectable_route_locked(peer);
+		route = ksocknal_find_connectable_route_locked(peer_ni);
 		if (route == NULL)
 			return;
 
@@ -622,7 +621,7 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
 }
 
 ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
+ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
 {
 	struct list_head *tmp;
 	ksock_conn_t *conn;
@@ -631,7 +630,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 	int tnob = 0;
 	int fnob = 0;
 
-	list_for_each(tmp, &peer->ksnp_conns) {
+	list_for_each(tmp, &peer_ni->ksnp_conns) {
 		ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
 		int nob = atomic_read(&c->ksnc_tx_nob) +
 			  c->ksnc_sock->sk->sk_wmem_queued;
@@ -777,13 +776,13 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 }
 
 ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
 {
 	cfs_time_t now = cfs_time_current();
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each(tmp, &peer->ksnp_routes) {
+	list_for_each(tmp, &peer_ni->ksnp_routes) {
 		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
@@ -814,12 +813,12 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 }
 
 ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni)
 {
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each(tmp, &peer->ksnp_routes) {
+	list_for_each(tmp, &peer_ni->ksnp_routes) {
 		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
@@ -832,9 +831,10 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
 }
 
 int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(struct lnet_ni *ni, ksock_tx_t *tx,
+		       struct lnet_process_id id)
 {
-        ksock_peer_t *peer;
+	ksock_peer_ni_t *peer_ni;
 	ksock_conn_t *conn;
 	rwlock_t *g_lock;
 	int retry;
@@ -846,10 +846,10 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 	for (retry = 0;; retry = 1) {
 		read_lock(g_lock);
 
-		peer = ksocknal_find_peer_locked(ni, id);
-		if (peer != NULL) {
-			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
-				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+		peer_ni = ksocknal_find_peer_locked(ni, id);
+		if (peer_ni != NULL) {
+			if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
+				conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
 				if (conn != NULL) {
 					/* I've got no routes that need to be
 					 * connecting and I do have an actual
@@ -866,8 +866,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 
 		write_lock_bh(g_lock);
 
-		peer = ksocknal_find_peer_locked(ni, id);
-		if (peer != NULL)
+		peer_ni = ksocknal_find_peer_locked(ni, id);
+		if (peer_ni != NULL)
 			break;
 
 		write_unlock_bh(g_lock);
@@ -879,7 +879,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 	}
 
 	if (retry) {
-		CERROR("Can't find peer %s\n", libcfs_id2str(id));
+		CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
 		return -EHOSTUNREACH;
 	}
 
@@ -887,15 +887,15 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 			       LNET_NIDADDR(id.nid),
 			       lnet_acceptor_port());
 		if (rc != 0) {
-			CERROR("Can't add peer %s: %d\n",
+			CERROR("Can't add peer_ni %s: %d\n",
 			       libcfs_id2str(id), rc);
 			return rc;
 		}
 	}
 
-	ksocknal_launch_all_connections_locked(peer);
+	ksocknal_launch_all_connections_locked(peer_ni);
 
-	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+	conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
 	if (conn != NULL) {
 		/* Connection exists; queue message on it */
 		ksocknal_queue_tx_locked (tx, conn);
@@ -903,14 +903,14 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 		return (0);
 	}
 
-	if (peer->ksnp_accepting > 0 ||
-	    ksocknal_find_connecting_route_locked (peer) != NULL) {
-		/* the message is going to be pinned to the peer */
+	if (peer_ni->ksnp_accepting > 0 ||
+	    ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
+		/* the message is going to be pinned to the peer_ni */
 		tx->tx_deadline =
 			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
 		/* Queue the message until a connection is established */
-		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
+		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
@@ -923,11 +923,11 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 }
 
 int
-ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 {
-        int               mpflag = 1;
-        int               type = lntmsg->msg_type;
-        lnet_process_id_t target = lntmsg->msg_target;
+	int mpflag = 1;
+	int type = lntmsg->msg_type;
+	struct lnet_process_id target = lntmsg->msg_target;
 	unsigned int payload_niov = lntmsg->msg_niov;
 	struct kvec *payload_iov = lntmsg->msg_iov;
 	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
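ksocknal_launch_packet() above follows an optimistic locking pattern: look up the peer_ni and queue on an existing conn under the shared read lock, and only take the write lock (re-doing the lookup, since the table may have changed between the two locks) when a peer_ni or connection has to be created; a miss after one creation attempt fails with -EHOSTUNREACH. A compressed pthread sketch of that retry shape - the table and helpers below are stand-ins, not socklnd calls:

#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
static int peer_table[8];			/* 1 = peer_ni exists */

static int *lookup_locked(int id)		/* must be called with g_lock held */
{
	return peer_table[id] ? &peer_table[id] : NULL;
}

static int find_or_create_peer(int id)
{
	int retry;

	for (retry = 0; ; retry = 1) {
		pthread_rwlock_rdlock(&g_lock);
		if (lookup_locked(id) != NULL) {
			/* fast path: queue the tx on an existing conn here */
			pthread_rwlock_unlock(&g_lock);
			return 0;
		}
		pthread_rwlock_unlock(&g_lock);

		if (retry)
			return -1;		/* give up, like -EHOSTUNREACH */

		pthread_rwlock_wrlock(&g_lock);	/* slow path may modify the table */
		peer_table[id] = 1;		/* stands in for adding the peer_ni */
		pthread_rwlock_unlock(&g_lock);
	}
}

int main(void)
{
	return find_or_create_peer(3);	/* miss, create, retry, hit: returns 0 */
}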
@@ -1061,15 +1061,15 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 		conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
 		break;
 
-        case KSOCK_PROTO_V1:
-                /* Receiving bare lnet_hdr_t */
-                conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
-                conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
-                conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
+	case KSOCK_PROTO_V1:
+		/* Receiving bare struct lnet_hdr */
+		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
+		conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
+		conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
 
 		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-                conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
-                conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+		conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+		conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
 		break;
 
 	default:
@@ -1114,9 +1114,9 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 static int
 ksocknal_process_receive (ksock_conn_t *conn)
 {
-	lnet_hdr_t        *lhdr;
-	lnet_process_id_t *id;
-	int                rc;
+	struct lnet_hdr *lhdr;
+	struct lnet_process_id *id;
+	int rc;
 
 	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
 
@@ -1131,10 +1131,11 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		rc = ksocknal_receive(conn);
 
 		if (rc <= 0) {
-			lnet_process_id_t ksnp_id = conn->ksnc_peer->ksnp_id;
+			struct lnet_process_id ksnp_id;
 
-			LASSERT(rc != -EAGAIN);
+			ksnp_id = conn->ksnc_peer->ksnp_id;
 
+			LASSERT(rc != -EAGAIN);
 			if (rc == 0)
 				CDEBUG(D_NET, "[%p] EOF from %s "
 				       "ip %pI4h:%d\n", conn,
@@ -1235,7 +1236,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
 
 		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
-			/* Userspace peer */
+			/* Userspace peer_ni */
 			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
 			id = &conn->ksnc_peer->ksnp_id;
@@ -1293,7 +1294,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 				le64_to_cpu(lhdr->src_nid) != id->nid);
 		}
 
-		lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
+		lnet_finalize(conn->ksnc_cookie, rc);
 
 		if (rc != 0) {
 			ksocknal_new_packet(conn, 0);
@@ -1318,9 +1319,10 @@ ksocknal_process_receive (ksock_conn_t *conn)
 }
 
 int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
-               unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
-               unsigned int offset, unsigned int mlen, unsigned int rlen)
+ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
+	      int delayed, unsigned int niov, struct kvec *iov,
+	      lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+	      unsigned int rlen)
 {
 	ksock_conn_t *conn = (ksock_conn_t *)private;
 	ksock_sched_t *sched = conn->ksnc_scheduler;
@@ -1645,10 +1647,12 @@ ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
 	}
 
 	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
-		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
+		struct lnet_magicversion *hmv;
+
+		CLASSERT(sizeof(struct lnet_magicversion) ==
+			 offsetof(struct ksock_hello_msg, kshm_src_nid));
 
-		CLASSERT (sizeof (lnet_magicversion_t) ==
-			  offsetof (struct ksock_hello_msg, kshm_src_nid));
+		hmv = (struct lnet_magicversion *)hello;
 
 		if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
 		    hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
@@ -1659,8 +1663,8 @@ ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
 }
 
 int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-                     lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
+ksocknal_send_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+		    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
 {
 	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
 	ksock_net_t *net = (ksock_net_t *)ni->ni_data;
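The reordered CLASSERT above is a compile-time guard: the V1 handshake parses the leading bytes of struct ksock_hello_msg as a struct lnet_magicversion, so the two layouts must stay aliasable. The same guard expressed in plain C11, with simplified stand-in structs (the real field lists differ):

#include <stdint.h>
#include <stddef.h>

struct magicversion {			/* stand-in for struct lnet_magicversion */
	uint32_t magic;
	uint16_t version_major;
	uint16_t version_minor;
};

struct hello_msg {			/* stand-in for struct ksock_hello_msg */
	uint32_t kshm_magic;
	uint16_t kshm_version_major;
	uint16_t kshm_version_minor;
	uint64_t kshm_src_nid;
	/* ... */
};

/* Fails the build, not the connection, if the prefix layouts diverge. */
_Static_assert(sizeof(struct magicversion) ==
	       offsetof(struct hello_msg, kshm_src_nid),
	       "hello prefix must alias magicversion");

int main(void) { return 0; }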
@@ -1698,8 +1702,9 @@ ksocknal_invert_type(int type)
 }
 
 int
-ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
-                    struct ksock_hello_msg *hello, lnet_process_id_t *peerid,
+ksocknal_recv_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+		    struct ksock_hello_msg *hello,
+		    struct lnet_process_id *peerid,
 		    __u64 *incarnation)
 {
 	/* Return < 0        fatal error
@@ -1713,7 +1718,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
 	int proto_match;
 	int rc;
 	ksock_proto_t *proto;
-	lnet_process_id_t recv_id;
+	struct lnet_process_id recv_id;
 
 	/* socket type set on active connections - not set on passive */
 	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
@@ -1752,7 +1757,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
 	proto = ksocknal_parse_proto_version(hello);
 	if (proto == NULL) {
 		if (!active) {
-			/* unknown protocol from peer, tell peer my protocol */
+			/* unknown protocol from peer_ni, tell peer_ni my protocol */
 			conn->ksnc_proto = &ksocknal_protocol_v3x;
 #if SOCKNAL_VERSION_DEBUG
 			if (*ksocknal_tunables.ksnd_protocol == 2)
@@ -1792,7 +1797,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
 
 	if (!active &&
 	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
-		/* Userspace NAL assigns peer process ID from socket */
+		/* Userspace NAL assigns peer_ni process ID from socket */
 		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
 		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
 	} else {
@@ -1803,7 +1808,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
 
 	if (!active) {
 		*peerid = recv_id;
 
-		/* peer determines type */
+		/* peer_ni determines type */
 		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
 		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
 			CERROR("Unexpected type %d from %s ip %pI4h\n",
@@ -1845,7 +1850,7 @@ static int
 ksocknal_connect (ksock_route_t *route)
 {
 	struct list_head zombies = LIST_HEAD_INIT(zombies);
-	ksock_peer_t *peer = route->ksnr_peer;
+	ksock_peer_ni_t *peer_ni = route->ksnr_peer;
 	int type;
 	int wanted;
 	struct socket *sock;
@@ -1866,19 +1871,19 @@ ksocknal_connect (ksock_route_t *route)
 	for (;;) {
 		wanted = ksocknal_route_mask() & ~route->ksnr_connected;
 
-		/* stop connecting if peer/route got closed under me, or
+		/* stop connecting if peer_ni/route got closed under me, or
 		 * route got connected while queued */
-		if (peer->ksnp_closing || route->ksnr_deleted ||
+		if (peer_ni->ksnp_closing || route->ksnr_deleted ||
 		    wanted == 0) {
 			retry_later = 0;
 			break;
 		}
 
-		/* reschedule if peer is connecting to me */
-		if (peer->ksnp_accepting > 0) {
+		/* reschedule if peer_ni is connecting to me */
+		if (peer_ni->ksnp_accepting > 0) {
 			CDEBUG(D_NET,
-			       "peer %s(%d) already connecting to me, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+			       "peer_ni %s(%d) already connecting to me, retry later.\n",
+			       libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
 			retry_later = 1;
 		}
 
@@ -1900,21 +1905,21 @@ ksocknal_connect (ksock_route_t *route)
 
 		if (cfs_time_aftereq(cfs_time_current(), deadline)) {
 			rc = -ETIMEDOUT;
-			lnet_connect_console_error(rc, peer->ksnp_id.nid,
+			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
 						   route->ksnr_ipaddr,
 						   route->ksnr_port);
 			goto failed;
 		}
 
-		rc = lnet_connect(&sock, peer->ksnp_id.nid,
+		rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
 				  route->ksnr_myipaddr,
 				  route->ksnr_ipaddr, route->ksnr_port);
 		if (rc != 0)
 			goto failed;
 
-		rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
+		rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
 		if (rc < 0) {
-			lnet_connect_console_error(rc, peer->ksnp_id.nid,
+			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
 						   route->ksnr_ipaddr,
 						   route->ksnr_port);
 
 			goto failed;
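For passive connections arriving from non-privileged ports, ksocknal_recv_hello() above does not trust the process ID claimed in the handshake; it synthesizes one from the source port and flags it as kernel-assigned. A sketch of that composition - the flag value is an assumption here, not quoted from the lnet headers:

#include <stdint.h>
#include <stdio.h>

#define PID_USERFLAG 0x80000000u	/* assumed value of LNET_PID_USERFLAG */

static uint32_t userspace_pid_from_port(uint16_t port)
{
	/* The port uniquely identifies the peer process on that node, and
	 * the flag marks the ID as assigned here rather than peer-claimed. */
	return (uint32_t)port | PID_USERFLAG;
}

int main(void)
{
	printf("0x%x\n", userspace_pid_from_port(1023));	/* 0x800003ff */
	return 0;
}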
@@ -1924,8 +1929,8 @@ ksocknal_connect (ksock_route_t *route)
 		 * race or I have to renegotiate protocol version */
 		retry_later = (rc != 0);
 		if (retry_later)
-			CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid));
+			CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
+			       libcfs_nid2str(peer_ni->ksnp_id.nid));
 
 		write_lock_bh(&ksocknal_data.ksnd_global_lock);
 	}
@@ -1935,10 +1940,10 @@ ksocknal_connect (ksock_route_t *route)
 
 	if (retry_later) {
 		/* re-queue for attention; this frees me up to handle
-		 * the peer's incoming connection request */
+		 * the peer_ni's incoming connection request */
 
 		if (rc == EALREADY ||
-		    (rc == 0 && peer->ksnp_accepting > 0)) {
+		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
 			/* We want to introduce a delay before next
 			 * attempt to connect if we lost conn race,
 			 * but the race is resolved quickly usually,
@@ -1974,29 +1979,29 @@ ksocknal_connect (ksock_route_t *route)
 	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
 					   route->ksnr_retry_interval);
 
-	if (!list_empty(&peer->ksnp_tx_queue) &&
-	    peer->ksnp_accepting == 0 &&
-	    ksocknal_find_connecting_route_locked(peer) == NULL) {
+	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
+	    peer_ni->ksnp_accepting == 0 &&
+	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
 		ksock_conn_t *conn;
 
 		/* ksnp_tx_queue is queued on a conn on successful
 		 * connection for V1.x and V2.x */
-		if (!list_empty(&peer->ksnp_conns)) {
-			conn = list_entry(peer->ksnp_conns.next,
+		if (!list_empty(&peer_ni->ksnp_conns)) {
+			conn = list_entry(peer_ni->ksnp_conns.next,
 					  ksock_conn_t, ksnc_list);
 			LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
 		}
 
 		/* take all the blocked packets while I've got the lock and
 		 * complete below... */
-		list_splice_init(&peer->ksnp_tx_queue, &zombies);
+		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-	ksocknal_peer_failed(peer);
-	ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
-        return 0;
+	ksocknal_peer_failed(peer_ni);
+	ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
+	return 0;
 }
 
 /*
@@ -2006,7 +2011,7 @@ ksocknal_connect (ksock_route_t *route)
  * running out of resource.
  */
 static int
-ksocknal_connd_check_start(long sec, long *timeout)
+ksocknal_connd_check_start(time64_t sec, long *timeout)
 {
 	char name[16];
 	int rc;
@@ -2056,7 +2061,7 @@ ksocknal_connd_check_start(long sec, long *timeout)
 		/* we tried ... */
 		LASSERT(ksocknal_data.ksnd_connd_starting > 0);
 		ksocknal_data.ksnd_connd_starting--;
-		ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
+		ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
 
 		return 1;
 	}
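The connd hunks here and below are part of a y2038 cleanup: cfs_time_current_sec() returned long, which is 32 bits on 32-bit kernels, while ktime_get_real_seconds() returns time64_t everywhere. A userspace illustration of the overflow being avoided, with int32_t standing in for a 32-bit long (the wrap shown is the usual two's-complement behaviour):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t after_2038 = 2147483648LL;	/* first second past 2038-01-19 */
	int32_t narrow = (int32_t)after_2038;	/* old 32-bit 'long' behaviour */

	printf("time64_t: %lld\n", (long long)after_2038);	/* 2147483648 */
	printf("32-bit:   %d\n", narrow);			/* wraps negative */
	return 0;
}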
@@ -2068,7 +2073,7 @@
  * again to recheck these conditions.
  */
 static int
-ksocknal_connd_check_stop(long sec, long *timeout)
+ksocknal_connd_check_stop(time64_t sec, long *timeout)
 {
 	int val;
@@ -2149,7 +2154,7 @@ ksocknal_connd (void *arg)
 
 	while (!ksocknal_data.ksnd_shuttingdown) {
 		ksock_route_t *route = NULL;
-		long sec = cfs_time_current_sec();
+		time64_t sec = ktime_get_real_seconds();
 		long timeout = MAX_SCHEDULE_TIMEOUT;
 		int dropped_lock = 0;
@@ -2242,13 +2247,13 @@ ksocknal_connd (void *arg)
 }
 
 static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni)
 {
 	/* We're called with a shared lock on ksnd_global_lock */
 	ksock_conn_t *conn;
 	struct list_head *ctmp;
 
-	list_for_each(ctmp, &peer->ksnp_conns) {
+	list_for_each(ctmp, &peer_ni->ksnp_conns) {
 		int error;
 
 		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
@@ -2264,7 +2269,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 				CNETERR("A connection with %s "
 					"(%pI4h:%d) was reset; "
 					"it may have rebooted.\n",
-					libcfs_id2str(peer->ksnp_id),
+					libcfs_id2str(peer_ni->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
@@ -2272,7 +2277,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 				CNETERR("A connection with %s "
 					"(%pI4h:%d) timed out; the "
 					"network or node may be down.\n",
-					libcfs_id2str(peer->ksnp_id),
+					libcfs_id2str(peer_ni->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
@@ -2280,7 +2285,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 				CNETERR("An unexpected network error %d "
 					"occurred with %s "
 					"(%pI4h:%d\n", error,
-					libcfs_id2str(peer->ksnp_id),
+					libcfs_id2str(peer_ni->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
@@ -2296,7 +2301,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 			ksocknal_conn_addref(conn);
 			CNETERR("Timeout receiving from %s (%pI4h:%d), "
 				"state %d wanted %d left %d\n",
-				libcfs_id2str(peer->ksnp_id),
+				libcfs_id2str(peer_ni->ksnp_id),
 				&conn->ksnc_ipaddr,
 				conn->ksnc_port,
 				conn->ksnc_rx_state,
@@ -2314,7 +2319,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 			ksocknal_conn_addref(conn);
 			CNETERR("Timeout sending data to %s (%pI4h:%d) "
 				"the network or that node may be down.\n",
-				libcfs_id2str(peer->ksnp_id),
+				libcfs_id2str(peer_ni->ksnp_id),
 				&conn->ksnc_ipaddr, conn->ksnc_port);
 			return (conn);
 		}
@@ -2324,32 +2329,32 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 }
 
 static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
+ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
 {
-        ksock_tx_t *tx;
-        struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
+	ksock_tx_t *tx;
+	struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	while (!list_empty(&peer->ksnp_tx_queue)) {
-		tx = list_entry(peer->ksnp_tx_queue.next,
-                                ksock_tx_t, tx_list);
+	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
+		tx = list_entry(peer_ni->ksnp_tx_queue.next,
+				ksock_tx_t, tx_list);
 
-                if (!cfs_time_aftereq(cfs_time_current(),
-                                      tx->tx_deadline))
-                        break;
+		if (!cfs_time_aftereq(cfs_time_current(),
+				      tx->tx_deadline))
+			break;
 
 		list_del(&tx->tx_list);
 		list_add_tail(&tx->tx_list, &stale_txs);
-        }
+	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-	ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
+	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
 }
 
 static int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni)
 __must_hold(&ksocknal_data.ksnd_global_lock)
 {
 	ksock_sched_t *sched;
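ksocknal_flush_stale_txs() above uses the usual splice-then-complete shape: expired txs are moved to a private list under the write lock and only finalized after the lock is dropped - now with a real -ETIMEDOUT rather than the old generic error flag. A reduced singly-linked model of the splice step (the kernel code uses list_head; like it, this sketch assumes the queue is deadline-ordered so it can stop at the first live entry):

#include <errno.h>
#include <stdio.h>

struct tx { int deadline; struct tx *next; };

/* Move every expired tx from *queue to *stale; caller "holds the lock". */
static void splice_stale(struct tx **queue, struct tx **stale, int now)
{
	while (*queue && (*queue)->deadline <= now) {
		struct tx *t = *queue;

		*queue = t->next;	/* as list_del() */
		t->next = *stale;	/* as list_add_tail(), order aside */
		*stale = t;
	}
}

int main(void)
{
	struct tx c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
	struct tx *queue = &a, *stale = NULL;

	splice_stale(&queue, &stale, 25);	/* a and b expire, c survives */
	/* ...drop the lock, then complete each stale tx with -ETIMEDOUT */
	for (struct tx *t = stale; t; t = t->next)
		printf("tx deadline %d -> rc %d\n", t->deadline, -ETIMEDOUT);
	return 0;
}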
@@ -2357,27 +2362,27 @@ __must_hold(&ksocknal_data.ksnd_global_lock)
 	ksock_tx_t *tx;
 
 	/* last_alive will be updated by create_conn */
-	if (list_empty(&peer->ksnp_conns))
+	if (list_empty(&peer_ni->ksnp_conns))
 		return 0;
 
-	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
+	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
 		return 0;
 
 	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
 	    cfs_time_before(cfs_time_current(),
-			    cfs_time_add(peer->ksnp_last_alive,
+			    cfs_time_add(peer_ni->ksnp_last_alive,
 					 cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
 		return 0;
 
 	if (cfs_time_before(cfs_time_current(),
-			    peer->ksnp_send_keepalive))
+			    peer_ni->ksnp_send_keepalive))
 		return 0;
 
 	/* retry 10 secs later, so we wouldn't put pressure
-	 * on this peer if we failed to send keepalive this time */
-	peer->ksnp_send_keepalive = cfs_time_shift(10);
+	 * on this peer_ni if we failed to send keepalive this time */
+	peer_ni->ksnp_send_keepalive = cfs_time_shift(10);
 
-	conn = ksocknal_find_conn_locked(peer, NULL, 1);
+	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
 	if (conn != NULL) {
 		sched = conn->ksnc_scheduler;
@@ -2400,7 +2405,7 @@ __must_hold(&ksocknal_data.ksnd_global_lock)
 		return -ENOMEM;
 	}
 
-	if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
 		read_lock(&ksocknal_data.ksnd_global_lock);
 		return 1;
 	}
@@ -2416,7 +2421,7 @@ static void
 ksocknal_check_peer_timeouts (int idx)
 {
 	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
-	ksock_peer_t *peer;
+	ksock_peer_ni_t *peer_ni;
 	ksock_conn_t *conn;
 	ksock_tx_t *tx;
@@ -2426,18 +2431,18 @@ ksocknal_check_peer_timeouts (int idx)
 	 * take a look... */
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
-	list_for_each_entry(peer, peers, ksnp_list) {
+	list_for_each_entry(peer_ni, peers, ksnp_list) {
 		ksock_tx_t *tx_stale;
 		cfs_time_t deadline = 0;
 		int resid = 0;
 		int n = 0;
 
-		if (ksocknal_send_keepalive_locked(peer) != 0) {
+		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 			goto again;
 		}
 
-		conn = ksocknal_find_timed_out_conn (peer);
+		conn = ksocknal_find_timed_out_conn (peer_ni);
 
 		if (conn != NULL) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2445,7 +2450,7 @@ ksocknal_check_peer_timeouts (int idx)
 			ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
 
 			/* NB we won't find this one again, but we can't
-			 * just proceed with the next peer, since we dropped
+			 * just proceed with the next peer_ni, since we dropped
 			 * ksnd_global_lock and it might be dead already!
 			 */
 			ksocknal_conn_decref(conn);
 			goto again;
 		}
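The keepalive hunk above encodes three gates: only V3 peers are pinged, nothing is sent while the peer_ni proved alive within the last ksnd_keepalive seconds, and an attempt (successful or not) is not repeated for 10 seconds. A condensed decision function with the same gates (simplified types; the 10-second constant mirrors the code, everything else is illustrative):

#include <stdbool.h>

struct peer_state {
	bool is_v3;
	long last_alive;		/* seconds */
	long send_keepalive_after;	/* earliest next attempt, seconds */
};

/* Returns true when a keepalive should be queued at time 'now'. */
static bool keepalive_due(struct peer_state *p, long now, long keepalive)
{
	if (!p->is_v3 || keepalive <= 0)
		return false;
	if (now < p->last_alive + keepalive)
		return false;		/* peer recently proved alive */
	if (now < p->send_keepalive_after)
		return false;		/* still backing off */

	p->send_keepalive_after = now + 10;	/* retry at most every 10s */
	return true;
}

int main(void)
{
	struct peer_state p = { true, 0, 0 };

	return keepalive_due(&p, 100, 30) ? 0 : 1;	/* due: exits 0 */
}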
@@ -2453,30 +2458,30 @@ ksocknal_check_peer_timeouts (int idx)
 
 		/* we can't process stale txs right here because we're
 		 * holding only shared lock */
-		if (!list_empty(&peer->ksnp_tx_queue)) {
-			ksock_tx_t *tx =
-				list_entry(peer->ksnp_tx_queue.next,
+		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
+			ksock_tx_t *tx =
+				list_entry(peer_ni->ksnp_tx_queue.next,
 					   ksock_tx_t, tx_list);
 
 			if (cfs_time_aftereq(cfs_time_current(),
 					     tx->tx_deadline)) {
-				ksocknal_peer_addref(peer);
+				ksocknal_peer_addref(peer_ni);
 				read_unlock(&ksocknal_data.ksnd_global_lock);
 
-				ksocknal_flush_stale_txs(peer);
+				ksocknal_flush_stale_txs(peer_ni);
 
-				ksocknal_peer_decref(peer);
+				ksocknal_peer_decref(peer_ni);
 				goto again;
 			}
 		}
 
-		if (list_empty(&peer->ksnp_zc_req_list))
+		if (list_empty(&peer_ni->ksnp_zc_req_list))
 			continue;
 
 		tx_stale = NULL;
-		spin_lock(&peer->ksnp_lock);
-		list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
+		spin_lock(&peer_ni->ksnp_lock);
+		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
 			if (!cfs_time_aftereq(cfs_time_current(),
 					      tx->tx_deadline))
 				break;
@@ -2489,7 +2494,7 @@ ksocknal_check_peer_timeouts (int idx)
 		}
 
 		if (tx_stale == NULL) {
-			spin_unlock(&peer->ksnp_lock);
+			spin_unlock(&peer_ni->ksnp_lock);
 			continue;
 		}
@@ -2498,13 +2503,13 @@ ksocknal_check_peer_timeouts (int idx)
 		conn = tx_stale->tx_conn;
 		ksocknal_conn_addref(conn);
 
-		spin_unlock(&peer->ksnp_lock);
+		spin_unlock(&peer_ni->ksnp_lock);
 		read_unlock(&ksocknal_data.ksnd_global_lock);
 
-		CERROR("Total %d stale ZC_REQs for peer %s detected; the "
+		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
 		       "oldest(%p) timed out %ld secs ago, "
 		       "resid: %d, wmem: %d\n",
-		       n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
+		       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
 		       cfs_duration_sec(cfs_time_current() - deadline),
 		       resid, conn->ksnc_sock->sk->sk_wmem_queued);
@@ -2602,7 +2607,7 @@ int ksocknal_reaper(void *arg)
 			int chunk = ksocknal_data.ksnd_peer_hash_size;
 
 			/* Time to check for timeouts on a few more peers: I do
-			 * checks every 'p' seconds on a proportion of the peer
+			 * checks every 'p' seconds on a proportion of the peer_ni
			 * table and I need to check every connection 'n' times
 			 * within a timeout interval, to ensure I detect a
 			 * timeout on any connection within (n+1)/n times the