X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fsocklnd%2Fsocklnd_cb.c;h=b69599b16add8eb89de31ce5455f9ed63307b594;hp=c97cf2bece57e7c1188af398ae58e7606e564f58;hb=d8792a7dab933def57f6069296234ad48ea0da09;hpb=d38d331fa6525ffc02665f48fa52f94626360631

diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index c97cf2b..b69599b 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -1,14 +1,14 @@
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  *
  * Author: Zach Brown
  * Author: Peter J. Braam
  * Author: Phil Schwan
  * Author: Eric Barton
  *
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ * This file is part of Lustre, https://wiki.hpdd.intel.com/
  *
  * Portals is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -37,11 +37,11 @@ ksocknal_alloc_tx(int type, int size)
	/* searching for a noop tx in free list */
	spin_lock(&ksocknal_data.ksnd_tx_lock);

-	if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-		tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs. \
+	if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+		tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
			    next, ksock_tx_t, tx_list);
		LASSERT(tx->tx_desc_size == size);
-		cfs_list_del(&tx->tx_list);
+		list_del(&tx->tx_list);
	}

	spin_unlock(&ksocknal_data.ksnd_tx_lock);
@@ -53,15 +53,15 @@ ksocknal_alloc_tx(int type, int size)
	if (tx == NULL)
		return NULL;

-	cfs_atomic_set(&tx->tx_refcount, 1);
-	tx->tx_zc_aborted = 0;
-	tx->tx_zc_capable = 0;
-	tx->tx_zc_checked = 0;
-	tx->tx_desc_size  = size;
+	atomic_set(&tx->tx_refcount, 1);
+	tx->tx_zc_aborted = 0;
+	tx->tx_zc_capable = 0;
+	tx->tx_zc_checked = 0;
+	tx->tx_desc_size  = size;

-	cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

-	return tx;
+	return tx;
 }

 ksock_tx_t *
@@ -83,7 +83,9 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
	tx->tx_niov   = 1;
	tx->tx_nonblk = nonblk;

-	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
+	tx->tx_msg.ksm_csum = 0;
+	tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
+	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
@@ -93,13 +95,13 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 void
 ksocknal_free_tx (ksock_tx_t *tx)
 {
-	cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

-		cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
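/*
 * Editor's sketch (not part of the patch): the hunks above replace the
 * libcfs cfs_list_* wrappers with the native <linux/list.h> primitives.
 * In isolation, the spinlock-protected freelist pattern they operate on
 * looks like the snippet below; the names "item", "freelist" and
 * "freelist_lock" are illustrative, not taken from socklnd.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;		/* embedded list node */
};

static LIST_HEAD(freelist);
static DEFINE_SPINLOCK(freelist_lock);

static struct item *item_get(void)
{
	struct item *it = NULL;

	spin_lock(&freelist_lock);
	if (!list_empty(&freelist)) {
		/* list_entry() is container_of() for embedded list nodes */
		it = list_entry(freelist.next, struct item, link);
		list_del(&it->link);
	}
	spin_unlock(&freelist_lock);
	return it;
}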
@@ -107,10 +109,10 @@ ksocknal_free_tx (ksock_tx_t *tx)
	}
 }

-int
+static int
 ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 {
-	struct iovec *iov = tx->tx_iov;
+	struct kvec *iov = tx->tx_iov;
	int    nob;
	int    rc;
@@ -131,7 +133,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
	LASSERT (tx->tx_niov > 0);

	if (nob < (int) iov->iov_len) {
-		iov->iov_base = (void *)((char *)iov->iov_base + nob);
+		iov->iov_base += nob;
		iov->iov_len -= nob;
		return (rc);
	}
@@ -144,7 +146,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
	return (rc);
 }

-int
+static int
 ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 {
	lnet_kiov_t *kiov = tx->tx_kiov;
@@ -182,17 +184,18 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
	return (rc);
 }

-int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+static int
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
-	int      rc;
-	int      bufnob;
+	int	rc;
+	int	bufnob;

-	if (ksocknal_data.ksnd_stall_tx != 0) {
-		cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
-	}
+	if (ksocknal_data.ksnd_stall_tx != 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+	}

-	LASSERT (tx->tx_resid != 0);
+	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
@@ -211,21 +214,21 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
			rc = ksocknal_send_kiov (conn, tx);
		}

-		bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
+		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0)                     /* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

-		if (bufnob < conn->ksnc_tx_bufnob) {
-			/* allocated send buffer bytes < computed; infer
-			 * something got ACKed */
-			conn->ksnc_tx_deadline =
-				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-			conn->ksnc_tx_bufnob = bufnob;
-			cfs_mb();
-		}
+		if (bufnob < conn->ksnc_tx_bufnob) {
+			/* allocated send buffer bytes < computed; infer
+			 * something got ACKed */
+			conn->ksnc_tx_deadline =
+				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+			conn->ksnc_tx_bufnob = bufnob;
+			smp_mb();
+		}

-		if (rc <= 0) { /* Didn't write anything? */
+		if (rc <= 0) { /* Didn't write anything? */

			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
				rc = -EAGAIN;
@@ -238,7 +241,7 @@
		}

		/* socket's wmem_queued now includes 'rc' bytes */
-		cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
+		atomic_sub (rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);
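/*
 * Editor's sketch (not part of the patch): cfs_pause() was a libcfs
 * portability wrapper; upstream kernel style spells the delay out, as
 * the ksocknal_transmit() hunk above now does.  The generic form of the
 * replacement idiom ("stall_for" is a hypothetical helper name):
 */
#include <linux/sched.h>

static void stall_for(long ticks)	/* ticks, e.g. from cfs_time_seconds() */
{
	set_current_state(TASK_UNINTERRUPTIBLE);	/* not woken by signals */
	schedule_timeout(ticks);	/* sleeps; state is TASK_RUNNING on return */
}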
@@ -247,16 +250,16 @@
	return (rc);
 }

-int
+static int
 ksocknal_recv_iov (ksock_conn_t *conn)
 {
-	struct iovec *iov = conn->ksnc_rx_iov;
+	struct kvec *iov = conn->ksnc_rx_iov;
	int     nob;
	int     rc;

	LASSERT (conn->ksnc_rx_niov > 0);

-	/* Never touch conn->ksnc_rx_iov or change connection
+	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn);

@@ -266,21 +269,21 @@
	/* received something... */
	nob = rc;

-	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-	conn->ksnc_rx_deadline =
-		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-	cfs_mb();                       /* order with setting rx_started */
-	conn->ksnc_rx_started = 1;
+	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+	conn->ksnc_rx_deadline =
+		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	smp_mb();                       /* order with setting rx_started */
+	conn->ksnc_rx_started = 1;

-	conn->ksnc_rx_nob_wanted -= nob;
-	conn->ksnc_rx_nob_left -= nob;
+	conn->ksnc_rx_nob_wanted -= nob;
+	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT (conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
-			iov->iov_base = (void *)((char *)iov->iov_base + nob);
+			iov->iov_base += nob;
			return (-EAGAIN);
		}

@@ -292,7 +295,7 @@ ksocknal_recv_iov (ksock_conn_t *conn)
	return (rc);
 }

-int
+static int
 ksocknal_recv_kiov (ksock_conn_t *conn)
 {
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
@@ -300,7 +303,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
	int     rc;
	LASSERT (conn->ksnc_rx_nkiov > 0);

-	/* Never touch conn->ksnc_rx_kiov or change connection
+	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_kiov(conn);

@@ -310,14 +313,14 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
	/* received something... */
	nob = rc;

-	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-	conn->ksnc_rx_deadline =
-		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-	cfs_mb();                       /* order with setting rx_started */
-	conn->ksnc_rx_started = 1;
+	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+	conn->ksnc_rx_deadline =
+		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	smp_mb();                       /* order with setting rx_started */
+	conn->ksnc_rx_started = 1;

-	conn->ksnc_rx_nob_wanted -= nob;
-	conn->ksnc_rx_nob_left -= nob;
+	conn->ksnc_rx_nob_wanted -= nob;
+	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT (conn->ksnc_rx_nkiov > 0);
@@ -336,7 +339,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
	return 1;
 }

-int
+static int
 ksocknal_receive (ksock_conn_t *conn)
 {
	/* Return 1 on success, 0 on EOF, < 0 on error.
@@ -345,9 +348,10 @@ ksocknal_receive (ksock_conn_t *conn)
	int     rc;
	ENTRY;

-	if (ksocknal_data.ksnd_stall_rx != 0) {
-		cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
-	}
+	if (ksocknal_data.ksnd_stall_rx != 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+	}

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
@@ -407,12 +411,12 @@ ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
 }

 void
-ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 {
	ksock_tx_t *tx;

-	while (!cfs_list_empty (txlist)) {
-		tx = cfs_list_entry (txlist->next, ksock_tx_t, tx_list);
+	while (!list_empty(txlist)) {
+		tx = list_entry(txlist->next, ksock_tx_t, tx_list);

		if (error && tx->tx_lnetmsg != NULL) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
@@ -424,9 +428,9 @@ ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
			CNETERR("Deleting noop packet\n");
		}

-		cfs_list_del (&tx->tx_list);
+		list_del(&tx->tx_list);

-		LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
+		LASSERT (atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done (ni, tx);
	}
 }
@@ -435,11 +439,11 @@ static void
 ksocknal_check_zc_req(ksock_tx_t *tx)
 {
	ksock_conn_t *conn = tx->tx_conn;
-	ksock_peer_t *peer = conn->ksnc_peer;
+	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
-	 * zero-copy.  Our peer will send an ACK containing this cookie when
+	 * zero-copy.  Our peer_ni will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
@@ -457,51 +461,51 @@ ksocknal_check_zc_req(ksock_tx_t *tx)

	ksocknal_tx_addref(tx);

-	spin_lock(&peer->ksnp_lock);
+	spin_lock(&peer_ni->ksnp_lock);

-	/* ZC_REQ is going to be pinned to the peer */
+	/* ZC_REQ is going to be pinned to the peer_ni */
	tx->tx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

	LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);

-	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
+	tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

-	if (peer->ksnp_zc_next_cookie == 0)
-		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+	if (peer_ni->ksnp_zc_next_cookie == 0)
+		peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

-	cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+	list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

-	spin_unlock(&peer->ksnp_lock);
+	spin_unlock(&peer_ni->ksnp_lock);
 }

 static void
 ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 {
-	ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+	ksock_peer_ni_t *peer_ni = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

-	spin_lock(&peer->ksnp_lock);
+	spin_lock(&peer_ni->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		/* Not waiting for an ACK */
-		spin_unlock(&peer->ksnp_lock);
+		spin_unlock(&peer_ni->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
-	cfs_list_del(&tx->tx_zc_list);
+	list_del(&tx->tx_zc_list);

-	spin_unlock(&peer->ksnp_lock);
+	spin_unlock(&peer_ni->ksnp_lock);

	ksocknal_tx_decref(tx);
 }

-int
+static int
 ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 {
	int rc;
@@ -529,23 +533,23 @@
		counter++;   /* exponential backoff warnings */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%u allocated)\n",
-			      counter, conn, cfs_atomic_read(&libcfs_kmemory));
+			      counter, conn, atomic_read(&libcfs_kmemory));

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT (conn->ksnc_tx_scheduled);
-		cfs_list_add_tail(&conn->ksnc_tx_list,
+		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
-		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
-						   SOCKNAL_ENOMEM_RETRY),
-				      ksocknal_data.ksnd_reaper_waketime))
-			cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+						   SOCKNAL_ENOMEM_RETRY),
+				      ksocknal_data.ksnd_reaper_waketime))
+			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-		return (rc);
-	}
+		return (rc);
+	}

	/* Actual error */
	LASSERT (rc < 0);
@@ -553,22 +557,20 @@
	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
-			LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
+			LCONSOLE_WARN("Host %pI4h reset our connection "
				      "while we were sending data; it may have "
				      "rebooted.\n",
-				      HIPQUAD(conn->ksnc_ipaddr));
+				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error "
-				      "while writing to %u.%u.%u.%u: %d.\n",
-				      HIPQUAD(conn->ksnc_ipaddr), rc);
+				      "while writing to %pI4h: %d.\n",
+				      &conn->ksnc_ipaddr, rc);
			break;
		}
-		CDEBUG(D_NET, "[%p] Error %d on write to %s"
-		       " ip %d.%d.%d.%d:%d\n", conn, rc,
-		       libcfs_id2str(conn->ksnc_peer->ksnp_id),
-		       HIPQUAD(conn->ksnc_ipaddr),
-		       conn->ksnc_port);
+		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
+		       conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
+		       &conn->ksnc_ipaddr, conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
@@ -581,7 +583,7 @@
	return (rc);
 }

-void
+static void
 ksocknal_launch_connection_locked (ksock_route_t *route)
 {
@@ -596,22 +598,22 @@

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

-	cfs_list_add_tail(&route->ksnr_connd_list,
+	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
-	cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 }
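/*
 * Editor's sketch (not part of the patch): the HIPQUAD() macro expanded a
 * host-order IPv4 address into four %u.%u.%u.%u arguments.  The kernel's
 * %pI4h printk format prints the same dotted quad from a pointer to the
 * 32-bit host-order address, so each call site above shrinks to a single
 * argument.  Minimal illustration ("print_peer_addr" is hypothetical):
 */
#include <linux/kernel.h>
#include <linux/types.h>

static void print_peer_addr(__u32 ipaddr, int port)	/* host byte order */
{
	/* the "h" suffix means host order; plain %pI4 expects network order */
	printk(KERN_INFO "peer at %pI4h:%d\n", &ipaddr, port);
}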
 void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni)
 {
	ksock_route_t *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
-		route = ksocknal_find_connectable_route_locked(peer);
+		route = ksocknal_find_connectable_route_locked(peer_ni);
		if (route == NULL)
			return;

@@ -620,19 +622,19 @@
 }

 ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
+ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
 {
-	cfs_list_t       *tmp;
+	struct list_head *tmp;
	ksock_conn_t     *conn;
	ksock_conn_t     *typed = NULL;
	ksock_conn_t     *fallback = NULL;
	int               tnob = 0;
	int               fnob = 0;

-	cfs_list_for_each (tmp, &peer->ksnp_conns) {
-		ksock_conn_t *c = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
-		int nob = cfs_atomic_read(&c->ksnc_tx_nob) +
-			  libcfs_sock_wmem_queued(c->ksnc_sock);
+	list_for_each(tmp, &peer_ni->ksnp_conns) {
+		ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+		int nob = atomic_read(&c->ksnc_tx_nob) +
+			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT (!c->ksnc_closing);
@@ -681,7 +683,7 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
 {
	conn->ksnc_proto->pro_pack(tx);

-	cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+	atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
 }
@@ -690,7 +692,7 @@ void
 ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 {
	ksock_sched_t *sched = conn->ksnc_scheduler;
-	ksock_msg_t   *msg = &tx->tx_msg;
+	struct ksock_msg *msg = &tx->tx_msg;
	ksock_tx_t    *ztx = NULL;
	int            bufnob = 0;
@@ -700,16 +702,15 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
	 * ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

-	CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n",
-		libcfs_id2str(conn->ksnc_peer->ksnp_id),
-		HIPQUAD(conn->ksnc_ipaddr),
-		conn->ksnc_port);
+	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
+	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
+	       &conn->ksnc_ipaddr, conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send.  Many TCP/IP stacks disregard any total
-	 * size parameters passed to them and just look at the frags.
+	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header. */
@@ -724,26 +725,22 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
		 KSOCK_MSG_NOOP,
		 tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

-	/*
-	 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
-	 * but they're used inside spinlocks a lot.
-	 */
-	bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
+	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

-	if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
-		/* First packet starts the timeout */
-		conn->ksnc_tx_deadline =
-			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
-			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-		conn->ksnc_tx_bufnob = 0;
-		cfs_mb(); /* order with adding to tx_queue */
-	}
+	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+		/* First packet starts the timeout */
+		conn->ksnc_tx_deadline =
+			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
+			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+		conn->ksnc_tx_bufnob = 0;
+		smp_mb(); /* order with adding to tx_queue */
+	}

-	if (msg->ksm_type == KSOCK_MSG_NOOP) {
-		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
-		 * on a normal packet so I don't need to send it */
+	if (msg->ksm_type == KSOCK_MSG_NOOP) {
+		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
+		 * on a normal packet so I don't need to send it */
		LASSERT (msg->ksm_zc_cookies[1] != 0);
		LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);

@@ -761,33 +758,33 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
	}

	if (ztx != NULL) {
-		cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
-		cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
+		atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

-	if (conn->ksnc_tx_ready &&      /* able to send */
-	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
-		/* +1 ref for scheduler */
-		ksocknal_conn_addref(conn);
-		cfs_list_add_tail (&conn->ksnc_tx_list,
-				   &sched->kss_tx_conns);
-		conn->ksnc_tx_scheduled = 1;
-		cfs_waitq_signal (&sched->kss_waitq);
-	}
+	if (conn->ksnc_tx_ready &&      /* able to send */
+	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+		/* +1 ref for scheduler */
+		ksocknal_conn_addref(conn);
+		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
+		conn->ksnc_tx_scheduled = 1;
+		wake_up(&sched->kss_waitq);
+	}

	spin_unlock_bh(&sched->kss_lock);
 }

 ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
 {
	cfs_time_t     now = cfs_time_current();
-	cfs_list_t    *tmp;
+	struct list_head *tmp;
	ksock_route_t *route;

-	cfs_list_for_each (tmp, &peer->ksnp_routes) {
-		route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer_ni->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);

		LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);

@@ -801,9 +798,9 @@
		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
		      cfs_time_aftereq(now, route->ksnr_timeout))) {
			CDEBUG(D_NET,
-			       "Too soon to retry route %u.%u.%u.%u "
+			       "Too soon to retry route %pI4h "
			       "(cnted %d, interval %ld, %ld secs later)\n",
-			       HIPQUAD(route->ksnr_ipaddr),
+			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       cfs_duration_sec(route->ksnr_timeout - now));
@@ -817,13 +814,13 @@
 }

 ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni)
 {
-	cfs_list_t        *tmp;
+	struct list_head  *tmp;
	ksock_route_t     *route;

-	cfs_list_for_each (tmp, &peer->ksnp_routes) {
-		route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer_ni->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);

		LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);

@@ -837,7 +834,7 @@
 int
 ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
-	ksock_peer_t     *peer;
+	ksock_peer_ni_t  *peer_ni;
	ksock_conn_t     *conn;
	rwlock_t         *g_lock;
	int               retry;
@@ -849,10 +846,10 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
	for (retry = 0;; retry = 1) {
		read_lock(g_lock);

-		peer = ksocknal_find_peer_locked(ni, id);
-		if (peer != NULL) {
-			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
-				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+		peer_ni = ksocknal_find_peer_locked(ni, id);
+		if (peer_ni != NULL) {
+			if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
+				conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got no routes that need to be
					 * connecting and I do have an actual
@@ -869,8 +866,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)

		write_lock_bh(g_lock);

-		peer = ksocknal_find_peer_locked(ni, id);
-		if (peer != NULL)
+		peer_ni = ksocknal_find_peer_locked(ni, id);
+		if (peer_ni != NULL)
			break;

		write_unlock_bh(g_lock);
@@ -882,7 +879,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
	}

	if (retry) {
-		CERROR("Can't find peer %s\n", libcfs_id2str(id));
+		CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
		return -EHOSTUNREACH;
	}

@@ -890,15 +887,15 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
			       LNET_NIDADDR(id.nid),
			       lnet_acceptor_port());
	if (rc != 0) {
-		CERROR("Can't add peer %s: %d\n",
+		CERROR("Can't add peer_ni %s: %d\n",
		       libcfs_id2str(id), rc);
		return rc;
	}
	}

-	ksocknal_launch_all_connections_locked(peer);
+	ksocknal_launch_all_connections_locked(peer_ni);

-	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+	conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked (tx, conn);
		write_unlock_bh(g_lock);
		return (0);
	}

-	if (peer->ksnp_accepting > 0 ||
-	    ksocknal_find_connecting_route_locked (peer) != NULL) {
-		/* the message is going to be pinned to the peer */
+	if (peer_ni->ksnp_accepting > 0 ||
+	    ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
+		/* the message is going to be pinned to the peer_ni */
		tx->tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

		/* Queue the message until a connection is established */
-		cfs_list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}
@@ -928,11 +925,11 @@
 int
 ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 {
-	int               mpflag = 0;
+	int               mpflag = 1;
	int               type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int      payload_niov = lntmsg->msg_niov;
-	struct iovec     *payload_iov = lntmsg->msg_iov;
+	struct kvec      *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
	unsigned int      payload_offset = lntmsg->msg_offset;
	unsigned int      payload_nob = lntmsg->msg_len;
@@ -946,18 +943,18 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

-	LASSERT (payload_nob == 0 || payload_niov > 0);
-	LASSERT (payload_niov <= LNET_MAX_IOV);
-	/* payload is either all vaddrs or all pages */
-	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-	LASSERT (!cfs_in_interrupt ());
+	LASSERT (payload_nob == 0 || payload_niov > 0);
+	LASSERT (payload_niov <= LNET_MAX_IOV);
+	/* payload is either all vaddrs or all pages */
+	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+	LASSERT (!in_interrupt ());

-	if (payload_iov != NULL)
-		desc_size = offsetof(ksock_tx_t,
-				     tx_frags.virt.iov[1 + payload_niov]);
-	else
-		desc_size = offsetof(ksock_tx_t,
-				     tx_frags.paged.kiov[payload_niov]);
+	if (payload_iov != NULL)
+		desc_size = offsetof(ksock_tx_t,
+				     tx_frags.virt.iov[1 + payload_niov]);
+	else
+		desc_size = offsetof(ksock_tx_t,
+				     tx_frags.paged.kiov[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
@@ -993,12 +990,16 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
		tx->tx_zc_capable = 1;
	}

-	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
+	tx->tx_msg.ksm_csum = 0;
+	tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
+	tx->tx_msg.ksm_zc_cookies[0] = 0;
+	tx->tx_msg.ksm_zc_cookies[1] = 0;

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
-	if (lntmsg->msg_vmflush)
+	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);
+
	if (rc == 0)
		return (0);

@@ -1009,7 +1010,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 int
 ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-	cfs_task_t *task = kthread_run(fn, arg, name);
+	struct task_struct *task = kthread_run(fn, arg, name);

	if (IS_ERR(task))
		return PTR_ERR(task);
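/*
 * Editor's sketch (not part of the patch): cfs_task_t was a libcfs typedef
 * for struct task_struct, so ksocknal_thread_start() above is now a thin
 * wrapper over the stock kthread API.  Generic shape ("start_worker" is a
 * hypothetical name):
 */
#include <linux/kthread.h>
#include <linux/err.h>

static int start_worker(int (*fn)(void *), void *arg, const char *name)
{
	/* kthread_run() creates and wakes the thread in one step */
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	/* on failure it returns ERR_PTR(-errno), never NULL */
	return IS_ERR(task) ? PTR_ERR(task) : 0;
}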
@@ -1044,20 +1045,20 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
		ksocknal_lib_eager_ack(conn);
	}

-	if (nob_to_skip == 0) {         /* right at next packet boundary now */
-		conn->ksnc_rx_started = 0;
-		cfs_mb();               /* racing with timeout thread */
+	if (nob_to_skip == 0) {         /* right at next packet boundary now */
+		conn->ksnc_rx_started = 0;
+		smp_mb();               /* racing with timeout thread */

-		switch (conn->ksnc_proto->pro_version) {
-		case  KSOCK_PROTO_V2:
-		case  KSOCK_PROTO_V3:
+		switch (conn->ksnc_proto->pro_version) {
+		case  KSOCK_PROTO_V2:
+		case  KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
-			conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

-			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
-			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
-			conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
+			conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
+			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
+			conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
			break;

		case KSOCK_PROTO_V1:
@@ -1066,7 +1067,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);

-			conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
			break;
@@ -1087,7 +1088,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
-	conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;
@@ -1101,7 +1102,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
		nob_to_skip -=nob;

	} while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
-		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
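/*
 * Editor's sketch (not part of the patch): the receive paths above switch
 * from struct iovec (which describes userspace buffers) to struct kvec
 * (kernel-virtual buffers), the type kernel_recvmsg()/kernel_sendmsg()
 * expect.  Setting up a fixed-size header read in that style; "msg_hdr"
 * and "setup_hdr_read" are illustrative stand-ins:
 */
#include <linux/uio.h>
#include <linux/types.h>

struct msg_hdr {		/* stand-in for struct ksock_msg's header part */
	__u32 csum;
	__u32 type;
};

static void setup_hdr_read(struct kvec *iov, struct msg_hdr *hdr)
{
	iov[0].iov_base = hdr;			/* kernel virtual address */
	iov[0].iov_len  = sizeof(*hdr);		/* bytes wanted before parsing */
}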
@@ -1110,17 +1111,17 @@
	return (0);
 }

-int
+static int
 ksocknal_process_receive (ksock_conn_t *conn)
 {
	lnet_hdr_t        *lhdr;
	lnet_process_id_t *id;
	int                rc;

-	LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);

-	/* NB: sched lock NOT held */
-	/* SOCKNAL_RX_LNET_HEADER is here for backward compatability */
+	/* NB: sched lock NOT held */
+	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		 conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
@@ -1129,22 +1130,23 @@
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn);

-		if (rc <= 0) {
-			LASSERT (rc != -EAGAIN);
+		if (rc <= 0) {
+			lnet_process_id_t ksnp_id = conn->ksnc_peer->ksnp_id;

-			if (rc == 0)
-				CDEBUG (D_NET, "[%p] EOF from %s"
-					" ip %d.%d.%d.%d:%d\n", conn,
-					libcfs_id2str(conn->ksnc_peer->ksnp_id),
-					HIPQUAD(conn->ksnc_ipaddr),
-					conn->ksnc_port);
-			else if (!conn->ksnc_closing)
-				CERROR ("[%p] Error %d on read from %s"
-					" ip %d.%d.%d.%d:%d\n",
-					conn, rc,
-					libcfs_id2str(conn->ksnc_peer->ksnp_id),
-					HIPQUAD(conn->ksnc_ipaddr),
+			LASSERT(rc != -EAGAIN);
+
+			if (rc == 0)
+				CDEBUG(D_NET, "[%p] EOF from %s "
+				       "ip %pI4h:%d\n", conn,
+				       libcfs_id2str(ksnp_id),
+				       &conn->ksnc_ipaddr, conn->ksnc_port);
+			else if (!conn->ksnc_closing)
+				CERROR("[%p] Error %d on read from %s "
+				       "ip %pI4h:%d\n", conn, rc,
+				       libcfs_id2str(ksnp_id),
+				       &conn->ksnc_ipaddr,
					conn->ksnc_port);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings (conn,
@@ -1200,7 +1202,7 @@
						conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
-				CERROR("%s: Unknown ZC-ACK cookie: "LPU64", "LPU64"\n",
+				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
@@ -1215,12 +1217,12 @@
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
-		conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
-		conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
+		conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
+		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

-		conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
-		conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
+		conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
@@ -1233,7 +1235,7 @@
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
-			/* Userspace peer */
+			/* Userspace peer_ni */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;
@@ -1317,7 +1319,7 @@

 int
 ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
-	       unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+	       unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
	       unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
	ksock_conn_t *conn = (ksock_conn_t *)private;
@@ -1354,12 +1356,12 @@

	spin_lock_bh(&sched->kss_lock);

-	switch (conn->ksnc_rx_state) {
-	case SOCKNAL_RX_PARSE_WAIT:
-		cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-		cfs_waitq_signal (&sched->kss_waitq);
-		LASSERT (conn->ksnc_rx_ready);
-		break;
+	switch (conn->ksnc_rx_state) {
+	case SOCKNAL_RX_PARSE_WAIT:
+		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+		wake_up(&sched->kss_waitq);
+		LASSERT(conn->ksnc_rx_ready);
+		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
@@ -1381,8 +1383,8 @@
	spin_lock_bh(&sched->kss_lock);

	rc = (!ksocknal_data.ksnd_shuttingdown &&
-	      cfs_list_empty(&sched->kss_rx_conns) &&
-	      cfs_list_empty(&sched->kss_tx_conns));
+	      list_empty(&sched->kss_rx_conns) &&
+	      list_empty(&sched->kss_tx_conns));

	spin_unlock_bh(&sched->kss_lock);
	return rc;
@@ -1405,8 +1407,8 @@ int ksocknal_scheduler(void *arg)

	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
	if (rc != 0) {
-		CERROR("Can't set CPT affinity to %d: %d\n",
-		       info->ksi_cpt, rc);
+		CWARN("Can't set CPU partition affinity to %d: %d\n",
+		      info->ksi_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);
@@ -1416,10 +1418,10 @@ int ksocknal_scheduler(void *arg)

		/* Ensure I progress everything semi-fairly */

-		if (!cfs_list_empty (&sched->kss_rx_conns)) {
-			conn = cfs_list_entry(sched->kss_rx_conns.next,
+		if (!list_empty(&sched->kss_rx_conns)) {
+			conn = list_entry(sched->kss_rx_conns.next,
					      ksock_conn_t, ksnc_rx_list);
-			cfs_list_del(&conn->ksnc_rx_list);
+			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);
@@ -1449,7 +1451,7 @@ int ksocknal_scheduler(void *arg)
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
-				cfs_list_add_tail (&conn->ksnc_rx_list,
+				list_add_tail(&conn->ksnc_rx_list,
						   &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
@@ -1460,31 +1462,31 @@ int ksocknal_scheduler(void *arg)
			did_something = 1;
		}

-		if (!cfs_list_empty (&sched->kss_tx_conns)) {
-			CFS_LIST_HEAD    (zlist);
+		if (!list_empty(&sched->kss_tx_conns)) {
+			struct list_head zlist = LIST_HEAD_INIT(zlist);

-			if (!cfs_list_empty(&sched->kss_zombie_noop_txs)) {
-				cfs_list_add(&zlist,
+			if (!list_empty(&sched->kss_zombie_noop_txs)) {
+				list_add(&zlist,
					     &sched->kss_zombie_noop_txs);
-				cfs_list_del_init(&sched->kss_zombie_noop_txs);
+				list_del_init(&sched->kss_zombie_noop_txs);
			}

-			conn = cfs_list_entry(sched->kss_tx_conns.next,
+			conn = list_entry(sched->kss_tx_conns.next,
					      ksock_conn_t, ksnc_tx_list);
-			cfs_list_del (&conn->ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
-			LASSERT(!cfs_list_empty(&conn->ksnc_tx_queue));
+			LASSERT(!list_empty(&conn->ksnc_tx_queue));

-			tx = cfs_list_entry(conn->ksnc_tx_queue.next,
+			tx = list_entry(conn->ksnc_tx_queue.next,
					    ksock_tx_t, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
-			cfs_list_del(&tx->tx_list);
+			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete.  Do
			 * it BEFORE we call process_transmit, since
@@ -1493,8 +1495,8 @@ int ksocknal_scheduler(void *arg)
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

-			if (!cfs_list_empty(&zlist)) {
-				/* free zombie noop txs, it's fast because
+			if (!list_empty(&zlist)) {
+				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}
@@ -1504,7 +1506,7 @@ int ksocknal_scheduler(void *arg)
			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
-				cfs_list_add(&tx->tx_list,
+				list_add(&tx->tx_list,
					     &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
@@ -1519,9 +1521,9 @@ int ksocknal_scheduler(void *arg)
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
-				   !cfs_list_empty (&conn->ksnc_tx_queue)) {
+				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
-				cfs_list_add_tail (&conn->ksnc_tx_list,
+				list_add_tail(&conn->ksnc_tx_list,
						   &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
@@ -1542,9 +1544,9 @@ int ksocknal_scheduler(void *arg)
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT (rc == 0);
-			} else {
-				cfs_cond_resched();
-			}
+			} else {
+				cond_resched();
+			}

			spin_lock_bh(&sched->kss_lock);
		}
@@ -1568,17 +1570,17 @@ void ksocknal_read_callback (ksock_conn_t *conn)

	spin_lock_bh(&sched->kss_lock);

-	conn->ksnc_rx_ready = 1;
+	conn->ksnc_rx_ready = 1;

-	if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
-		cfs_list_add_tail(&conn->ksnc_rx_list,
-				  &sched->kss_rx_conns);
-		conn->ksnc_rx_scheduled = 1;
-		/* extra ref for scheduler */
-		ksocknal_conn_addref(conn);
+	if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
+		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
+		conn->ksnc_rx_scheduled = 1;
+		/* extra ref for scheduler */
+		ksocknal_conn_addref(conn);

-		cfs_waitq_signal (&sched->kss_waitq);
-	}
+		wake_up (&sched->kss_waitq);
+	}

	spin_unlock_bh(&sched->kss_lock);

	EXIT;
@@ -1588,7 +1590,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
	ksock_sched_t *sched;
	ENTRY;
@@ -1597,26 +1599,25 @@ void ksocknal_write_callback(ksock_conn_t *conn)

	spin_lock_bh(&sched->kss_lock);

-	conn->ksnc_tx_ready = 1;
+	conn->ksnc_tx_ready = 1;

-	if (!conn->ksnc_tx_scheduled && // not being progressed
-	    !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
-		cfs_list_add_tail (&conn->ksnc_tx_list,
-				   &sched->kss_tx_conns);
-		conn->ksnc_tx_scheduled = 1;
-		/* extra ref for scheduler */
-		ksocknal_conn_addref(conn);
+	if (!conn->ksnc_tx_scheduled && /* not being progressed */
+	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
+		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+		conn->ksnc_tx_scheduled = 1;
+		/* extra ref for scheduler */
+		ksocknal_conn_addref(conn);

-		cfs_waitq_signal (&sched->kss_waitq);
-	}
+		wake_up(&sched->kss_waitq);
+	}

	spin_unlock_bh(&sched->kss_lock);

	EXIT;
 }

-ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+static ksock_proto_t *
+ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
 {
	__u32   version = 0;
@@ -1647,7 +1648,7 @@
		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;

		CLASSERT (sizeof (lnet_magicversion_t) ==
-			  offsetof (ksock_hello_msg_t, kshm_src_nid));
+			  offsetof (struct ksock_hello_msg, kshm_src_nid));

		if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
@@ -1659,7 +1660,7 @@

 int
 ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+		     lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
 {
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	ksock_net_t         *net = (ksock_net_t *)ni->ni_data;
@@ -1679,7 +1680,7 @@
	return conn->ksnc_proto->pro_send_hello(conn, hello);
 }

-int
+static int
 ksocknal_invert_type(int type)
 {
	switch (type)
@@ -1697,16 +1698,16 @@ ksocknal_invert_type(int type)
 }

 int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
-		     __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    struct ksock_hello_msg *hello, lnet_process_id_t *peerid,
+		    __u64 *incarnation)
 {
	/* Return < 0        fatal error
	 *        0          success
	 *        EALREADY   lost connection race
	 *        EPROTO     protocol version mismatch
	 */
-	cfs_socket_t        *sock = conn->ksnc_sock;
+	struct socket        *sock = conn->ksnc_sock;
	int                  active = (conn->ksnc_proto != NULL);
	int                  timeout;
	int                  proto_match;
	int                  rc;
	ksock_proto_t       *proto;
	lnet_process_id_t    recv_id;

-	/* socket type set on active connections - not set on passive */
-	LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
+	/* socket type set on active connections - not set on passive */
+	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

-	timeout = active ? *ksocknal_tunables.ksnd_timeout :
-			   lnet_acceptor_timeout();
+	timeout = active ? *ksocknal_tunables.ksnd_timeout :
+			   lnet_acceptor_timeout();

-	rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+	rc = lnet_sock_read(sock, &hello->kshm_magic,
+			    sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
-		CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
-			rc, HIPQUAD(conn->ksnc_ipaddr));
+		CERROR("Error %d reading HELLO from %pI4h\n",
+		       rc, &conn->ksnc_ipaddr);
		LASSERT (rc < 0);
		return rc;
	}
@@ -1733,25 +1735,24 @@
	    hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR ("Bad magic(1) %#08x (%#08x expected) from "
-			"%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
-			LNET_PROTO_TCP_MAGIC,
-			HIPQUAD(conn->ksnc_ipaddr));
+			"%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
+			LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
		return -EPROTO;
	}

-	rc = libcfs_sock_read(sock, &hello->kshm_version,
-			      sizeof(hello->kshm_version), timeout);
+	rc = lnet_sock_read(sock, &hello->kshm_version,
+			    sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
-		CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
-			rc, HIPQUAD(conn->ksnc_ipaddr));
-		LASSERT (rc < 0);
+		CERROR("Error %d reading HELLO from %pI4h\n",
+		       rc, &conn->ksnc_ipaddr);
+		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
-			/* unknown protocol from peer, tell peer my protocol */
+			/* unknown protocol from peer_ni, tell peer_ni my protocol */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
 #if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
@@ -1763,10 +1764,8 @@
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

-		CERROR ("Unknown protocol version (%d.x expected)"
-			" from %u.%u.%u.%u\n",
-			conn->ksnc_proto->pro_version,
-			HIPQUAD(conn->ksnc_ipaddr));
+		CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
+		       conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);

		return -EPROTO;
	}
@@ -1777,8 +1776,8 @@
	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
-		CERROR("Error %d reading or checking hello from from %u.%u.%u.%u\n",
-		       rc, HIPQUAD(conn->ksnc_ipaddr));
+		CERROR("Error %d reading or checking hello from from %pI4h\n",
+		       rc, &conn->ksnc_ipaddr);
		LASSERT (rc < 0);
		return rc;
	}
@@ -1787,13 +1786,13 @@
	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
-		       "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
+		       "from %pI4h\n", &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (!active &&
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
-		/* Userspace NAL assigns peer process ID from socket */
+		/* Userspace NAL assigns peer_ni process ID from socket */
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
	} else {
@@ -1804,26 +1803,25 @@
	if (!active) {
		*peerid = recv_id;

-		/* peer determines type */
-		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
-		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
-			CERROR ("Unexpected type %d from %s ip %u.%u.%u.%u\n",
-				hello->kshm_ctype, libcfs_id2str(*peerid),
-				HIPQUAD(conn->ksnc_ipaddr));
-			return -EPROTO;
-		}
-
-		return 0;
-	}
+		/* peer_ni determines type */
+		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
+		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
+			CERROR("Unexpected type %d from %s ip %pI4h\n",
+			       hello->kshm_ctype, libcfs_id2str(*peerid),
+			       &conn->ksnc_ipaddr);
+			return -EPROTO;
+		}
+
+		return 0;
+	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
-				   " %u.%u.%u.%u, but they claimed they were "
+				   " %pI4h, but they claimed they were "
				   "%s; please check your Lustre "
				   "configuration.\n",
				   libcfs_id2str(*peerid),
-				   HIPQUAD(conn->ksnc_ipaddr),
+				   &conn->ksnc_ipaddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}
@@ -1833,25 +1831,24 @@
		return proto_match ? EALREADY : EPROTO;
	}

-	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
-		CERROR ("Mismatched types: me %d, %s ip %u.%u.%u.%u %d\n",
-			conn->ksnc_type, libcfs_id2str(*peerid),
-			HIPQUAD(conn->ksnc_ipaddr),
-			hello->kshm_ctype);
-		return -EPROTO;
-	}
-
-	return 0;
+	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
+		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
+		       conn->ksnc_type, libcfs_id2str(*peerid),
+		       &conn->ksnc_ipaddr,
+		       hello->kshm_ctype);
+		return -EPROTO;
+	}
+
+	return 0;
 }

-int
+static int
 ksocknal_connect (ksock_route_t *route)
 {
-	CFS_LIST_HEAD    (zombies);
-	ksock_peer_t     *peer = route->ksnr_peer;
+	struct list_head zombies = LIST_HEAD_INIT(zombies);
+	ksock_peer_ni_t  *peer_ni = route->ksnr_peer;
	int               type;
	int               wanted;
-	cfs_socket_t     *sock;
+	struct socket     *sock;
	cfs_time_t        deadline;
	int               retry_later = 0;
	int               rc = 0;
@@ -1869,19 +1866,19 @@
	for (;;) {
		wanted = ksocknal_route_mask() & ~route->ksnr_connected;

-		/* stop connecting if peer/route got closed under me, or
+		/* stop connecting if peer_ni/route got closed under me, or
		 * route got connected while queued */
-		if (peer->ksnp_closing || route->ksnr_deleted ||
+		if (peer_ni->ksnp_closing || route->ksnr_deleted ||
		    wanted == 0) {
			retry_later = 0;
			break;
		}

-		/* reschedule if peer is connecting to me */
-		if (peer->ksnp_accepting > 0) {
+		/* reschedule if peer_ni is connecting to me */
+		if (peer_ni->ksnp_accepting > 0) {
			CDEBUG(D_NET,
-			       "peer %s(%d) already connecting to me, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+			       "peer_ni %s(%d) already connecting to me, retry later.\n",
+			       libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
			retry_later = 1;
		}

@@ -1903,21 +1900,21 @@
		if (cfs_time_aftereq(cfs_time_current(), deadline)) {
			rc = -ETIMEDOUT;
-			lnet_connect_console_error(rc, peer->ksnp_id.nid,
+			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

-		rc = lnet_connect(&sock, peer->ksnp_id.nid,
+		rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
				  route->ksnr_myipaddr,
				  route->ksnr_ipaddr, route->ksnr_port);
		if (rc != 0)
			goto failed;

-		rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
+		rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
		if (rc < 0) {
-			lnet_connect_console_error(rc, peer->ksnp_id.nid,
+			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
@@ -1927,8 +1924,8 @@
		 * race or I have to renegotiate protocol version */
		retry_later = (rc != 0);
		if (retry_later)
-			CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid));
+			CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
+			       libcfs_nid2str(peer_ni->ksnp_id.nid));

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}
@@ -1938,10 +1935,10 @@

	if (retry_later) {
		/* re-queue for attention; this frees me up to handle
-		 * the peer's incoming connection request */
+		 * the peer_ni's incoming connection request */

		if (rc == EALREADY ||
-		    (rc == 0 && peer->ksnp_accepting > 0)) {
+		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
			/* We want to introduce a delay before next
			 * attempt to connect if we lost conn race,
			 * but the race is resolved quickly usually,
@@ -1977,35 +1974,28 @@
	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
					   route->ksnr_retry_interval);

-	if (!cfs_list_empty(&peer->ksnp_tx_queue) &&
-	    peer->ksnp_accepting == 0 &&
-	    ksocknal_find_connecting_route_locked(peer) == NULL) {
+	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
+	    peer_ni->ksnp_accepting == 0 &&
+	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
		ksock_conn_t *conn;

		/* ksnp_tx_queue is queued on a conn on successful
		 * connection for V1.x and V2.x */
-		if (!cfs_list_empty (&peer->ksnp_conns)) {
-			conn = cfs_list_entry(peer->ksnp_conns.next,
+		if (!list_empty(&peer_ni->ksnp_conns)) {
+			conn = list_entry(peer_ni->ksnp_conns.next,
					      ksock_conn_t, ksnc_list);
			LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
		}

		/* take all the blocked packets while I've got the lock and
		 * complete below... */
-		cfs_list_splice_init(&peer->ksnp_tx_queue, &zombies);
+		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
	}

-#if 0           /* irrelevent with only eager routes */
-	if (!route->ksnr_deleted) {
-		/* make this route least-favourite for re-selection */
-		cfs_list_del(&route->ksnr_list);
-		cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-	}
-#endif
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

-	ksocknal_peer_failed(peer);
-	ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
+	ksocknal_peer_failed(peer_ni);
+	ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, 1);
	return 0;
 }
@@ -2016,7 +2006,7 @@
 * running out of resource.
 */
 static int
-ksocknal_connd_check_start(long sec, long *timeout)
+ksocknal_connd_check_start(time64_t sec, long *timeout)
 {
	char name[16];
	int rc;
@@ -2066,7 +2056,7 @@
		/* we tried ... */
		LASSERT(ksocknal_data.ksnd_connd_starting > 0);
		ksocknal_data.ksnd_connd_starting--;
-		ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
+		ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
		return 1;
	}

@@ -2078,7 +2068,7 @@
 * again to recheck these conditions.
 */
 static int
-ksocknal_connd_check_stop(long sec, long *timeout)
+ksocknal_connd_check_stop(time64_t sec, long *timeout)
 {
	int val;

@@ -2117,69 +2107,69 @@
 static ksock_route_t *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
-	ksock_route_t *route;
-	cfs_time_t     now;
+	ksock_route_t *route;
+	cfs_time_t     now;

-	now = cfs_time_current();
+	now = cfs_time_current();

-	/* connd_routes can contain both pending and ordinary routes */
-	cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
-				 ksnr_connd_list) {
+	/* connd_routes can contain both pending and ordinary routes */
+	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {

-		if (route->ksnr_retry_interval == 0 ||
-		    cfs_time_aftereq(now, route->ksnr_timeout))
-			return route;
+		if (route->ksnr_retry_interval == 0 ||
+		    cfs_time_aftereq(now, route->ksnr_timeout))
+			return route;

-		if (*timeout_p == CFS_MAX_SCHEDULE_TIMEOUT ||
-		    (int)*timeout_p > (int)(route->ksnr_timeout - now))
-			*timeout_p = (int)(route->ksnr_timeout - now);
-	}
+		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+		    (int)*timeout_p > (int)(route->ksnr_timeout - now))
+			*timeout_p = (int)(route->ksnr_timeout - now);
+	}

-	return NULL;
+	return NULL;
 }
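/*
 * Editor's sketch (not part of the patch): cfs_time_current_sec() returned
 * a long, which overflows in 2038 on 32-bit hosts; the connd hunks above
 * move second-resolution stamps to time64_t and ktime_get_real_seconds().
 * A typical elapsed-time check in the new style ("older_than" is a
 * hypothetical helper):
 */
#include <linux/types.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>

static bool older_than(time64_t stamp, time64_t seconds)
{
	/* ktime_get_real_seconds() is 64-bit wall-clock time, 2038-safe */
	return ktime_get_real_seconds() - stamp > seconds;
}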
 int
 ksocknal_connd (void *arg)
 {
	spinlock_t    *connd_lock = &ksocknal_data.ksnd_connd_lock;
-	ksock_connreq_t   *cr;
-	cfs_waitlink_t     wait;
-	int                nloops = 0;
-	int                cons_retry = 0;
+	ksock_connreq_t   *cr;
+	wait_queue_t       wait;
+	int                nloops = 0;
+	int                cons_retry = 0;

-	cfs_block_allsigs ();
+	cfs_block_allsigs();

-	cfs_waitlink_init (&wait);
+	init_waitqueue_entry(&wait, current);

	spin_lock_bh(connd_lock);

-	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
-	ksocknal_data.ksnd_connd_starting--;
-	ksocknal_data.ksnd_connd_running++;
+	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+	ksocknal_data.ksnd_connd_starting--;
+	ksocknal_data.ksnd_connd_running++;

-	while (!ksocknal_data.ksnd_shuttingdown) {
-		ksock_route_t *route = NULL;
-		long sec = cfs_time_current_sec();
-		long timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-		int  dropped_lock = 0;
-
-		if (ksocknal_connd_check_stop(sec, &timeout)) {
-			/* wakeup another one to check stop */
-			cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
-			break;
-		}
+	while (!ksocknal_data.ksnd_shuttingdown) {
+		ksock_route_t *route = NULL;
+		time64_t sec = ktime_get_real_seconds();
+		long timeout = MAX_SCHEDULE_TIMEOUT;
+		int  dropped_lock = 0;
+
+		if (ksocknal_connd_check_stop(sec, &timeout)) {
+			/* wakeup another one to check stop */
+			wake_up(&ksocknal_data.ksnd_connd_waitq);
+			break;
+		}

		if (ksocknal_connd_check_start(sec, &timeout)) {
			/* created new thread */
			dropped_lock = 1;
		}

-		if (!cfs_list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
			/* Connection accepted by the listener */
-			cr = cfs_list_entry(ksocknal_data.ksnd_connd_connreqs. \
+			cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
					    next, ksock_connreq_t, ksncr_list);

-			cfs_list_del(&cr->ksncr_list);
+			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;
@@ -2199,7 +2189,7 @@ ksocknal_connd (void *arg)
			route = ksocknal_connd_get_route_locked(&timeout);
		}
		if (route != NULL) {
-			cfs_list_del (&route->ksnr_connd_list);
+			list_del(&route->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;
@@ -2208,8 +2198,8 @@ ksocknal_connd (void *arg)
				/* consecutive retry */
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive "
-					      "re-connecting to %u.%u.%u.%u\n",
-					      HIPQUAD(route->ksnr_ipaddr));
+					      "re-connecting to %pI4h\n",
+					      &route->ksnr_ipaddr);
					cons_retry = 0;
				}
			} else {
@@ -2227,21 +2217,21 @@ ksocknal_connd (void *arg)
			continue;

		spin_unlock_bh(connd_lock);
		nloops = 0;
-		cfs_cond_resched();
+		cond_resched();
		spin_lock_bh(connd_lock);
		continue;
	}

	/* Nothing to do for 'timeout'  */
-	cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-	cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
	spin_unlock_bh(connd_lock);

	nloops = 0;
-	cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+	schedule_timeout(timeout);

-	cfs_set_current_state(CFS_TASK_RUNNING);
-	cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
	spin_lock_bh(connd_lock);
	}

	ksocknal_data.ksnd_connd_running--;
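/*
 * Editor's sketch (not part of the patch): the connd loop above now uses
 * the stock Linux waitqueue idiom in place of cfs_waitq_*.  The shape of
 * a timed, exclusive wait (only one sleeper is woken per wake_up()) is:
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);	/* illustrative queue head */

static void wait_for_work(long timeout)
{
	wait_queue_t wait;	/* spelled wait_queue_entry_t on kernels >= 4.13 */

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);	/* must precede queueing/sleep */
	add_wait_queue_exclusive(&wq, &wait);

	schedule_timeout(timeout);	/* returns early if wake_up() ran */

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);
}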
@@ -2251,49 +2241,47 @@ ksocknal_connd (void *arg)
	return 0;
 }

-ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+static ksock_conn_t *
+ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni)
 {
	/* We're called with a shared lock on ksnd_global_lock */
	ksock_conn_t      *conn;
-	cfs_list_t        *ctmp;
+	struct list_head  *ctmp;

-	cfs_list_for_each (ctmp, &peer->ksnp_conns) {
+	list_for_each(ctmp, &peer_ni->ksnp_conns) {
		int     error;
-		conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
+		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
		LASSERT (!conn->ksnc_closing);

-		/* SOCK_ERROR will reset error code of socket in
-		 * some platform (like Darwin8.x) */
-		error = libcfs_sock_error(conn->ksnc_sock);
+		error = conn->ksnc_sock->sk->sk_err;
		if (error != 0) {
			ksocknal_conn_addref(conn);

			switch (error) {
			case ECONNRESET:
				CNETERR("A connection with %s "
-					"(%u.%u.%u.%u:%d) was reset; "
+					"(%pI4h:%d) was reset; "
					"it may have rebooted.\n",
-					libcfs_id2str(peer->ksnp_id),
-					HIPQUAD(conn->ksnc_ipaddr),
+					libcfs_id2str(peer_ni->ksnp_id),
+					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			case ETIMEDOUT:
				CNETERR("A connection with %s "
-					"(%u.%u.%u.%u:%d) timed out; the "
+					"(%pI4h:%d) timed out; the "
					"network or node may be down.\n",
-					libcfs_id2str(peer->ksnp_id),
-					HIPQUAD(conn->ksnc_ipaddr),
+					libcfs_id2str(peer_ni->ksnp_id),
+					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			default:
				CNETERR("An unexpected network error %d "
					"occurred with %s "
-					"(%u.%u.%u.%u:%d\n", error,
-					libcfs_id2str(peer->ksnp_id),
-					HIPQUAD(conn->ksnc_ipaddr),
+					"(%pI4h:%d\n", error,
+					libcfs_id2str(peer_ni->ksnp_id),
+					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			}
@@ -2306,10 +2294,10 @@
				     conn->ksnc_rx_deadline)) {
			/* Timed out incomplete incoming message */
			ksocknal_conn_addref(conn);
-			CNETERR("Timeout receiving from %s (%u.%u.%u.%u:%d), "
+			CNETERR("Timeout receiving from %s (%pI4h:%d), "
				"state %d wanted %d left %d\n",
-				libcfs_id2str(peer->ksnp_id),
-				HIPQUAD(conn->ksnc_ipaddr),
+				libcfs_id2str(peer_ni->ksnp_id),
+				&conn->ksnc_ipaddr,
				conn->ksnc_port,
				conn->ksnc_rx_state,
				conn->ksnc_rx_nob_wanted,
@@ -2317,18 +2305,17 @@
			return (conn);
		}

-		if ((!cfs_list_empty(&conn->ksnc_tx_queue) ||
-		     libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
+		if ((!list_empty(&conn->ksnc_tx_queue) ||
+		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
		    cfs_time_aftereq(cfs_time_current(),
				     conn->ksnc_tx_deadline)) {
			/* Timed out messages queued for sending or
			 * buffered in the socket's send buffer */
			ksocknal_conn_addref(conn);
-			CNETERR("Timeout sending data to %s (%u.%u.%u.%u:%d) "
+			CNETERR("Timeout sending data to %s (%pI4h:%d) "
				"the network or that node may be down.\n",
-				libcfs_id2str(peer->ksnp_id),
-				HIPQUAD(conn->ksnc_ipaddr),
-				conn->ksnc_port);
+				libcfs_id2str(peer_ni->ksnp_id),
+				&conn->ksnc_ipaddr, conn->ksnc_port);
			return (conn);
		}
	}
@@ -2337,63 +2324,65 @@
 }

 static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
+ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
 {
	ksock_tx_t *tx;
-	CFS_LIST_HEAD (stale_txs);
+	struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

-	while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
-		tx = cfs_list_entry (peer->ksnp_tx_queue.next,
+	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
+		tx = list_entry(peer_ni->ksnp_tx_queue.next,
				     ksock_tx_t, tx_list);

		if (!cfs_time_aftereq(cfs_time_current(),
				      tx->tx_deadline))
			break;

-		cfs_list_del (&tx->tx_list);
-		cfs_list_add_tail (&tx->tx_list, &stale_txs);
+		list_del(&tx->tx_list);
+		list_add_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

-	ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
+	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, 1);
 }

-int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+static int
+ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni)
+__must_hold(&ksocknal_data.ksnd_global_lock)
@@ -2337,63 +2324,65 @@
 }

 static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
+ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
 {
 	ksock_tx_t *tx;
-	CFS_LIST_HEAD (stale_txs);
+	struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);

 	write_lock_bh(&ksocknal_data.ksnd_global_lock);

-	while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
-		tx = cfs_list_entry (peer->ksnp_tx_queue.next,
+	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
+		tx = list_entry(peer_ni->ksnp_tx_queue.next,
 				     ksock_tx_t, tx_list);

 		if (!cfs_time_aftereq(cfs_time_current(),
 				      tx->tx_deadline))
 			break;

-		cfs_list_del (&tx->tx_list);
-		cfs_list_add_tail (&tx->tx_list, &stale_txs);
+		list_del(&tx->tx_list);
+		list_add_tail(&tx->tx_list, &stale_txs);
 	}

 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

-	ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
+	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, 1);
 }

-int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+static int
+ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni)
+__must_hold(&ksocknal_data.ksnd_global_lock)
 {
 	ksock_sched_t *sched;
 	ksock_conn_t *conn;
 	ksock_tx_t *tx;

-	if (cfs_list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+	/* last_alive will be updated by create_conn */
+	if (list_empty(&peer_ni->ksnp_conns))
 		return 0;

-	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
+	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
 		return 0;

 	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
 	    cfs_time_before(cfs_time_current(),
-			    cfs_time_add(peer->ksnp_last_alive,
+			    cfs_time_add(peer_ni->ksnp_last_alive,
 					 cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
 		return 0;

 	if (cfs_time_before(cfs_time_current(),
-			    peer->ksnp_send_keepalive))
+			    peer_ni->ksnp_send_keepalive))
 		return 0;

 	/* retry 10 secs later, so we wouldn't put pressure
-	 * on this peer if we failed to send keepalive this time */
-	peer->ksnp_send_keepalive = cfs_time_shift(10);
+	 * on this peer_ni if we failed to send keepalive this time */
+	peer_ni->ksnp_send_keepalive = cfs_time_shift(10);

-	conn = ksocknal_find_conn_locked(peer, NULL, 1);
+	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
 	if (conn != NULL) {
 		sched = conn->ksnc_scheduler;

 		spin_lock_bh(&sched->kss_lock);
-		if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
+		if (!list_empty(&conn->ksnc_tx_queue)) {
 			spin_unlock_bh(&sched->kss_lock);
 			/* there is an queued ACK, don't need keepalive */
 			return 0;
@@ -2411,7 +2400,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 		return -ENOMEM;
 	}

-	if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
 		read_lock(&ksocknal_data.ksnd_global_lock);
 		return 1;
 	}
@@ -2423,11 +2412,11 @@
 }

-void
+static void
 ksocknal_check_peer_timeouts (int idx)
 {
-	cfs_list_t *peers = &ksocknal_data.ksnd_peers[idx];
-	ksock_peer_t *peer;
+	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+	ksock_peer_ni_t *peer_ni;
 	ksock_conn_t *conn;
 	ksock_tx_t *tx;
@@ -2437,17 +2426,18 @@
 	 * take a look... */
 	read_lock(&ksocknal_data.ksnd_global_lock);

-	cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
-		cfs_time_t deadline = 0;
-		int resid = 0;
-		int n = 0;
+	list_for_each_entry(peer_ni, peers, ksnp_list) {
+		ksock_tx_t *tx_stale;
+		cfs_time_t deadline = 0;
+		int resid = 0;
+		int n = 0;

-		if (ksocknal_send_keepalive_locked(peer) != 0) {
+		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 			goto again;
 		}

-		conn = ksocknal_find_timed_out_conn (peer);
+		conn = ksocknal_find_timed_out_conn (peer_ni);

 		if (conn != NULL) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2455,7 +2445,7 @@
 			ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);

 			/* NB we won't find this one again, but we can't
-			 * just proceed with the next peer, since we dropped
+			 * just proceed with the next peer_ni, since we dropped
 			 * ksnd_global_lock and it might be dead already! */
 			ksocknal_conn_decref(conn);
 			goto again;
@@ -2463,30 +2453,30 @@
 		/* we can't process stale txs right here because we're
 		 * holding only shared lock */
-		if (!cfs_list_empty (&peer->ksnp_tx_queue)) {
+		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
 			ksock_tx_t *tx =
-				cfs_list_entry (peer->ksnp_tx_queue.next,
+				list_entry(peer_ni->ksnp_tx_queue.next,
 						ksock_tx_t, tx_list);

 			if (cfs_time_aftereq(cfs_time_current(),
 					     tx->tx_deadline)) {
-				ksocknal_peer_addref(peer);
+				ksocknal_peer_addref(peer_ni);
 				read_unlock(&ksocknal_data.ksnd_global_lock);

-				ksocknal_flush_stale_txs(peer);
+				ksocknal_flush_stale_txs(peer_ni);

-				ksocknal_peer_decref(peer);
+				ksocknal_peer_decref(peer_ni);
 				goto again;
 			}
 		}

-		if (cfs_list_empty(&peer->ksnp_zc_req_list))
+		if (list_empty(&peer_ni->ksnp_zc_req_list))
 			continue;

-		spin_lock(&peer->ksnp_lock);
-		cfs_list_for_each_entry_typed(tx, &peer->ksnp_zc_req_list,
-					      ksock_tx_t, tx_zc_list) {
+		tx_stale = NULL;
+		spin_lock(&peer_ni->ksnp_lock);
+		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
 			if (!cfs_time_aftereq(cfs_time_current(),
 					      tx->tx_deadline))
 				break;
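ksocknal_flush_stale_txs() above keeps the classic drain idiom while switching its on-stack list from CFS_LIST_HEAD to LIST_HEAD_INIT: expired entries are detached onto a private list under the write lock, and the (possibly slow) completion path runs with no lock held. A rough standalone sketch under assumed types (struct item and flush_expired are illustrative only):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

/* Illustrative only: detach expired entries under the lock, then
 * finalize them lock-free, as ksocknal_txlist_done() is called above. */
struct item {
	struct list_head link;
	unsigned long deadline;
};

static void flush_expired(struct list_head *queue, spinlock_t *lock,
			  unsigned long now)
{
	struct list_head expired = LIST_HEAD_INIT(expired);
	struct item *it;

	spin_lock(lock);
	while (!list_empty(queue)) {
		it = list_entry(queue->next, struct item, link);
		if (time_before(now, it->deadline))
			break;			/* queue is deadline-ordered */
		list_move_tail(&it->link, &expired);
	}
	spin_unlock(lock);

	while (!list_empty(&expired)) {
		it = list_entry(expired.next, struct item, link);
		list_del(&it->link);
		/* ... finalize 'it' with no lock held ... */
	}
}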
secs ago, " - "resid: %d, wmem: %d\n", - n, libcfs_nid2str(peer->ksnp_id.nid), tx, - cfs_duration_sec(cfs_time_current() - deadline), - resid, libcfs_sock_wmem_queued(conn->ksnc_sock)); + CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the " + "oldest(%p) timed out %ld secs ago, " + "resid: %d, wmem: %d\n", + n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale, + cfs_duration_sec(cfs_time_current() - deadline), + resid, conn->ksnc_sock->sk->sk_wmem_queued); ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); ksocknal_conn_decref(conn); @@ -2526,13 +2516,12 @@ ksocknal_check_peer_timeouts (int idx) read_unlock(&ksocknal_data.ksnd_global_lock); } -int -ksocknal_reaper (void *arg) +int ksocknal_reaper(void *arg) { - cfs_waitlink_t wait; - ksock_conn_t *conn; - ksock_sched_t *sched; - cfs_list_t enomem_conns; + wait_queue_t wait; + ksock_conn_t *conn; + ksock_sched_t *sched; + struct list_head enomem_conns; int nenomem_conns; cfs_duration_t timeout; int i; @@ -2541,18 +2530,18 @@ ksocknal_reaper (void *arg) cfs_block_allsigs (); - CFS_INIT_LIST_HEAD(&enomem_conns); - cfs_waitlink_init (&wait); + INIT_LIST_HEAD(&enomem_conns); + init_waitqueue_entry(&wait, current); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); while (!ksocknal_data.ksnd_shuttingdown) { - if (!cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns)) { - conn = cfs_list_entry (ksocknal_data. \ + if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { + conn = list_entry(ksocknal_data. \ ksnd_deathrow_conns.next, ksock_conn_t, ksnc_list); - cfs_list_del (&conn->ksnc_list); + list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2563,10 +2552,10 @@ ksocknal_reaper (void *arg) continue; } - if (!cfs_list_empty (&ksocknal_data.ksnd_zombie_conns)) { - conn = cfs_list_entry (ksocknal_data.ksnd_zombie_conns.\ + if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { + conn = list_entry(ksocknal_data.ksnd_zombie_conns.\ next, ksock_conn_t, ksnc_list); - cfs_list_del (&conn->ksnc_list); + list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2576,20 +2565,20 @@ ksocknal_reaper (void *arg) continue; } - if (!cfs_list_empty (&ksocknal_data.ksnd_enomem_conns)) { - cfs_list_add(&enomem_conns, + if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) { + list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns); - cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns); + list_del_init(&ksocknal_data.ksnd_enomem_conns); } spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); /* reschedule all the connections that stalled with ENOMEM... 
@@ -2576,20 +2565,20 @@
 			continue;
 		}

-		if (!cfs_list_empty (&ksocknal_data.ksnd_enomem_conns)) {
-			cfs_list_add(&enomem_conns,
+		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
+			list_add(&enomem_conns,
 				     &ksocknal_data.ksnd_enomem_conns);
-			cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns);
+			list_del_init(&ksocknal_data.ksnd_enomem_conns);
 		}

 		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

 		/* reschedule all the connections that stalled with ENOMEM... */
 		nenomem_conns = 0;
-		while (!cfs_list_empty (&enomem_conns)) {
-			conn = cfs_list_entry (enomem_conns.next,
+		while (!list_empty(&enomem_conns)) {
+			conn = list_entry(enomem_conns.next,
 					       ksock_conn_t, ksnc_tx_list);
-			cfs_list_del (&conn->ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);

 			sched = conn->ksnc_scheduler;
@@ -2597,9 +2586,9 @@
 			LASSERT(conn->ksnc_tx_scheduled);
 			conn->ksnc_tx_ready = 1;
-			cfs_list_add_tail(&conn->ksnc_tx_list,
+			list_add_tail(&conn->ksnc_tx_list,
 					  &sched->kss_tx_conns);
-			cfs_waitq_signal(&sched->kss_waitq);
+			wake_up(&sched->kss_waitq);

 			spin_unlock_bh(&sched->kss_lock);
 			nenomem_conns++;
@@ -2613,7 +2602,7 @@
 			int chunk = ksocknal_data.ksnd_peer_hash_size;

 			/* Time to check for timeouts on a few more peers: I do
-			 * checks every 'p' seconds on a proportion of the peer
+			 * checks every 'p' seconds on a proportion of the peer_ni
 			 * table and I need to check every connection 'n' times
 			 * within a timeout interval, to ensure I detect a
 			 * timeout on any connection within (n+1)/n times the
@@ -2643,17 +2632,16 @@
 		ksocknal_data.ksnd_reaper_waketime =
 			cfs_time_add(cfs_time_current(), timeout);

-		cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-		cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

-		if (!ksocknal_data.ksnd_shuttingdown &&
-		    cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-		    cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
-			cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
-					     timeout);
+		if (!ksocknal_data.ksnd_shuttingdown &&
+		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+		    list_empty(&ksocknal_data.ksnd_zombie_conns))
+			schedule_timeout(timeout);

-		cfs_set_current_state (CFS_TASK_RUNNING);
-		cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 	}
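The reaper's sleep in the final hunk sets TASK_INTERRUPTIBLE and queues itself before re-testing whether anything is waiting to be reaped, so a wake_up() arriving in that window simply resets the task state and schedule_timeout() returns promptly rather than the wakeup being lost. The proportional-scan comment retained above (checks every 'p' seconds, each connection examined 'n' times per timeout interval) also implies a per-wakeup share of roughly size*n*p/timeout hash buckets; a sketch of that arithmetic, with an illustrative helper name (the real computation in ksocknal_reaper() falls outside these hunks):

/* Illustrative only: buckets of the peer_ni hash table to scan per
 * wakeup so that every connection is checked 'n' times per 'timeout'
 * seconds when the reaper wakes every 'p' seconds. */
static int buckets_per_wakeup(int table_size, int n, int p, int timeout)
{
	int chunk = table_size * n * p / timeout;

	return chunk > 0 ? chunk : 1;	/* always make some progress */
}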