X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fsocklnd%2Fsocklnd_cb.c;h=ec3f9525ca28eb0d71dd84902b7e4bc5324ed644;hp=f7b39b707d3f566b30c742560d1a2fae533db367;hb=9976d2c35d40a1709c3539aed76033124e88040e;hpb=546993d587c5fc380e9745eae98f863e02e68575

diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index f7b39b7..ec3f952 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -24,7 +24,9 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include
 #include "socklnd.h"
+#include
 
 struct ksock_tx *
 ksocknal_alloc_tx(int type, int size)
@@ -37,9 +39,9 @@ ksocknal_alloc_tx(int type, int size)
         /* searching for a noop tx in free list */
         spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-                tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
-                                struct ksock_tx, tx_list);
+        tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs,
+                                      struct ksock_tx, tx_list);
+        if (tx) {
                 LASSERT(tx->tx_desc_size == size);
                 list_del(&tx->tx_list);
         }
@@ -53,7 +55,7 @@ ksocknal_alloc_tx(int type, int size)
     if (tx == NULL)
         return NULL;
 
-    atomic_set(&tx->tx_refcount, 1);
+    refcount_set(&tx->tx_refcount, 1);
     tx->tx_zc_aborted = 0;
     tx->tx_zc_capable = 0;
     tx->tx_zc_checked = 0;
@@ -70,26 +72,25 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 {
     struct ksock_tx *tx;
 
-    tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
-    if (tx == NULL) {
-        CERROR("Can't allocate noop tx desc\n");
-        return NULL;
-    }
+    tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
+    if (tx == NULL) {
+        CERROR("Can't allocate noop tx desc\n");
+        return NULL;
+    }
 
-    tx->tx_conn = NULL;
-    tx->tx_lnetmsg = NULL;
-    tx->tx_kiov = NULL;
-    tx->tx_nkiov = 0;
-    tx->tx_iov = tx->tx_frags.virt.iov;
-    tx->tx_niov = 1;
-    tx->tx_nonblk = nonblk;
+    tx->tx_conn = NULL;
+    tx->tx_lnetmsg = NULL;
+    tx->tx_kiov = NULL;
+    tx->tx_nkiov = 0;
+    tx->tx_niov = 1;
+    tx->tx_nonblk = nonblk;
 
     tx->tx_msg.ksm_csum = 0;
     tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
     tx->tx_msg.ksm_zc_cookies[0] = 0;
-    tx->tx_msg.ksm_zc_cookies[1] = cookie;
+    tx->tx_msg.ksm_zc_cookies[1] = cookie;
 
-    return tx;
+    return tx;
 }
 
@@ -111,17 +112,17 @@ ksocknal_free_tx(struct ksock_tx *tx)
 }
 
 static int
-ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx,
+ksocknal_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
                   struct kvec *scratch_iov)
 {
-    struct kvec *iov = tx->tx_iov;
+    struct kvec *iov = &tx->tx_hdr;
     int nob;
     int rc;
 
     LASSERT(tx->tx_niov > 0);
 
-    /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
-    rc = ksocknal_lib_send_iov(conn, tx, scratch_iov);
+    /* Never touch tx->tx_hdr inside ksocknal_lib_send_hdr() */
+    rc = ksocknal_lib_send_hdr(conn, tx, scratch_iov);
     if (rc <= 0) /* sent nothing?
*/ return rc; @@ -131,19 +132,16 @@ ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx, tx->tx_resid -= nob; /* "consume" iov */ - do { - LASSERT(tx->tx_niov > 0); + LASSERT(tx->tx_niov == 1); - if (nob < (int) iov->iov_len) { - iov->iov_base += nob; - iov->iov_len -= nob; - return rc; - } + if (nob < (int) iov->iov_len) { + iov->iov_base += nob; + iov->iov_len -= nob; + return rc; + } - nob -= iov->iov_len; - tx->tx_iov = ++iov; - tx->tx_niov--; - } while (nob != 0); + LASSERT(nob == iov->iov_len); + tx->tx_niov--; return rc; } @@ -152,7 +150,7 @@ static int ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, struct kvec *scratch_iov) { - lnet_kiov_t *kiov = tx->tx_kiov; + struct bio_vec *kiov = tx->tx_kiov; int nob; int rc; @@ -173,13 +171,13 @@ ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx, do { LASSERT(tx->tx_nkiov > 0); - if (nob < (int)kiov->kiov_len) { - kiov->kiov_offset += nob; - kiov->kiov_len -= nob; + if (nob < (int)kiov->bv_len) { + kiov->bv_offset += nob; + kiov->bv_len -= nob; return rc; } - nob -= (int)kiov->kiov_len; + nob -= (int)kiov->bv_len; tx->tx_kiov = ++kiov; tx->tx_nkiov--; } while (nob != 0); @@ -194,10 +192,9 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx, int rc; int bufnob; - if (ksocknal_data.ksnd_stall_tx != 0) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); - } + if (ksocknal_data.ksnd_stall_tx != 0) + schedule_timeout_uninterruptible( + cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); LASSERT(tx->tx_resid != 0); @@ -213,7 +210,7 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx, ksocknal_data.ksnd_enomem_tx--; rc = -EAGAIN; } else if (tx->tx_niov != 0) { - rc = ksocknal_send_iov(conn, tx, scratch_iov); + rc = ksocknal_send_hdr(conn, tx, scratch_iov); } else { rc = ksocknal_send_kiov(conn, tx, scratch_iov); } @@ -226,7 +223,7 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx, /* allocated send buffer bytes < computed; infer * something got ACKed */ conn->ksnc_tx_deadline = ktime_get_seconds() + - lnet_get_lnd_timeout(); + ksocknal_timeout(); conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds(); conn->ksnc_tx_bufnob = bufnob; smp_mb(); @@ -275,7 +272,7 @@ ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov) conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds(); conn->ksnc_rx_deadline = ktime_get_seconds() + - lnet_get_lnd_timeout(); + ksocknal_timeout(); smp_mb(); /* order with setting rx_started */ conn->ksnc_rx_started = 1; @@ -303,7 +300,7 @@ static int ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs, struct kvec *scratch_iov) { - lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + struct bio_vec *kiov = conn->ksnc_rx_kiov; int nob; int rc; LASSERT(conn->ksnc_rx_nkiov > 0); @@ -320,7 +317,7 @@ ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs, conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds(); conn->ksnc_rx_deadline = ktime_get_seconds() + - lnet_get_lnd_timeout(); + ksocknal_timeout(); smp_mb(); /* order with setting rx_started */ conn->ksnc_rx_started = 1; @@ -330,13 +327,13 @@ ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs, do { LASSERT(conn->ksnc_rx_nkiov > 0); - if (nob < (int) kiov->kiov_len) { - kiov->kiov_offset += nob; - kiov->kiov_len -= nob; + if (nob < (int) kiov->bv_len) { + kiov->bv_offset += nob; + kiov->bv_len -= nob; return -EAGAIN; } - nob -= kiov->kiov_len; + nob -= kiov->bv_len; conn->ksnc_rx_kiov 
= ++kiov; conn->ksnc_rx_nkiov--; } while (nob != 0); @@ -354,10 +351,9 @@ ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs, int rc; ENTRY; - if (ksocknal_data.ksnd_stall_rx != 0) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx)); - } + if (ksocknal_data.ksnd_stall_rx != 0) + schedule_timeout_uninterruptible( + cfs_time_seconds(ksocknal_data.ksnd_stall_rx)); rc = ksocknal_connsock_addref(conn); if (rc != 0) { @@ -427,9 +423,8 @@ ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error) { struct ksock_tx *tx; - while (!list_empty(txlist)) { - tx = list_entry(txlist->next, struct ksock_tx, tx_list); - + while ((tx = list_first_entry_or_null(txlist, struct ksock_tx, + tx_list)) != NULL) { if (error && tx->tx_lnetmsg != NULL) { CNETERR("Deleting packet type %d len %d %s->%s\n", le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type), @@ -460,7 +455,7 @@ ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error) tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR; } - LASSERT(atomic_read(&tx->tx_refcount) == 1); + LASSERT(refcount_read(&tx->tx_refcount) == 1); ksocknal_tx_done(ni, tx, error); } } @@ -495,7 +490,7 @@ ksocknal_check_zc_req(struct ksock_tx *tx) /* ZC_REQ is going to be pinned to the peer_ni */ tx->tx_deadline = ktime_get_seconds() + - lnet_get_lnd_timeout(); + ksocknal_timeout(); LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); @@ -570,8 +565,8 @@ ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx, counter++; /* exponential backoff warnings */ if ((counter & (-counter)) == counter) - CWARN("%u ENOMEM tx %p (%u allocated)\n", - counter, conn, atomic_read(&libcfs_kmemory)); + CWARN("%u ENOMEM tx %p (%lld allocated)\n", + counter, conn, libcfs_kmem_read()); /* Queue on ksnd_enomem_conns for retry after a timeout */ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -613,20 +608,17 @@ simulate_error: if (!conn->ksnc_closing) { switch (rc) { case -ECONNRESET: - LCONSOLE_WARN("Host %pI4h reset our connection " - "while we were sending data; it may have " - "rebooted.\n", - &conn->ksnc_ipaddr); + LCONSOLE_WARN("Host %pIS reset our connection while we were sending data; it may have rebooted.\n", + &conn->ksnc_peeraddr); break; default: - LCONSOLE_WARN("There was an unexpected network error " - "while writing to %pI4h: %d.\n", - &conn->ksnc_ipaddr, rc); + LCONSOLE_WARN("There was an unexpected network error while writing to %pIS: %d.\n", + &conn->ksnc_peeraddr, rc); break; } - CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n", + CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pISp\n", conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port); + &conn->ksnc_peeraddr); } if (tx->tx_zc_checked) @@ -640,22 +632,24 @@ simulate_error: } static void -ksocknal_launch_connection_locked(struct ksock_route *route) +ksocknal_launch_connection_locked(struct ksock_conn_cb *conn_cb) { + /* called holding write lock on ksnd_global_lock */ - /* called holding write lock on ksnd_global_lock */ + LASSERT(!conn_cb->ksnr_scheduled); + LASSERT(!conn_cb->ksnr_connecting); + LASSERT((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) != 0); - LASSERT (!route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); - LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0); + /* scheduling conn for connd */ + conn_cb->ksnr_scheduled = 1; - route->ksnr_scheduled = 1; /* scheduling conn for connd */ - ksocknal_route_addref(route); /* extra ref for connd */ 
+ /* extra ref for connd */ + ksocknal_conn_cb_addref(conn_cb); spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - list_add_tail(&route->ksnr_connd_list, - &ksocknal_data.ksnd_connd_routes); + list_add_tail(&conn_cb->ksnr_connd_list, + &ksocknal_data.ksnd_connd_routes); wake_up(&ksocknal_data.ksnd_connd_waitq); spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); @@ -664,32 +658,30 @@ ksocknal_launch_connection_locked(struct ksock_route *route) void ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni) { - struct ksock_route *route; + struct ksock_conn_cb *conn_cb; - /* called holding write lock on ksnd_global_lock */ - for (;;) { - /* launch any/all connections that need it */ - route = ksocknal_find_connectable_route_locked(peer_ni); - if (route == NULL) - return; + /* called holding write lock on ksnd_global_lock */ + for (;;) { + /* launch any/all connections that need it */ + conn_cb = ksocknal_find_connectable_conn_cb_locked(peer_ni); + if (conn_cb == NULL) + return; - ksocknal_launch_connection_locked(route); - } + ksocknal_launch_connection_locked(conn_cb); + } } struct ksock_conn * ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk) { - struct list_head *tmp; + struct ksock_conn *c; struct ksock_conn *conn; struct ksock_conn *typed = NULL; struct ksock_conn *fallback = NULL; int tnob = 0; int fnob = 0; - list_for_each(tmp, &peer_ni->ksnp_conns) { - struct ksock_conn *c = list_entry(tmp, struct ksock_conn, - ksnc_list); + list_for_each_entry(c, &peer_ni->ksnp_conns, ksnc_list) { int nob = atomic_read(&c->ksnc_tx_nob) + c->ksnc_sock->sk->sk_wmem_queued; int rc; @@ -759,23 +751,24 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) * ksnc_sock... */ LASSERT(!conn->ksnc_closing); - CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n", + CDEBUG(D_NET, "Sending to %s ip %pISp\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port); + &conn->ksnc_peeraddr); ksocknal_tx_prep(conn, tx); - /* Ensure the frags we've been given EXACTLY match the number of - * bytes we want to send. Many TCP/IP stacks disregard any total + /* Ensure the frags we've been given EXACTLY match the number of + * bytes we want to send. Many TCP/IP stacks disregard any total * size parameters passed to them and just look at the frags. - * - * We always expect at least 1 mapped fragment containing the - * complete ksocknal message header. */ - LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) + - lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == - (unsigned int)tx->tx_nob); - LASSERT (tx->tx_niov >= 1); - LASSERT (tx->tx_resid == tx->tx_nob); + * + * We always expect at least 1 mapped fragment containing the + * complete ksocknal message header. + */ + LASSERT(lnet_iov_nob(tx->tx_niov, &tx->tx_hdr) + + lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == + (unsigned int)tx->tx_nob); + LASSERT(tx->tx_niov >= 1); + LASSERT(tx->tx_resid == tx->tx_nob); CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", tx, (tx->tx_lnetmsg != NULL) ? 
tx->tx_lnetmsg->msg_hdr.type: @@ -788,7 +781,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) { /* First packet starts the timeout */ conn->ksnc_tx_deadline = ktime_get_seconds() + - lnet_get_lnd_timeout(); + ksocknal_timeout(); if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */ conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds(); conn->ksnc_tx_bufnob = 0; @@ -833,59 +826,51 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) } -struct ksock_route * -ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni) +struct ksock_conn_cb * +ksocknal_find_connectable_conn_cb_locked(struct ksock_peer_ni *peer_ni) { time64_t now = ktime_get_seconds(); - struct list_head *tmp; - struct ksock_route *route; - - list_for_each(tmp, &peer_ni->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - - LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); - - if (route->ksnr_scheduled) /* connections being established */ - continue; - - /* all route types connected ? */ - if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0) - continue; - - if (!(route->ksnr_retry_interval == 0 || /* first attempt */ - now >= route->ksnr_timeout)) { - CDEBUG(D_NET, - "Too soon to retry route %pI4h " - "(cnted %d, interval %lld, %lld secs later)\n", - &route->ksnr_ipaddr, - route->ksnr_connected, - route->ksnr_retry_interval, - route->ksnr_timeout - now); - continue; - } - - return (route); - } + struct ksock_conn_cb *conn_cb; + + conn_cb = peer_ni->ksnp_conn_cb; + if (!conn_cb) + return NULL; + + LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled); + + if (conn_cb->ksnr_scheduled) /* connections being established */ + return NULL; + + /* all conn types connected ? */ + if ((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) == 0) + return NULL; + + if (!(conn_cb->ksnr_retry_interval == 0 || /* first attempt */ + now >= conn_cb->ksnr_timeout)) { + CDEBUG(D_NET, + "Too soon to retry route %pIS (cnted %d, interval %lld, %lld secs later)\n", + &conn_cb->ksnr_addr, + conn_cb->ksnr_connected, + conn_cb->ksnr_retry_interval, + conn_cb->ksnr_timeout - now); + return NULL; + } - return (NULL); + return conn_cb; } -struct ksock_route * -ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni) +struct ksock_conn_cb * +ksocknal_find_connecting_conn_cb_locked(struct ksock_peer_ni *peer_ni) { - struct list_head *tmp; - struct ksock_route *route; - - list_for_each(tmp, &peer_ni->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); + struct ksock_conn_cb *conn_cb; - LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); + conn_cb = peer_ni->ksnp_conn_cb; + if (!conn_cb) + return NULL; - if (route->ksnr_scheduled) - return (route); - } + LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled); - return (NULL); + return conn_cb->ksnr_scheduled ? 
conn_cb : NULL; } int @@ -894,30 +879,32 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx, { struct ksock_peer_ni *peer_ni; struct ksock_conn *conn; + struct sockaddr_in sa; rwlock_t *g_lock; int retry; int rc; - LASSERT (tx->tx_conn == NULL); + LASSERT(tx->tx_conn == NULL); - g_lock = &ksocknal_data.ksnd_global_lock; + g_lock = &ksocknal_data.ksnd_global_lock; - for (retry = 0;; retry = 1) { + for (retry = 0;; retry = 1) { read_lock(g_lock); - peer_ni = ksocknal_find_peer_locked(ni, id); - if (peer_ni != NULL) { - if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) { - conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk); - if (conn != NULL) { - /* I've got no routes that need to be - * connecting and I do have an actual - * connection... */ + peer_ni = ksocknal_find_peer_locked(ni, id); + if (peer_ni != NULL) { + if (ksocknal_find_connectable_conn_cb_locked(peer_ni) == NULL) { + conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk); + if (conn != NULL) { + /* I've got nothing that need to be + * connecting and I do have an actual + * connection... + */ ksocknal_queue_tx_locked (tx, conn); read_unlock(g_lock); return (0); - } - } - } + } + } + } /* I'll need a write lock... */ read_unlock(g_lock); @@ -941,9 +928,11 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx, return -EHOSTUNREACH; } - rc = ksocknal_add_peer(ni, id, - LNET_NIDADDR(id.nid), - lnet_acceptor_port()); + memset(&sa, 0, sizeof(sa)); + sa.sin_family = AF_INET; + sa.sin_addr.s_addr = htonl(LNET_NIDADDR(id.nid)); + sa.sin_port = htons(lnet_acceptor_port()); + rc = ksocknal_add_peer(ni, id, (struct sockaddr *)&sa); if (rc != 0) { CERROR("Can't add peer_ni %s: %d\n", libcfs_id2str(id), rc); @@ -961,17 +950,17 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx, return (0); } - if (peer_ni->ksnp_accepting > 0 || - ksocknal_find_connecting_route_locked (peer_ni) != NULL) { + if (peer_ni->ksnp_accepting > 0 || + ksocknal_find_connecting_conn_cb_locked(peer_ni) != NULL) { /* the message is going to be pinned to the peer_ni */ tx->tx_deadline = ktime_get_seconds() + - lnet_get_lnd_timeout(); + ksocknal_timeout(); /* Queue the message until a connection is established */ list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue); write_unlock_bh(g_lock); return 0; - } + } write_unlock_bh(g_lock); @@ -984,17 +973,17 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx, int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) { - int mpflag = 1; + /* '1' for consistency with code that checks !mpflag to restore */ + unsigned int mpflag = 1; int type = lntmsg->msg_type; struct lnet_process_id target = lntmsg->msg_target; - unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; + unsigned int payload_niov = lntmsg->msg_niov; + struct bio_vec *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; struct ksock_tx *tx; - int desc_size; - int rc; + int desc_size; + int rc; /* NB 'private' is different depending on what we're sending. * Just ignore it... 
*/ @@ -1004,60 +993,48 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) LASSERT (payload_nob == 0 || payload_niov > 0); LASSERT (payload_niov <= LNET_MAX_IOV); - /* payload is either all vaddrs or all pages */ - LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); LASSERT (!in_interrupt ()); - if (payload_iov != NULL) - desc_size = offsetof(struct ksock_tx, - tx_frags.virt.iov[1 + payload_niov]); - else - desc_size = offsetof(struct ksock_tx, - tx_frags.paged.kiov[payload_niov]); + desc_size = offsetof(struct ksock_tx, + tx_payload[payload_niov]); if (lntmsg->msg_vmflush) - mpflag = cfs_memory_pressure_get_and_set(); - tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); - if (tx == NULL) { - CERROR("Can't allocate tx desc type %d size %d\n", - type, desc_size); - if (lntmsg->msg_vmflush) - cfs_memory_pressure_restore(mpflag); - return (-ENOMEM); - } + mpflag = memalloc_noreclaim_save(); - tx->tx_conn = NULL; /* set when assigned a conn */ - tx->tx_lnetmsg = lntmsg; - - if (payload_iov != NULL) { - tx->tx_kiov = NULL; - tx->tx_nkiov = 0; - tx->tx_iov = tx->tx_frags.virt.iov; - tx->tx_niov = 1 + - lnet_extract_iov(payload_niov, &tx->tx_iov[1], - payload_niov, payload_iov, - payload_offset, payload_nob); - } else { - tx->tx_niov = 1; - tx->tx_iov = &tx->tx_frags.paged.iov; - tx->tx_kiov = tx->tx_frags.paged.kiov; - tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov, - payload_niov, payload_kiov, - payload_offset, payload_nob); - - if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload) - tx->tx_zc_capable = 1; - } + tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); + if (tx == NULL) { + CERROR("Can't allocate tx desc type %d size %d\n", + type, desc_size); + if (lntmsg->msg_vmflush) + memalloc_noreclaim_restore(mpflag); + return -ENOMEM; + } + + tx->tx_conn = NULL; /* set when assigned a conn */ + tx->tx_lnetmsg = lntmsg; + + tx->tx_niov = 1; + tx->tx_kiov = tx->tx_payload; + tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov, + payload_niov, payload_kiov, + payload_offset, payload_nob); + + if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload) + tx->tx_zc_capable = 1; tx->tx_msg.ksm_csum = 0; tx->tx_msg.ksm_type = KSOCK_MSG_LNET; tx->tx_msg.ksm_zc_cookies[0] = 0; tx->tx_msg.ksm_zc_cookies[1] = 0; - /* The first fragment will be set later in pro_pack */ - rc = ksocknal_launch_packet(ni, tx, target); - if (!mpflag) - cfs_memory_pressure_restore(mpflag); + /* The first fragment will be set later in pro_pack */ + rc = ksocknal_launch_packet(ni, tx, target); + /* + * We can't test lntsmg->msg_vmflush again as lntmsg may + * have been freed. 
+ */ + if (!mpflag) + memalloc_noreclaim_restore(mpflag); if (rc == 0) return (0); @@ -1067,26 +1044,11 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) return (-EIO); } -int -ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) -{ - struct task_struct *task = kthread_run(fn, arg, name); - - if (IS_ERR(task)) - return PTR_ERR(task); - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - ksocknal_data.ksnd_nthreads++; - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - return 0; -} - void ksocknal_thread_fini (void) { - write_lock_bh(&ksocknal_data.ksnd_global_lock); - ksocknal_data.ksnd_nthreads--; - write_unlock_bh(&ksocknal_data.ksnd_global_lock); + if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads)) + wake_up_var(&ksocknal_data.ksnd_nthreads); } int @@ -1145,22 +1107,22 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip) /* Set up to skip as much as possible now. If there's more left * (ran out of iov entries) we'll get called again */ - conn->ksnc_rx_state = SOCKNAL_RX_SLOP; - conn->ksnc_rx_nob_left = nob_to_skip; + conn->ksnc_rx_state = SOCKNAL_RX_SLOP; + conn->ksnc_rx_nob_left = nob_to_skip; conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; - skipped = 0; - niov = 0; + skipped = 0; + niov = 0; - do { - nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer)); + do { + nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer)); - conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer; - conn->ksnc_rx_iov[niov].iov_len = nob; - niov++; - skipped += nob; - nob_to_skip -=nob; + conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer; + conn->ksnc_rx_iov[niov].iov_len = nob; + niov++; + skipped += nob; + nob_to_skip -= nob; - } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */ + } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */ niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec)); conn->ksnc_rx_niov = niov; @@ -1179,7 +1141,7 @@ ksocknal_process_receive(struct ksock_conn *conn, struct lnet_process_id *id; int rc; - LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); + LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0); /* NB: sched lock NOT held */ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */ @@ -1199,17 +1161,13 @@ ksocknal_process_receive(struct ksock_conn *conn, LASSERT(rc != -EAGAIN); if (rc == 0) - CDEBUG(D_NET, "[%p] EOF from %s " - "ip %pI4h:%d\n", conn, - libcfs_id2str(ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); + CDEBUG(D_NET, "[%p] EOF from %s ip %pISp\n", + conn, libcfs_id2str(ksnp_id), + &conn->ksnc_peeraddr); else if (!conn->ksnc_closing) - CERROR("[%p] Error %d on read from %s " - "ip %pI4h:%d\n", conn, rc, - libcfs_id2str(ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); + CERROR("[%p] Error %d on read from %s ip %pISp\n", + conn, rc, libcfs_id2str(ksnp_id), + &conn->ksnc_peeraddr); /* it's not an error if conn is being closed */ ksocknal_close_conn_and_siblings (conn, @@ -1385,8 +1343,8 @@ ksocknal_process_receive(struct ksock_conn *conn, int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, - int delayed, unsigned int niov, struct kvec *iov, - lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, + int delayed, unsigned int niov, + struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { struct ksock_conn *conn = private; @@ -1399,21 +1357,19 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, conn->ksnc_rx_nob_wanted = mlen; conn->ksnc_rx_nob_left = rlen; - 
if (mlen == 0 || iov != NULL) { - conn->ksnc_rx_nkiov = 0; - conn->ksnc_rx_kiov = NULL; - conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov; - conn->ksnc_rx_niov = - lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov, - niov, iov, offset, mlen); - } else { - conn->ksnc_rx_niov = 0; - conn->ksnc_rx_iov = NULL; - conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; - conn->ksnc_rx_nkiov = - lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov, - niov, kiov, offset, mlen); - } + if (mlen == 0) { + conn->ksnc_rx_nkiov = 0; + conn->ksnc_rx_kiov = NULL; + conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov; + conn->ksnc_rx_niov = 0; + } else { + conn->ksnc_rx_niov = 0; + conn->ksnc_rx_iov = NULL; + conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; + conn->ksnc_rx_nkiov = + lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov, + niov, kiov, offset, mlen); + } LASSERT (mlen == lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) + @@ -1463,7 +1419,6 @@ int ksocknal_scheduler(void *arg) struct ksock_conn *conn; struct ksock_tx *tx; int rc; - int nloops = 0; long id = (long)arg; struct page **rx_scratch_pgs; struct kvec *scratch_iov; @@ -1484,8 +1439,6 @@ int ksocknal_scheduler(void *arg) return -ENOMEM; } - cfs_block_allsigs(); - rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt); if (rc != 0) { CWARN("Can't set CPU partition affinity to %d: %d\n", @@ -1495,13 +1448,13 @@ int ksocknal_scheduler(void *arg) spin_lock_bh(&sched->kss_lock); while (!ksocknal_data.ksnd_shuttingdown) { - int did_something = 0; + bool did_something = false; /* Ensure I progress everything semi-fairly */ - - if (!list_empty(&sched->kss_rx_conns)) { - conn = list_entry(sched->kss_rx_conns.next, - struct ksock_conn, ksnc_rx_list); + conn = list_first_entry_or_null(&sched->kss_rx_conns, + struct ksock_conn, + ksnc_rx_list); + if (conn) { list_del(&conn->ksnc_rx_list); LASSERT(conn->ksnc_rx_scheduled); @@ -1541,28 +1494,25 @@ int ksocknal_scheduler(void *arg) ksocknal_conn_decref(conn); } - did_something = 1; + did_something = true; } if (!list_empty(&sched->kss_tx_conns)) { LIST_HEAD(zlist); - if (!list_empty(&sched->kss_zombie_noop_txs)) { - list_add(&zlist, - &sched->kss_zombie_noop_txs); - list_del_init(&sched->kss_zombie_noop_txs); - } + list_splice_init(&sched->kss_zombie_noop_txs, &zlist); - conn = list_entry(sched->kss_tx_conns.next, - struct ksock_conn, ksnc_tx_list); + conn = list_first_entry(&sched->kss_tx_conns, + struct ksock_conn, + ksnc_tx_list); list_del(&conn->ksnc_tx_list); LASSERT(conn->ksnc_tx_scheduled); LASSERT(conn->ksnc_tx_ready); LASSERT(!list_empty(&conn->ksnc_tx_queue)); - tx = list_entry(conn->ksnc_tx_queue.next, - struct ksock_tx, tx_list); + tx = list_first_entry(&conn->ksnc_tx_queue, + struct ksock_tx, tx_list); if (conn->ksnc_tx_carrier == tx) ksocknal_next_tx_carrier(conn); @@ -1613,14 +1563,12 @@ int ksocknal_scheduler(void *arg) ksocknal_conn_decref(conn); } - did_something = 1; + did_something = true; } - if (!did_something || /* nothing to do */ - ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */ + if (!did_something || /* nothing to do */ + need_resched()) { /* hogging CPU? 
*/ spin_unlock_bh(&sched->kss_lock); - nloops = 0; - if (!did_something) { /* wait for something to do */ rc = wait_event_interruptible_exclusive( sched->kss_waitq, @@ -1635,10 +1583,8 @@ int ksocknal_scheduler(void *arg) } spin_unlock_bh(&sched->kss_lock); - LIBCFS_FREE(rx_scratch_pgs, sizeof(*rx_scratch_pgs) * - LNET_MAX_IOV); - LIBCFS_FREE(scratch_iov, sizeof(*scratch_iov) * - LNET_MAX_IOV); + CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV); + CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV); ksocknal_thread_fini(); return 0; } @@ -1702,8 +1648,8 @@ void ksocknal_write_callback(struct ksock_conn *conn) EXIT; } -static struct ksock_proto * -ksocknal_parse_proto_version (struct ksock_hello_msg *hello) +static const struct ksock_proto * +ksocknal_parse_proto_version(struct ksock_hello_msg *hello) { __u32 version = 0; @@ -1733,7 +1679,7 @@ ksocknal_parse_proto_version (struct ksock_hello_msg *hello) if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { struct lnet_magicversion *hmv; - CLASSERT(sizeof(struct lnet_magicversion) == + BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(struct ksock_hello_msg, kshm_src_nid)); hmv = (struct lnet_magicversion *)hello; @@ -1801,39 +1747,39 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, int timeout; int proto_match; int rc; - struct ksock_proto *proto; + const struct ksock_proto *proto; struct lnet_process_id recv_id; /* socket type set on active connections - not set on passive */ LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE)); - timeout = active ? lnet_get_lnd_timeout() : + timeout = active ? ksocknal_timeout() : lnet_acceptor_timeout(); rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout); - if (rc != 0) { - CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); - return rc; - } + if (rc != 0) { + CERROR("Error %d reading HELLO from %pIS\n", + rc, &conn->ksnc_peeraddr); + LASSERT(rc < 0); + return rc; + } - if (hello->kshm_magic != LNET_PROTO_MAGIC && - hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) && - hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) { - /* Unexpected magic! */ - CERROR ("Bad magic(1) %#08x (%#08x expected) from " - "%pI4h\n", __cpu_to_le32 (hello->kshm_magic), - LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr); - return -EPROTO; - } + if (hello->kshm_magic != LNET_PROTO_MAGIC && + hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) && + hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { + /* Unexpected magic! 
*/ + CERROR("Bad magic(1) %#08x (%#08x expected) from %pIS\n", + __cpu_to_le32 (hello->kshm_magic), + LNET_PROTO_TCP_MAGIC, &conn->ksnc_peeraddr); + return -EPROTO; + } rc = lnet_sock_read(sock, &hello->kshm_version, sizeof(hello->kshm_version), timeout); if (rc != 0) { - CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); + CERROR("Error %d reading HELLO from %pIS\n", + rc, &conn->ksnc_peeraddr); LASSERT(rc < 0); return rc; } @@ -1853,8 +1799,8 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, ksocknal_send_hello(ni, conn, ni->ni_nid, hello); } - CERROR("Unknown protocol version (%d.x expected) from %pI4h\n", - conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr); + CERROR("Unknown protocol version (%d.x expected) from %pIS\n", + conn->ksnc_proto->pro_version, &conn->ksnc_peeraddr); return -EPROTO; } @@ -1865,29 +1811,36 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, /* receive the rest of hello message anyway */ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout); if (rc != 0) { - CERROR("Error %d reading or checking hello from from %pI4h\n", - rc, &conn->ksnc_ipaddr); + CERROR("Error %d reading or checking hello from from %pIS\n", + rc, &conn->ksnc_peeraddr); LASSERT (rc < 0); return rc; } *incarnation = hello->kshm_src_incarnation; - if (hello->kshm_src_nid == LNET_NID_ANY) { - CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY" - "from %pI4h\n", &conn->ksnc_ipaddr); - return -EPROTO; - } + if (hello->kshm_src_nid == LNET_NID_ANY) { + CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pIS\n", + &conn->ksnc_peeraddr); + return -EPROTO; + } - if (!active && - conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { - /* Userspace NAL assigns peer_ni process ID from socket */ - recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG; - recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr); - } else { - recv_id.nid = hello->kshm_src_nid; - recv_id.pid = hello->kshm_src_pid; - } + if (!active && + rpc_get_port((struct sockaddr *)&conn->ksnc_peeraddr) > + LNET_ACCEPTOR_MAX_RESERVED_PORT) { + /* Userspace NAL assigns peer_ni process ID from socket */ + recv_id.pid = rpc_get_port((struct sockaddr *) + &conn->ksnc_peeraddr) | + LNET_PID_USERFLAG; + LASSERT(conn->ksnc_peeraddr.ss_family == AF_INET); + recv_id.nid = LNET_MKNID( + LNET_NIDNET(ni->ni_nid), + ntohl(((struct sockaddr_in *) + &conn->ksnc_peeraddr)->sin_addr.s_addr)); + } else { + recv_id.nid = hello->kshm_src_nid; + recv_id.pid = hello->kshm_src_pid; + } if (!active) { *peerid = recv_id; @@ -1895,25 +1848,23 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, /* peer_ni determines type */ conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype); if (conn->ksnc_type == SOCKLND_CONN_NONE) { - CERROR("Unexpected type %d from %s ip %pI4h\n", + CERROR("Unexpected type %d from %s ip %pIS\n", hello->kshm_ctype, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr); + &conn->ksnc_peeraddr); return -EPROTO; } return 0; } - if (peerid->pid != recv_id.pid || - peerid->nid != recv_id.nid) { - LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host" - " %pI4h, but they claimed they were " - "%s; please check your Lustre " - "configuration.\n", - libcfs_id2str(*peerid), - &conn->ksnc_ipaddr, - libcfs_id2str(recv_id)); - return -EPROTO; - } + if (peerid->pid != recv_id.pid || + peerid->nid != recv_id.nid) { + LCONSOLE_ERROR_MSG(0x130, + "Connected successfully to %s on host %pIS, but they claimed they were %s; please check your 
Lustre configuration.\n", + libcfs_id2str(*peerid), + &conn->ksnc_peeraddr, + libcfs_id2str(recv_id)); + return -EPROTO; + } if (hello->kshm_ctype == SOCKLND_CONN_NONE) { /* Possible protocol mismatch or I lost the connection race */ @@ -1921,163 +1872,173 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, } if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) { - CERROR("Mismatched types: me %d, %s ip %pI4h %d\n", + CERROR("Mismatched types: me %d, %s ip %pIS %d\n", conn->ksnc_type, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr, + &conn->ksnc_peeraddr, hello->kshm_ctype); return -EPROTO; } return 0; } -static int -ksocknal_connect(struct ksock_route *route) +static bool +ksocknal_connect(struct ksock_conn_cb *conn_cb) { LIST_HEAD(zombies); - struct ksock_peer_ni *peer_ni = route->ksnr_peer; - int type; - int wanted; - struct socket *sock; + struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer; + int type; + int wanted; + struct socket *sock; time64_t deadline; - int retry_later = 0; - int rc = 0; + bool retry_later = false; + int rc = 0; - deadline = ktime_get_seconds() + lnet_get_lnd_timeout(); + deadline = ktime_get_seconds() + ksocknal_timeout(); write_lock_bh(&ksocknal_data.ksnd_global_lock); - LASSERT (route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); + LASSERT(conn_cb->ksnr_scheduled); + LASSERT(!conn_cb->ksnr_connecting); - route->ksnr_connecting = 1; + conn_cb->ksnr_connecting = 1; - for (;;) { - wanted = ksocknal_route_mask() & ~route->ksnr_connected; + for (;;) { + wanted = ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected; - /* stop connecting if peer_ni/route got closed under me, or - * route got connected while queued */ - if (peer_ni->ksnp_closing || route->ksnr_deleted || - wanted == 0) { - retry_later = 0; - break; - } + /* stop connecting if peer_ni/cb got closed under me, or + * conn cb got connected while queued + */ + if (peer_ni->ksnp_closing || conn_cb->ksnr_deleted || + wanted == 0) { + retry_later = false; + break; + } - /* reschedule if peer_ni is connecting to me */ - if (peer_ni->ksnp_accepting > 0) { - CDEBUG(D_NET, - "peer_ni %s(%d) already connecting to me, retry later.\n", - libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting); - retry_later = 1; - } + /* reschedule if peer_ni is connecting to me */ + if (peer_ni->ksnp_accepting > 0) { + CDEBUG(D_NET, + "peer_ni %s(%d) already connecting to me, retry later.\n", + libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting); + retry_later = true; + } - if (retry_later) /* needs reschedule */ - break; + if (retry_later) /* needs reschedule */ + break; - if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) { - type = SOCKLND_CONN_ANY; - } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) { - type = SOCKLND_CONN_CONTROL; - } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) { - type = SOCKLND_CONN_BULK_IN; - } else { - LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0); - type = SOCKLND_CONN_BULK_OUT; - } + if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) { + type = SOCKLND_CONN_ANY; + } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) { + type = SOCKLND_CONN_CONTROL; + } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 && + conn_cb->ksnr_blki_conn_count <= conn_cb->ksnr_blko_conn_count) { + type = SOCKLND_CONN_BULK_IN; + } else { + LASSERT ((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0); + type = SOCKLND_CONN_BULK_OUT; + } write_unlock_bh(&ksocknal_data.ksnd_global_lock); if (ktime_get_seconds() >= deadline) { - rc = -ETIMEDOUT; - lnet_connect_console_error(rc, 
peer_ni->ksnp_id.nid, - route->ksnr_ipaddr, - route->ksnr_port); - goto failed; - } + rc = -ETIMEDOUT; + lnet_connect_console_error(rc, peer_ni->ksnp_id.nid, + (struct sockaddr *) + &conn_cb->ksnr_addr); + goto failed; + } - rc = lnet_connect(&sock, peer_ni->ksnp_id.nid, - route->ksnr_myipaddr, - route->ksnr_ipaddr, route->ksnr_port, - peer_ni->ksnp_ni->ni_net_ns); - if (rc != 0) + sock = lnet_connect(peer_ni->ksnp_id.nid, + conn_cb->ksnr_myiface, + (struct sockaddr *)&conn_cb->ksnr_addr, + peer_ni->ksnp_ni->ni_net_ns); + if (IS_ERR(sock)) { + rc = PTR_ERR(sock); goto failed; + } - rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type); - if (rc < 0) { - lnet_connect_console_error(rc, peer_ni->ksnp_id.nid, - route->ksnr_ipaddr, - route->ksnr_port); - goto failed; - } + rc = ksocknal_create_conn(peer_ni->ksnp_ni, conn_cb, sock, + type); + if (rc < 0) { + lnet_connect_console_error(rc, peer_ni->ksnp_id.nid, + (struct sockaddr *) + &conn_cb->ksnr_addr); + goto failed; + } - /* A +ve RC means I have to retry because I lost the connection - * race or I have to renegotiate protocol version */ - retry_later = (rc != 0); - if (retry_later) - CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n", - libcfs_nid2str(peer_ni->ksnp_id.nid)); + /* A +ve RC means I have to retry because I lost the connection + * race or I have to renegotiate protocol version */ + retry_later = (rc != 0); + if (retry_later) + CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n", + libcfs_nid2str(peer_ni->ksnp_id.nid)); write_lock_bh(&ksocknal_data.ksnd_global_lock); - } + } - route->ksnr_scheduled = 0; - route->ksnr_connecting = 0; - - if (retry_later) { - /* re-queue for attention; this frees me up to handle - * the peer_ni's incoming connection request */ - - if (rc == EALREADY || - (rc == 0 && peer_ni->ksnp_accepting > 0)) { - /* We want to introduce a delay before next - * attempt to connect if we lost conn race, - * but the race is resolved quickly usually, - * so min_reconnectms should be good heuristic */ - route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000; - route->ksnr_timeout = ktime_get_seconds() + - route->ksnr_retry_interval; - } + conn_cb->ksnr_scheduled = 0; + conn_cb->ksnr_connecting = 0; - ksocknal_launch_connection_locked(route); - } + if (retry_later) { + /* re-queue for attention; this frees me up to handle + * the peer_ni's incoming connection request + */ + + if (rc == EALREADY || + (rc == 0 && peer_ni->ksnp_accepting > 0)) { + /* We want to introduce a delay before next + * attempt to connect if we lost conn race, but + * the race is resolved quickly usually, so + * min_reconnectms should be good heuristic + */ + conn_cb->ksnr_retry_interval = + *ksocknal_tunables.ksnd_min_reconnectms / 1000; + conn_cb->ksnr_timeout = ktime_get_seconds() + + conn_cb->ksnr_retry_interval; + } + + ksocknal_launch_connection_locked(conn_cb); + } write_unlock_bh(&ksocknal_data.ksnd_global_lock); - return retry_later; + return retry_later; failed: write_lock_bh(&ksocknal_data.ksnd_global_lock); - route->ksnr_scheduled = 0; - route->ksnr_connecting = 0; + conn_cb->ksnr_scheduled = 0; + conn_cb->ksnr_connecting = 0; /* This is a retry rather than a new connection */ - route->ksnr_retry_interval *= 2; - route->ksnr_retry_interval = - max_t(time64_t, route->ksnr_retry_interval, + conn_cb->ksnr_retry_interval *= 2; + conn_cb->ksnr_retry_interval = + max_t(time64_t, conn_cb->ksnr_retry_interval, *ksocknal_tunables.ksnd_min_reconnectms / 1000); - route->ksnr_retry_interval = - min_t(time64_t, 
route->ksnr_retry_interval, + conn_cb->ksnr_retry_interval = + min_t(time64_t, conn_cb->ksnr_retry_interval, *ksocknal_tunables.ksnd_max_reconnectms / 1000); - LASSERT(route->ksnr_retry_interval); - route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval; + LASSERT(conn_cb->ksnr_retry_interval); + conn_cb->ksnr_timeout = ktime_get_seconds() + + conn_cb->ksnr_retry_interval; if (!list_empty(&peer_ni->ksnp_tx_queue) && - peer_ni->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer_ni) == NULL) { + peer_ni->ksnp_accepting == 0 && + !ksocknal_find_connecting_conn_cb_locked(peer_ni)) { struct ksock_conn *conn; - /* ksnp_tx_queue is queued on a conn on successful - * connection for V1.x and V2.x */ - if (!list_empty(&peer_ni->ksnp_conns)) { - conn = list_entry(peer_ni->ksnp_conns.next, - struct ksock_conn, ksnc_list); - LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x); - } + /* ksnp_tx_queue is queued on a conn on successful + * connection for V1.x and V2.x + */ + conn = list_first_entry_or_null(&peer_ni->ksnp_conns, + struct ksock_conn, ksnc_list); + if (conn) + LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); - /* take all the blocked packets while I've got the lock and - * complete below... */ + /* take all the blocked packets while I've got the lock and + * complete below... + */ list_splice_init(&peer_ni->ksnp_tx_queue, &zombies); - } + } write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -2095,7 +2056,6 @@ ksocknal_connect(struct ksock_route *route) static int ksocknal_connd_check_start(time64_t sec, long *timeout) { - char name[16]; int rc; int total = ksocknal_data.ksnd_connd_starting + ksocknal_data.ksnd_connd_running; @@ -2133,8 +2093,8 @@ ksocknal_connd_check_start(time64_t sec, long *timeout) spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); /* NB: total is the next id */ - snprintf(name, sizeof(name), "socknal_cd%02d", total); - rc = ksocknal_thread_start(ksocknal_connd, NULL, name); + rc = ksocknal_thread_start(ksocknal_connd, NULL, + "socknal_cd%02d", total); spin_lock_bh(&ksocknal_data.ksnd_connd_lock); if (rc == 0) @@ -2189,25 +2149,28 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout) ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV; } -/* Go through connd_routes queue looking for a route that we can process +/* Go through connd_cbs queue looking for a conn_cb that we can process * right now, @timeout_p can be updated if we need to come back later */ -static struct ksock_route * -ksocknal_connd_get_route_locked(signed long *timeout_p) +static struct ksock_conn_cb * +ksocknal_connd_get_conn_cb_locked(signed long *timeout_p) { time64_t now = ktime_get_seconds(); - struct ksock_route *route; + time64_t conn_timeout; + struct ksock_conn_cb *conn_cb; /* connd_routes can contain both pending and ordinary routes */ - list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes, - ksnr_connd_list) { + list_for_each_entry(conn_cb, &ksocknal_data.ksnd_connd_routes, + ksnr_connd_list) { + + conn_timeout = conn_cb->ksnr_timeout; - if (route->ksnr_retry_interval == 0 || - now >= route->ksnr_timeout) - return route; + if (conn_cb->ksnr_retry_interval == 0 || + now >= conn_timeout) + return conn_cb; if (*timeout_p == MAX_SCHEDULE_TIMEOUT || - *timeout_p > cfs_time_seconds(route->ksnr_timeout - now)) - *timeout_p = cfs_time_seconds(route->ksnr_timeout - now); + *timeout_p > cfs_time_seconds(conn_timeout - now)) + *timeout_p = cfs_time_seconds(conn_timeout - now); } return NULL; @@ -2219,12 +2182,9 @@ ksocknal_connd(void *arg) spinlock_t 
*connd_lock = &ksocknal_data.ksnd_connd_lock; struct ksock_connreq *cr; wait_queue_entry_t wait; - int nloops = 0; int cons_retry = 0; - cfs_block_allsigs(); - - init_waitqueue_entry(&wait, current); + init_wait(&wait); spin_lock_bh(connd_lock); @@ -2233,10 +2193,10 @@ ksocknal_connd(void *arg) ksocknal_data.ksnd_connd_running++; while (!ksocknal_data.ksnd_shuttingdown) { - struct ksock_route *route = NULL; + struct ksock_conn_cb *conn_cb = NULL; time64_t sec = ktime_get_real_seconds(); long timeout = MAX_SCHEDULE_TIMEOUT; - int dropped_lock = 0; + bool dropped_lock = false; if (ksocknal_connd_check_stop(sec, &timeout)) { /* wakeup another one to check stop */ @@ -2244,19 +2204,18 @@ ksocknal_connd(void *arg) break; } - if (ksocknal_connd_check_start(sec, &timeout)) { - /* created new thread */ - dropped_lock = 1; - } - - if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) { - /* Connection accepted by the listener */ - cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next, - struct ksock_connreq, ksncr_list); + if (ksocknal_connd_check_start(sec, &timeout)) { + /* created new thread */ + dropped_lock = true; + } + cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs, + struct ksock_connreq, ksncr_list); + if (cr) { + /* Connection accepted by the listener */ list_del(&cr->ksncr_list); spin_unlock_bh(connd_lock); - dropped_lock = 1; + dropped_lock = true; ksocknal_create_conn(cr->ksncr_ni, NULL, cr->ksncr_sock, SOCKLND_CONN_NONE); @@ -2264,44 +2223,43 @@ ksocknal_connd(void *arg) LIBCFS_FREE(cr, sizeof(*cr)); spin_lock_bh(connd_lock); - } + } - /* Only handle an outgoing connection request if there - * is a thread left to handle incoming connections and - * create new connd */ - if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV < - ksocknal_data.ksnd_connd_running) { - route = ksocknal_connd_get_route_locked(&timeout); - } - if (route != NULL) { - list_del(&route->ksnr_connd_list); - ksocknal_data.ksnd_connd_connecting++; + /* Only handle an outgoing connection request if there + * is a thread left to handle incoming connections and + * create new connd + */ + if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV < + ksocknal_data.ksnd_connd_running) + conn_cb = ksocknal_connd_get_conn_cb_locked(&timeout); + + if (conn_cb) { + list_del(&conn_cb->ksnr_connd_list); + ksocknal_data.ksnd_connd_connecting++; spin_unlock_bh(connd_lock); - dropped_lock = 1; - - if (ksocknal_connect(route)) { - /* consecutive retry */ - if (cons_retry++ > SOCKNAL_INSANITY_RECONN) { - CWARN("massive consecutive " - "re-connecting to %pI4h\n", - &route->ksnr_ipaddr); - cons_retry = 0; - } - } else { - cons_retry = 0; - } + dropped_lock = true; + + if (ksocknal_connect(conn_cb)) { + /* consecutive retry */ + if (cons_retry++ > SOCKNAL_INSANITY_RECONN) { + CWARN("massive consecutive re-connecting to %pIS\n", + &conn_cb->ksnr_addr); + cons_retry = 0; + } + } else { + cons_retry = 0; + } - ksocknal_route_decref(route); + ksocknal_conn_cb_decref(conn_cb); spin_lock_bh(connd_lock); ksocknal_data.ksnd_connd_connecting--; } if (dropped_lock) { - if (++nloops < SOCKNAL_RESCHED) + if (!need_resched()) continue; spin_unlock_bh(connd_lock); - nloops = 0; cond_resched(); spin_lock_bh(connd_lock); continue; @@ -2309,13 +2267,12 @@ ksocknal_connd(void *arg) /* Nothing to do for 'timeout' */ set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait); + add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, + &wait); spin_unlock_bh(connd_lock); - 
nloops = 0; schedule_timeout(timeout); - set_current_state(TASK_RUNNING); remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait); spin_lock_bh(connd_lock); } @@ -2331,14 +2288,11 @@ ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni) { /* We're called with a shared lock on ksnd_global_lock */ struct ksock_conn *conn; - struct list_head *ctmp; struct ksock_tx *tx; - list_for_each(ctmp, &peer_ni->ksnp_conns) { + list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) { int error; - conn = list_entry(ctmp, struct ksock_conn, ksnc_list); - /* Don't need the {get,put}connsock dance to deref ksnc_sock */ LASSERT (!conn->ksnc_closing); @@ -2346,70 +2300,60 @@ ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni) if (error != 0) { ksocknal_conn_addref(conn); - switch (error) { - case ECONNRESET: - CNETERR("A connection with %s " - "(%pI4h:%d) was reset; " - "it may have rebooted.\n", - libcfs_id2str(peer_ni->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - break; - case ETIMEDOUT: - CNETERR("A connection with %s " - "(%pI4h:%d) timed out; the " - "network or node may be down.\n", - libcfs_id2str(peer_ni->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - break; - default: - CNETERR("An unexpected network error %d " - "occurred with %s " - "(%pI4h:%d\n", error, - libcfs_id2str(peer_ni->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - break; - } + switch (error) { + case ECONNRESET: + CNETERR("A connection with %s (%pISp) was reset; it may have rebooted.\n", + libcfs_id2str(peer_ni->ksnp_id), + &conn->ksnc_peeraddr); + break; + case ETIMEDOUT: + CNETERR("A connection with %s (%pISp) timed out; the network or node may be down.\n", + libcfs_id2str(peer_ni->ksnp_id), + &conn->ksnc_peeraddr); + break; + default: + CNETERR("An unexpected network error %d occurred with %s (%pISp\n", + error, + libcfs_id2str(peer_ni->ksnp_id), + &conn->ksnc_peeraddr); + break; + } - return (conn); - } + return conn; + } - if (conn->ksnc_rx_started && + if (conn->ksnc_rx_started && ktime_get_seconds() >= conn->ksnc_rx_deadline) { - /* Timed out incomplete incoming message */ - ksocknal_conn_addref(conn); - CNETERR("Timeout receiving from %s (%pI4h:%d), " - "state %d wanted %d left %d\n", - libcfs_id2str(peer_ni->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port, - conn->ksnc_rx_state, - conn->ksnc_rx_nob_wanted, - conn->ksnc_rx_nob_left); - return (conn); - } + /* Timed out incomplete incoming message */ + ksocknal_conn_addref(conn); + CNETERR("Timeout receiving from %s (%pISp), state %d wanted %d left %d\n", + libcfs_id2str(peer_ni->ksnp_id), + &conn->ksnc_peeraddr, + conn->ksnc_rx_state, + conn->ksnc_rx_nob_wanted, + conn->ksnc_rx_nob_left); + return conn; + } if ((!list_empty(&conn->ksnc_tx_queue) || conn->ksnc_sock->sk->sk_wmem_queued != 0) && ktime_get_seconds() >= conn->ksnc_tx_deadline) { - /* Timed out messages queued for sending or - * buffered in the socket's send buffer */ - ksocknal_conn_addref(conn); + /* Timed out messages queued for sending or + * buffered in the socket's send buffer + */ + ksocknal_conn_addref(conn); list_for_each_entry(tx, &conn->ksnc_tx_queue, tx_list) tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT; - CNETERR("Timeout sending data to %s (%pI4h:%d) " - "the network or that node may be down.\n", - libcfs_id2str(peer_ni->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port); - return (conn); - } - } + CNETERR("Timeout sending data to %s (%pISp) the network or that node may be down.\n", + libcfs_id2str(peer_ni->ksnp_id), + &conn->ksnc_peeraddr); + return conn; + } + 
} - return (NULL); + return (NULL); } static inline void @@ -2420,17 +2364,15 @@ ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni) write_lock_bh(&ksocknal_data.ksnd_global_lock); - while (!list_empty(&peer_ni->ksnp_tx_queue)) { - tx = list_entry(peer_ni->ksnp_tx_queue.next, - struct ksock_tx, tx_list); - + while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue, + struct ksock_tx, + tx_list)) != NULL) { if (ktime_get_seconds() < tx->tx_deadline) break; tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT; - list_del(&tx->tx_list); - list_add_tail(&tx->tx_list, &stale_txs); + list_move_tail(&tx->tx_list, &stale_txs); } write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -2503,62 +2445,61 @@ __must_hold(&ksocknal_data.ksnd_global_lock) static void ksocknal_check_peer_timeouts(int idx) { - struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; + struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx]; struct ksock_peer_ni *peer_ni; struct ksock_conn *conn; struct ksock_tx *tx; again: - /* NB. We expect to have a look at all the peers and not find any - * connections to time out, so we just use a shared lock while we - * take a look... */ + /* NB. We expect to have a look at all the peers and not find any + * connections to time out, so we just use a shared lock while we + * take a look... + */ read_lock(&ksocknal_data.ksnd_global_lock); - list_for_each_entry(peer_ni, peers, ksnp_list) { + hlist_for_each_entry(peer_ni, peers, ksnp_list) { struct ksock_tx *tx_stale; time64_t deadline = 0; int resid = 0; int n = 0; - if (ksocknal_send_keepalive_locked(peer_ni) != 0) { + if (ksocknal_send_keepalive_locked(peer_ni) != 0) { read_unlock(&ksocknal_data.ksnd_global_lock); - goto again; - } + goto again; + } - conn = ksocknal_find_timed_out_conn (peer_ni); + conn = ksocknal_find_timed_out_conn(peer_ni); - if (conn != NULL) { + if (conn != NULL) { read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); - /* NB we won't find this one again, but we can't - * just proceed with the next peer_ni, since we dropped - * ksnd_global_lock and it might be dead already! */ - ksocknal_conn_decref(conn); - goto again; - } - - /* we can't process stale txs right here because we're - * holding only shared lock */ - if (!list_empty(&peer_ni->ksnp_tx_queue)) { - struct ksock_tx *tx; + /* NB we won't find this one again, but we can't + * just proceed with the next peer_ni, since we dropped + * ksnd_global_lock and it might be dead already! 
+ */ + ksocknal_conn_decref(conn); + goto again; + } - tx = list_entry(peer_ni->ksnp_tx_queue.next, - struct ksock_tx, tx_list); - if (ktime_get_seconds() >= tx->tx_deadline) { - ksocknal_peer_addref(peer_ni); - read_unlock(&ksocknal_data.ksnd_global_lock); + /* we can't process stale txs right here because we're + * holding only shared lock + */ + tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue, + struct ksock_tx, tx_list); + if (tx && ktime_get_seconds() >= tx->tx_deadline) { + ksocknal_peer_addref(peer_ni); + read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_flush_stale_txs(peer_ni); + ksocknal_flush_stale_txs(peer_ni); - ksocknal_peer_decref(peer_ni); - goto again; - } - } + ksocknal_peer_decref(peer_ni); + goto again; + } if (list_empty(&peer_ni->ksnp_zc_req_list)) - continue; + continue; tx_stale = NULL; spin_lock(&peer_ni->ksnp_lock); @@ -2606,24 +2547,21 @@ int ksocknal_reaper(void *arg) wait_queue_entry_t wait; struct ksock_conn *conn; struct ksock_sched *sched; - struct list_head enomem_conns; + LIST_HEAD(enomem_conns); int nenomem_conns; time64_t timeout; int i; int peer_index = 0; time64_t deadline = ktime_get_seconds(); - cfs_block_allsigs (); - - INIT_LIST_HEAD(&enomem_conns); - init_waitqueue_entry(&wait, current); + init_wait(&wait); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); while (!ksocknal_data.ksnd_shuttingdown) { - if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { - conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next, - struct ksock_conn, ksnc_list); + conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns, + struct ksock_conn, ksnc_list); + if (conn) { list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2635,9 +2573,9 @@ int ksocknal_reaper(void *arg) continue; } - if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { - conn = list_entry(ksocknal_data.ksnd_zombie_conns.next, - struct ksock_conn, ksnc_list); + conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns, + struct ksock_conn, ksnc_list); + if (conn) { list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2645,22 +2583,19 @@ int ksocknal_reaper(void *arg) ksocknal_destroy_conn(conn); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - continue; - } + continue; + } - if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) { - list_add(&enomem_conns, - &ksocknal_data.ksnd_enomem_conns); - list_del_init(&ksocknal_data.ksnd_enomem_conns); - } + list_splice_init(&ksocknal_data.ksnd_enomem_conns, + &enomem_conns); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); /* reschedule all the connections that stalled with ENOMEM... */ nenomem_conns = 0; - while (!list_empty(&enomem_conns)) { - conn = list_entry(enomem_conns.next, - struct ksock_conn, ksnc_tx_list); + while ((conn = list_first_entry_or_null(&enomem_conns, + struct ksock_conn, + ksnc_tx_list)) != NULL) { list_del(&conn->ksnc_tx_list); sched = conn->ksnc_scheduler; @@ -2677,34 +2612,35 @@ int ksocknal_reaper(void *arg) nenomem_conns++; } - /* careful with the jiffy wrap... */ + /* careful with the jiffy wrap... 
*/
         while ((timeout = deadline - ktime_get_seconds()) <= 0) {
-            const int n = 4;
-            const int p = 1;
-            int chunk = ksocknal_data.ksnd_peer_hash_size;
+            const int n = 4;
+            const int p = 1;
+            int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
             unsigned int lnd_timeout;
 
-            /* Time to check for timeouts on a few more peers: I do
-             * checks every 'p' seconds on a proportion of the peer_ni
-             * table and I need to check every connection 'n' times
-             * within a timeout interval, to ensure I detect a
-             * timeout on any connection within (n+1)/n times the
-             * timeout interval. */
+            /* Time to check for timeouts on a few more peers: I
+             * do checks every 'p' seconds on a proportion of the
+             * peer_ni table and I need to check every connection
+             * 'n' times within a timeout interval, to ensure I
+             * detect a timeout on any connection within (n+1)/n
+             * times the timeout interval.
+             */
 
-            lnd_timeout = lnet_get_lnd_timeout();
+            lnd_timeout = ksocknal_timeout();
             if (lnd_timeout > n * p)
                 chunk = (chunk * n * p) / lnd_timeout;
             if (chunk == 0)
                 chunk = 1;
 
-            for (i = 0; i < chunk; i++) {
-                ksocknal_check_peer_timeouts (peer_index);
-                peer_index = (peer_index + 1) %
-                    ksocknal_data.ksnd_peer_hash_size;
-            }
+            for (i = 0; i < chunk; i++) {
+                ksocknal_check_peer_timeouts(peer_index);
+                peer_index = (peer_index + 1) %
+                             HASH_SIZE(ksocknal_data.ksnd_peers);
+            }
 
             deadline += p;
-        }
+        }
 
         if (nenomem_conns != 0) {
             /* Reduce my timeout if I rescheduled ENOMEM conns.