/* searching for a noop tx in free list */
spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
- struct ksock_tx, tx_list);
+ tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs,
+ struct ksock_tx, tx_list);
+ if (tx) {
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
{
struct ksock_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct ksock_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(txlist, struct ksock_tx,
+ tx_list)) != NULL) {
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
}
static void
-ksocknal_launch_connection_locked(struct ksock_route *route)
+ksocknal_launch_connection_locked(struct ksock_conn_cb *conn_cb)
{
+ /* called holding write lock on ksnd_global_lock */
- /* called holding write lock on ksnd_global_lock */
+ LASSERT(!conn_cb->ksnr_scheduled);
+ LASSERT(!conn_cb->ksnr_connecting);
+ LASSERT((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) != 0);
- LASSERT (!route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
- LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+ /* scheduling conn for connd */
+ conn_cb->ksnr_scheduled = 1;
- route->ksnr_scheduled = 1; /* scheduling conn for connd */
- ksocknal_route_addref(route); /* extra ref for connd */
+ /* extra ref for connd */
+ ksocknal_conn_cb_addref(conn_cb);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- list_add_tail(&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
+ list_add_tail(&conn_cb->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
- struct ksock_route *route;
+ struct ksock_conn_cb *conn_cb;
- /* called holding write lock on ksnd_global_lock */
- for (;;) {
- /* launch any/all connections that need it */
- route = ksocknal_find_connectable_route_locked(peer_ni);
- if (route == NULL)
- return;
+ /* called holding write lock on ksnd_global_lock */
+ for (;;) {
+ /* launch any/all connections that need it */
+ conn_cb = ksocknal_find_connectable_conn_cb_locked(peer_ni);
+ if (conn_cb == NULL)
+ return;
- ksocknal_launch_connection_locked(route);
- }
+ ksocknal_launch_connection_locked(conn_cb);
+ }
}
struct ksock_conn *
}
-struct ksock_route *
-ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
+struct ksock_conn_cb *
+ksocknal_find_connectable_conn_cb_locked(struct ksock_peer_ni *peer_ni)
{
time64_t now = ktime_get_seconds();
- struct list_head *tmp;
- struct ksock_route *route;
-
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
+ struct ksock_conn_cb *conn_cb;
- LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+ conn_cb = peer_ni->ksnp_conn_cb;
+ if (!conn_cb)
+ return NULL;
- if (route->ksnr_scheduled) /* connections being established */
- continue;
+ LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled);
- /* all route types connected ? */
- if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
- continue;
+ if (conn_cb->ksnr_scheduled) /* connections being established */
+ return NULL;
- if (!(route->ksnr_retry_interval == 0 || /* first attempt */
- now >= route->ksnr_timeout)) {
- CDEBUG(D_NET,
- "Too soon to retry route %pIS (cnted %d, interval %lld, %lld secs later)\n",
- &route->ksnr_addr,
- route->ksnr_connected,
- route->ksnr_retry_interval,
- route->ksnr_timeout - now);
- continue;
- }
+ /* all conn types connected ? */
+ if ((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) == 0)
+ return NULL;
- return (route);
- }
+ if (!(conn_cb->ksnr_retry_interval == 0 || /* first attempt */
+ now >= conn_cb->ksnr_timeout)) {
+ CDEBUG(D_NET,
+ "Too soon to retry route %pIS (cnted %d, interval %lld, %lld secs later)\n",
+ &conn_cb->ksnr_addr,
+ conn_cb->ksnr_connected,
+ conn_cb->ksnr_retry_interval,
+ conn_cb->ksnr_timeout - now);
+ return NULL;
+ }
- return (NULL);
+ return conn_cb;
}
-struct ksock_route *
-ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
+struct ksock_conn_cb *
+ksocknal_find_connecting_conn_cb_locked(struct ksock_peer_ni *peer_ni)
{
- struct list_head *tmp;
- struct ksock_route *route;
-
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
+ struct ksock_conn_cb *conn_cb;
- LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+ conn_cb = peer_ni->ksnp_conn_cb;
+ if (!conn_cb)
+ return NULL;
- if (route->ksnr_scheduled)
- return (route);
- }
+ LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled);
- return (NULL);
+ return conn_cb->ksnr_scheduled ? conn_cb : NULL;
}
int
int retry;
int rc;
- LASSERT (tx->tx_conn == NULL);
+ LASSERT(tx->tx_conn == NULL);
- g_lock = &ksocknal_data.ksnd_global_lock;
+ g_lock = &ksocknal_data.ksnd_global_lock;
- for (retry = 0;; retry = 1) {
+ for (retry = 0;; retry = 1) {
read_lock(g_lock);
- peer_ni = ksocknal_find_peer_locked(ni, id);
- if (peer_ni != NULL) {
- if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
- conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
- if (conn != NULL) {
- /* I've got no routes that need to be
- * connecting and I do have an actual
- * connection... */
+ peer_ni = ksocknal_find_peer_locked(ni, id);
+ if (peer_ni != NULL) {
+ if (ksocknal_find_connectable_conn_cb_locked(peer_ni) == NULL) {
+ conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
+ if (conn != NULL) {
+ /* I've got nothing that needs to be
+ * connecting and I do have an actual
+ * connection...
+ */
ksocknal_queue_tx_locked (tx, conn);
read_unlock(g_lock);
return (0);
- }
- }
- }
+ }
+ }
+ }
/* I'll need a write lock... */
read_unlock(g_lock);
return (0);
}
- if (peer_ni->ksnp_accepting > 0 ||
- ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
+ if (peer_ni->ksnp_accepting > 0 ||
+ ksocknal_find_connecting_conn_cb_locked(peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
ksocknal_timeout();
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
write_unlock_bh(g_lock);
return 0;
- }
+ }
write_unlock_bh(g_lock);
bool did_something = false;
/* Ensure I progress everything semi-fairly */
-
- if (!list_empty(&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- struct ksock_conn, ksnc_rx_list);
+ conn = list_first_entry_or_null(&sched->kss_rx_conns,
+ struct ksock_conn,
+ ksnc_rx_list);
+ if (conn) {
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
- conn = list_entry(sched->kss_tx_conns.next,
- struct ksock_conn, ksnc_tx_list);
+ conn = list_first_entry(&sched->kss_tx_conns,
+ struct ksock_conn,
+ ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
- tx = list_entry(conn->ksnc_tx_queue.next,
- struct ksock_tx, tx_list);
+ tx = list_first_entry(&conn->ksnc_tx_queue,
+ struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
}
static bool
-ksocknal_connect(struct ksock_route *route)
+ksocknal_connect(struct ksock_conn_cb *conn_cb)
{
LIST_HEAD(zombies);
- struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
int type;
int wanted;
struct socket *sock;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- LASSERT(route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
+ LASSERT(conn_cb->ksnr_scheduled);
+ LASSERT(!conn_cb->ksnr_connecting);
- route->ksnr_connecting = 1;
+ conn_cb->ksnr_connecting = 1;
for (;;) {
- wanted = ksocknal_route_mask() & ~route->ksnr_connected;
+ wanted = ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected;
- /* stop connecting if peer_ni/route got closed under me, or
- * route got connected while queued */
- if (peer_ni->ksnp_closing || route->ksnr_deleted ||
+ /* stop connecting if peer_ni/conn_cb got closed under me, or
+ * conn_cb got connected while queued
+ */
+ if (peer_ni->ksnp_closing || conn_cb->ksnr_deleted ||
wanted == 0) {
retry_later = false;
break;
type = SOCKLND_CONN_ANY;
} else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
+ } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 &&
+ conn_cb->ksnr_blki_conn_count <= conn_cb->ksnr_blko_conn_count) {
type = SOCKLND_CONN_BULK_IN;
} else {
LASSERT ((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
(struct sockaddr *)
- &route->ksnr_addr);
+ &conn_cb->ksnr_addr);
goto failed;
}
sock = lnet_connect(peer_ni->ksnp_id.nid,
- route->ksnr_myiface,
- (struct sockaddr *)&route->ksnr_addr,
+ conn_cb->ksnr_myiface,
+ (struct sockaddr *)&conn_cb->ksnr_addr,
peer_ni->ksnp_ni->ni_net_ns);
if (IS_ERR(sock)) {
rc = PTR_ERR(sock);
goto failed;
}
- rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
+ rc = ksocknal_create_conn(peer_ni->ksnp_ni, conn_cb, sock,
+ type);
if (rc < 0) {
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
(struct sockaddr *)
- &route->ksnr_addr);
+ &conn_cb->ksnr_addr);
goto failed;
}
write_lock_bh(&ksocknal_data.ksnd_global_lock);
}
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
+ conn_cb->ksnr_scheduled = 0;
+ conn_cb->ksnr_connecting = 0;
if (retry_later) {
/* re-queue for attention; this frees me up to handle
* the race is resolved quickly usually, so
* min_reconnectms should be good heuristic
*/
- route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
- route->ksnr_timeout = ktime_get_seconds() +
- route->ksnr_retry_interval;
+ conn_cb->ksnr_retry_interval =
+ *ksocknal_tunables.ksnd_min_reconnectms / 1000;
+ conn_cb->ksnr_timeout = ktime_get_seconds() +
+ conn_cb->ksnr_retry_interval;
}
- ksocknal_launch_connection_locked(route);
+ ksocknal_launch_connection_locked(conn_cb);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
failed:
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
+ conn_cb->ksnr_scheduled = 0;
+ conn_cb->ksnr_connecting = 0;
/* This is a retry rather than a new connection */
- route->ksnr_retry_interval *= 2;
- route->ksnr_retry_interval =
- max_t(time64_t, route->ksnr_retry_interval,
+ conn_cb->ksnr_retry_interval *= 2;
+ conn_cb->ksnr_retry_interval =
+ max_t(time64_t, conn_cb->ksnr_retry_interval,
*ksocknal_tunables.ksnd_min_reconnectms / 1000);
- route->ksnr_retry_interval =
- min_t(time64_t, route->ksnr_retry_interval,
+ conn_cb->ksnr_retry_interval =
+ min_t(time64_t, conn_cb->ksnr_retry_interval,
*ksocknal_tunables.ksnd_max_reconnectms / 1000);
- LASSERT(route->ksnr_retry_interval);
- route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;
+ LASSERT(conn_cb->ksnr_retry_interval);
+ conn_cb->ksnr_timeout = ktime_get_seconds() +
+ conn_cb->ksnr_retry_interval;
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
+ !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
struct ksock_conn *conn;
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x
*/
- if (!list_empty(&peer_ni->ksnp_conns)) {
- conn = list_entry(peer_ni->ksnp_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&peer_ni->ksnp_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn)
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- }
/* take all the blocked packets while I've got the lock and
* complete below...
ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
-/* Go through connd_routes queue looking for a route that we can process
+/* Go through connd_cbs queue looking for a conn_cb that we can process
* right now, @timeout_p can be updated if we need to come back later */
-static struct ksock_route *
-ksocknal_connd_get_route_locked(signed long *timeout_p)
+static struct ksock_conn_cb *
+ksocknal_connd_get_conn_cb_locked(signed long *timeout_p)
{
time64_t now = ktime_get_seconds();
- struct ksock_route *route;
+ time64_t conn_timeout;
+ struct ksock_conn_cb *conn_cb;
/* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
+ list_for_each_entry(conn_cb, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
+
+ conn_timeout = conn_cb->ksnr_timeout;
- if (route->ksnr_retry_interval == 0 ||
- now >= route->ksnr_timeout)
- return route;
+ if (conn_cb->ksnr_retry_interval == 0 ||
+ now >= conn_timeout)
+ return conn_cb;
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
- *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
- *timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
+ *timeout_p > cfs_time_seconds(conn_timeout - now))
+ *timeout_p = cfs_time_seconds(conn_timeout - now);
}
return NULL;
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
- struct ksock_route *route = NULL;
+ struct ksock_conn_cb *conn_cb = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
bool dropped_lock = false;
dropped_lock = true;
}
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs,
+ struct ksock_connreq, ksncr_list);
+ if (cr) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
- struct ksock_connreq, ksncr_list);
-
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
dropped_lock = true;
* create new connd
*/
if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
- ksocknal_data.ksnd_connd_running) {
- route = ksocknal_connd_get_route_locked(&timeout);
- }
- if (route != NULL) {
- list_del(&route->ksnr_connd_list);
+ ksocknal_data.ksnd_connd_running)
+ conn_cb = ksocknal_connd_get_conn_cb_locked(&timeout);
+
+ if (conn_cb) {
+ list_del(&conn_cb->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
dropped_lock = true;
- if (ksocknal_connect(route)) {
+ if (ksocknal_connect(conn_cb)) {
/* consecutive retry */
if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
CWARN("massive consecutive re-connecting to %pIS\n",
- &route->ksnr_addr);
+ &conn_cb->ksnr_addr);
cons_retry = 0;
}
} else {
cons_retry = 0;
}
- ksocknal_route_decref(route);
+ ksocknal_conn_cb_decref(conn_cb);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_connecting--;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- while (!list_empty(&peer_ni->ksnp_tx_queue)) {
- tx = list_entry(peer_ni->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+ struct ksock_tx,
+ tx_list)) != NULL) {
if (ktime_get_seconds() < tx->tx_deadline)
break;
/* we can't process stale txs right here because we're
* holding only shared lock
*/
- if (!list_empty(&peer_ni->ksnp_tx_queue)) {
- struct ksock_tx *tx;
-
- tx = list_entry(peer_ni->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
- if (ktime_get_seconds() >= tx->tx_deadline) {
- ksocknal_peer_addref(peer_ni);
- read_unlock(&ksocknal_data.ksnd_global_lock);
+ tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+ struct ksock_tx, tx_list);
+ if (tx && ktime_get_seconds() >= tx->tx_deadline) {
+ ksocknal_peer_addref(peer_ni);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_flush_stale_txs(peer_ni);
+ ksocknal_flush_stale_txs(peer_ni);
- ksocknal_peer_decref(peer_ni);
- goto again;
- }
+ ksocknal_peer_decref(peer_ni);
+ goto again;
}
if (list_empty(&peer_ni->ksnp_zc_req_list))
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn) {
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn) {
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty(&enomem_conns)) {
- conn = list_entry(enomem_conns.next,
- struct ksock_conn, ksnc_tx_list);
+ while ((conn = list_first_entry_or_null(&enomem_conns,
+ struct ksock_conn,
+ ksnc_tx_list)) != NULL) {
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;