#include <libcfs/linux/linux-mem.h>
#include "socklnd.h"
+#include <linux/sunrpc/addr.h>
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
if (tx == NULL)
return NULL;
- atomic_set(&tx->tx_refcount, 1);
+ refcount_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
}
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
+ LASSERT(refcount_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
}
}
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ CWARN("%u ENOMEM tx %p (%lld allocated)\n",
+ counter, conn, libcfs_kmem_read());
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
if (!conn->ksnc_closing) {
switch (rc) {
case -ECONNRESET:
- LCONSOLE_WARN("Host %pI4h reset our connection "
- "while we were sending data; it may have "
- "rebooted.\n",
- &conn->ksnc_ipaddr);
+ LCONSOLE_WARN("Host %pIS reset our connection while we were sending data; it may have rebooted.\n",
+ &conn->ksnc_peeraddr);
break;
default:
- LCONSOLE_WARN("There was an unexpected network error "
- "while writing to %pI4h: %d.\n",
- &conn->ksnc_ipaddr, rc);
+ LCONSOLE_WARN("There was an unexpected network error while writing to %pIS: %d.\n",
+ &conn->ksnc_peeraddr, rc);
break;
}
- CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
+ CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pISp\n",
conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
+ &conn->ksnc_peeraddr);
}
if (tx->tx_zc_checked)
* ksnc_sock... */
LASSERT(!conn->ksnc_closing);
- CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
+ CDEBUG(D_NET, "Sending to %s ip %pISp\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
+ &conn->ksnc_peeraddr);
ksocknal_tx_prep(conn, tx);
if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
continue;
- if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+ if (!(route->ksnr_retry_interval == 0 || /* first attempt */
now >= route->ksnr_timeout)) {
- CDEBUG(D_NET,
- "Too soon to retry route %pI4h "
- "(cnted %d, interval %lld, %lld secs later)\n",
- &route->ksnr_ipaddr,
- route->ksnr_connected,
- route->ksnr_retry_interval,
+ CDEBUG(D_NET,
+ "Too soon to retry route %pIS (cnted %d, interval %lld, %lld secs later)\n",
+ &route->ksnr_addr,
+ route->ksnr_connected,
+ route->ksnr_retry_interval,
route->ksnr_timeout - now);
- continue;
- }
+ continue;
+ }
return (route);
}
{
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
+ struct sockaddr_in sa;
rwlock_t *g_lock;
int retry;
int rc;
return -EHOSTUNREACH;
}
- rc = ksocknal_add_peer(ni, id,
- LNET_NIDADDR(id.nid),
- lnet_acceptor_port());
+ memset(&sa, 0, sizeof(sa));
+ sa.sin_family = AF_INET;
+ sa.sin_addr.s_addr = htonl(LNET_NIDADDR(id.nid));
+ sa.sin_port = htons(lnet_acceptor_port());
+ rc = ksocknal_add_peer(ni, id, (struct sockaddr *)&sa);
if (rc != 0) {
CERROR("Can't add peer_ni %s: %d\n",
libcfs_id2str(id), rc);
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
+	/* Pass "%s" as the namefmt so a literal '%' in @name can never be
+	 * interpreted as a printf conversion by kthread_run().
+	 */
-	struct task_struct *task = kthread_run(fn, arg, name);
+	struct task_struct *task = kthread_run(fn, arg, "%s", name);
	if (IS_ERR(task))
		return PTR_ERR(task);
+	/* ksnd_nthreads is now an atomic_t, so the global rwlock is no
+	 * longer needed just to bump the running-thread count.
+	 */
-	write_lock_bh(&ksocknal_data.ksnd_global_lock);
-	ksocknal_data.ksnd_nthreads++;
-	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+	atomic_inc(&ksocknal_data.ksnd_nthreads);
	return 0;
}
void
ksocknal_thread_fini (void)
{
+	/* Atomically drop the thread count; the last thread to exit issues
+	 * the wake-up on &ksnd_nthreads (presumably matched by a
+	 * wait_var_event() in the shutdown path — confirm against the
+	 * full file).
+	 */
-	write_lock_bh(&ksocknal_data.ksnd_global_lock);
-	if (--ksocknal_data.ksnd_nthreads == 0)
+	if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
		wake_up_var(&ksocknal_data.ksnd_nthreads);
-	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
struct lnet_process_id *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT(rc != -EAGAIN);
if (rc == 0)
- CDEBUG(D_NET, "[%p] EOF from %s "
- "ip %pI4h:%d\n", conn,
- libcfs_id2str(ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CDEBUG(D_NET, "[%p] EOF from %s ip %pISp\n",
+ conn, libcfs_id2str(ksnp_id),
+ &conn->ksnc_peeraddr);
else if (!conn->ksnc_closing)
- CERROR("[%p] Error %d on read from %s "
- "ip %pI4h:%d\n", conn, rc,
- libcfs_id2str(ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CERROR("[%p] Error %d on read from %s ip %pISp\n",
+ conn, rc, libcfs_id2str(ksnp_id),
+ &conn->ksnc_peeraddr);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
spin_lock_bh(&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- int did_something = 0;
+ bool did_something = false;
/* Ensure I progress everything semi-fairly */
ksocknal_conn_decref(conn);
}
- did_something = 1;
+ did_something = true;
}
if (!list_empty(&sched->kss_tx_conns)) {
ksocknal_conn_decref(conn);
}
- did_something = 1;
+ did_something = true;
}
if (!did_something || /* nothing to do */
need_resched()) { /* hogging CPU? */
rc = lnet_sock_read(sock, &hello->kshm_magic,
sizeof(hello->kshm_magic), timeout);
- if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
- return rc;
- }
+ if (rc != 0) {
+ CERROR("Error %d reading HELLO from %pIS\n",
+ rc, &conn->ksnc_peeraddr);
+ LASSERT(rc < 0);
+ return rc;
+ }
- if (hello->kshm_magic != LNET_PROTO_MAGIC &&
- hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
- hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
- /* Unexpected magic! */
- CERROR ("Bad magic(1) %#08x (%#08x expected) from "
- "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
- return -EPROTO;
- }
+ if (hello->kshm_magic != LNET_PROTO_MAGIC &&
+ hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
+ hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
+ /* Unexpected magic! */
+ CERROR("Bad magic(1) %#08x (%#08x expected) from %pIS\n",
+			       __cpu_to_le32(hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC, &conn->ksnc_peeraddr);
+ return -EPROTO;
+ }
rc = lnet_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ CERROR("Error %d reading HELLO from %pIS\n",
+ rc, &conn->ksnc_peeraddr);
LASSERT(rc < 0);
return rc;
}
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
}
- CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
- conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);
+ CERROR("Unknown protocol version (%d.x expected) from %pIS\n",
+ conn->ksnc_proto->pro_version, &conn->ksnc_peeraddr);
return -EPROTO;
}
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
if (rc != 0) {
- CERROR("Error %d reading or checking hello from from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+	CERROR("Error %d reading or checking hello from %pIS\n",
+ rc, &conn->ksnc_peeraddr);
LASSERT (rc < 0);
return rc;
}
*incarnation = hello->kshm_src_incarnation;
if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
- &conn->ksnc_ipaddr);
+ CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pIS\n",
+ &conn->ksnc_peeraddr);
return -EPROTO;
}
- if (!active &&
- conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- /* Userspace NAL assigns peer_ni process ID from socket */
- recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
- } else {
- recv_id.nid = hello->kshm_src_nid;
- recv_id.pid = hello->kshm_src_pid;
- }
+ if (!active &&
+ rpc_get_port((struct sockaddr *)&conn->ksnc_peeraddr) >
+ LNET_ACCEPTOR_MAX_RESERVED_PORT) {
+ /* Userspace NAL assigns peer_ni process ID from socket */
+ recv_id.pid = rpc_get_port((struct sockaddr *)
+ &conn->ksnc_peeraddr) |
+ LNET_PID_USERFLAG;
+ LASSERT(conn->ksnc_peeraddr.ss_family == AF_INET);
+ recv_id.nid = LNET_MKNID(
+ LNET_NIDNET(ni->ni_nid),
+ ntohl(((struct sockaddr_in *)
+ &conn->ksnc_peeraddr)->sin_addr.s_addr));
+ } else {
+ recv_id.nid = hello->kshm_src_nid;
+ recv_id.pid = hello->kshm_src_pid;
+ }
if (!active) {
*peerid = recv_id;
/* peer_ni determines type */
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
- CERROR("Unexpected type %d from %s ip %pI4h\n",
+ CERROR("Unexpected type %d from %s ip %pIS\n",
hello->kshm_ctype, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr);
+ &conn->ksnc_peeraddr);
return -EPROTO;
}
return 0;
}
- if (peerid->pid != recv_id.pid ||
- peerid->nid != recv_id.nid) {
- LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
- " %pI4h, but they claimed they were "
- "%s; please check your Lustre "
- "configuration.\n",
- libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- libcfs_id2str(recv_id));
- return -EPROTO;
- }
+ if (peerid->pid != recv_id.pid ||
+ peerid->nid != recv_id.nid) {
+ LCONSOLE_ERROR_MSG(0x130,
+ "Connected successfully to %s on host %pIS, but they claimed they were %s; please check your Lustre configuration.\n",
+ libcfs_id2str(*peerid),
+ &conn->ksnc_peeraddr,
+ libcfs_id2str(recv_id));
+ return -EPROTO;
+ }
if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
/* Possible protocol mismatch or I lost the connection race */
}
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
- CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
+ CERROR("Mismatched types: me %d, %s ip %pIS %d\n",
conn->ksnc_type, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
+ &conn->ksnc_peeraddr,
hello->kshm_ctype);
return -EPROTO;
}
return 0;
}
-static int
+static bool
ksocknal_connect(struct ksock_route *route)
{
LIST_HEAD(zombies);
struct ksock_peer_ni *peer_ni = route->ksnr_peer;
- int type;
- int wanted;
- struct socket *sock;
+ int type;
+ int wanted;
+ struct socket *sock;
time64_t deadline;
- int retry_later = 0;
- int rc = 0;
+ bool retry_later = false;
+ int rc = 0;
deadline = ktime_get_seconds() + ksocknal_timeout();
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- LASSERT (route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
+ LASSERT(route->ksnr_scheduled);
+ LASSERT(!route->ksnr_connecting);
- route->ksnr_connecting = 1;
+ route->ksnr_connecting = 1;
for (;;) {
wanted = ksocknal_route_mask() & ~route->ksnr_connected;
* route got connected while queued */
if (peer_ni->ksnp_closing || route->ksnr_deleted ||
wanted == 0) {
- retry_later = 0;
+ retry_later = false;
break;
}
CDEBUG(D_NET,
"peer_ni %s(%d) already connecting to me, retry later.\n",
libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
- retry_later = 1;
+ retry_later = true;
}
if (retry_later) /* needs reschedule */
if (ktime_get_seconds() >= deadline) {
rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
+ (struct sockaddr *)
+ &route->ksnr_addr);
goto failed;
}
sock = lnet_connect(peer_ni->ksnp_id.nid,
route->ksnr_myiface,
- route->ksnr_ipaddr, route->ksnr_port,
+ (struct sockaddr *)&route->ksnr_addr,
peer_ni->ksnp_ni->ni_net_ns);
if (IS_ERR(sock)) {
rc = PTR_ERR(sock);
rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
if (rc < 0) {
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
+ (struct sockaddr *)
+ &route->ksnr_addr);
goto failed;
}
write_lock_bh(&ksocknal_data.ksnd_global_lock);
}
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
+ route->ksnr_scheduled = 0;
+ route->ksnr_connecting = 0;
- if (retry_later) {
- /* re-queue for attention; this frees me up to handle
- * the peer_ni's incoming connection request */
+ if (retry_later) {
+ /* re-queue for attention; this frees me up to handle
+ * the peer_ni's incoming connection request
+ */
- if (rc == EALREADY ||
- (rc == 0 && peer_ni->ksnp_accepting > 0)) {
- /* We want to introduce a delay before next
- * attempt to connect if we lost conn race,
- * but the race is resolved quickly usually,
- * so min_reconnectms should be good heuristic */
+ if (rc == EALREADY ||
+ (rc == 0 && peer_ni->ksnp_accepting > 0)) {
+ /* We want to introduce a delay before next
+ * attempt to connect if we lost conn race, but
+ * the race is resolved quickly usually, so
+ * min_reconnectms should be good heuristic
+ */
route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
route->ksnr_timeout = ktime_get_seconds() +
- route->ksnr_retry_interval;
- }
+ route->ksnr_retry_interval;
+ }
- ksocknal_launch_connection_locked(route);
- }
+ ksocknal_launch_connection_locked(route);
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return retry_later;
+ return retry_later;
failed:
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
+ route->ksnr_scheduled = 0;
+ route->ksnr_connecting = 0;
/* This is a retry rather than a new connection */
route->ksnr_retry_interval *= 2;
route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
- peer_ni->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
+ peer_ni->ksnp_accepting == 0 &&
+ ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
struct ksock_conn *conn;
- /* ksnp_tx_queue is queued on a conn on successful
- * connection for V1.x and V2.x */
+ /* ksnp_tx_queue is queued on a conn on successful
+ * connection for V1.x and V2.x
+ */
if (!list_empty(&peer_ni->ksnp_conns)) {
conn = list_entry(peer_ni->ksnp_conns.next,
struct ksock_conn, ksnc_list);
- LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
- }
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
+ }
- /* take all the blocked packets while I've got the lock and
- * complete below... */
+ /* take all the blocked packets while I've got the lock and
+ * complete below...
+ */
list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
- }
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
wait_queue_entry_t wait;
int cons_retry = 0;
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
spin_lock_bh(connd_lock);
struct ksock_route *route = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
+ bool dropped_lock = false;
if (ksocknal_connd_check_stop(sec, &timeout)) {
/* wakeup another one to check stop */
break;
}
- if (ksocknal_connd_check_start(sec, &timeout)) {
- /* created new thread */
- dropped_lock = 1;
- }
+ if (ksocknal_connd_check_start(sec, &timeout)) {
+ /* created new thread */
+ dropped_lock = true;
+ }
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
- /* Connection accepted by the listener */
+ /* Connection accepted by the listener */
cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
struct ksock_connreq, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
- dropped_lock = 1;
+ dropped_lock = true;
ksocknal_create_conn(cr->ksncr_ni, NULL,
cr->ksncr_sock, SOCKLND_CONN_NONE);
LIBCFS_FREE(cr, sizeof(*cr));
spin_lock_bh(connd_lock);
- }
+ }
- /* Only handle an outgoing connection request if there
- * is a thread left to handle incoming connections and
- * create new connd */
- if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
- ksocknal_data.ksnd_connd_running) {
- route = ksocknal_connd_get_route_locked(&timeout);
- }
- if (route != NULL) {
+ /* Only handle an outgoing connection request if there
+ * is a thread left to handle incoming connections and
+ * create new connd
+ */
+ if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
+ ksocknal_data.ksnd_connd_running) {
+ route = ksocknal_connd_get_route_locked(&timeout);
+ }
+ if (route != NULL) {
list_del(&route->ksnr_connd_list);
- ksocknal_data.ksnd_connd_connecting++;
+ ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
- dropped_lock = 1;
-
- if (ksocknal_connect(route)) {
- /* consecutive retry */
- if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
- CWARN("massive consecutive "
- "re-connecting to %pI4h\n",
- &route->ksnr_ipaddr);
- cons_retry = 0;
- }
- } else {
- cons_retry = 0;
- }
+ dropped_lock = true;
+
+ if (ksocknal_connect(route)) {
+ /* consecutive retry */
+ if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
+ CWARN("massive consecutive re-connecting to %pIS\n",
+ &route->ksnr_addr);
+ cons_retry = 0;
+ }
+ } else {
+ cons_retry = 0;
+ }
- ksocknal_route_decref(route);
+ ksocknal_route_decref(route);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_connecting--;
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+ &wait);
spin_unlock_bh(connd_lock);
schedule_timeout(timeout);
if (error != 0) {
ksocknal_conn_addref(conn);
- switch (error) {
- case ECONNRESET:
- CNETERR("A connection with %s "
- "(%pI4h:%d) was reset; "
- "it may have rebooted.\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- case ETIMEDOUT:
- CNETERR("A connection with %s "
- "(%pI4h:%d) timed out; the "
- "network or node may be down.\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- default:
- CNETERR("An unexpected network error %d "
- "occurred with %s "
- "(%pI4h:%d\n", error,
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- }
+ switch (error) {
+ case ECONNRESET:
+ CNETERR("A connection with %s (%pISp) was reset; it may have rebooted.\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ break;
+ case ETIMEDOUT:
+ CNETERR("A connection with %s (%pISp) timed out; the network or node may be down.\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ break;
+ default:
+	CNETERR("An unexpected network error %d occurred with %s (%pISp)\n",
+ error,
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ break;
+ }
- return (conn);
- }
+ return conn;
+ }
- if (conn->ksnc_rx_started &&
+ if (conn->ksnc_rx_started &&
ktime_get_seconds() >= conn->ksnc_rx_deadline) {
- /* Timed out incomplete incoming message */
- ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), "
- "state %d wanted %d left %d\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port,
- conn->ksnc_rx_state,
- conn->ksnc_rx_nob_wanted,
- conn->ksnc_rx_nob_left);
- return (conn);
- }
+ /* Timed out incomplete incoming message */
+ ksocknal_conn_addref(conn);
+ CNETERR("Timeout receiving from %s (%pISp), state %d wanted %d left %d\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr,
+ conn->ksnc_rx_state,
+ conn->ksnc_rx_nob_wanted,
+ conn->ksnc_rx_nob_left);
+ return conn;
+ }
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
ktime_get_seconds() >= conn->ksnc_tx_deadline) {
- /* Timed out messages queued for sending or
- * buffered in the socket's send buffer */
- ksocknal_conn_addref(conn);
+ /* Timed out messages queued for sending or
+ * buffered in the socket's send buffer
+ */
+ ksocknal_conn_addref(conn);
list_for_each_entry(tx, &conn->ksnc_tx_queue,
tx_list)
tx->tx_hstatus =
LNET_MSG_STATUS_LOCAL_TIMEOUT;
- CNETERR("Timeout sending data to %s (%pI4h:%d) "
- "the network or that node may be down.\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
- return (conn);
- }
- }
+			CNETERR("Timeout sending data to %s (%pISp); the network or that node may be down.\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ return conn;
+ }
+ }
- return (NULL);
+	return NULL;
}
static inline void
int peer_index = 0;
time64_t deadline = ktime_get_seconds();
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);