From a3275d1d79df5ab5f7ac2cbb5cdef982e8f3c959 Mon Sep 17 00:00:00 2001
From: Mr NeilBrown
Date: Mon, 6 Jul 2020 08:34:41 -0400
Subject: [PATCH] LU-12678 socklnd: change various ints to bool.

Each of these int variables, and one int function, is really a
truth value, so change to bool.
Also convert some spaces to tabs etc.

Signed-off-by: Mr NeilBrown
Change-Id: Ia62a86e549c90a287a20a3b2ef7533c1b700d17e
Reviewed-on: https://review.whamcloud.com/39302
Tested-by: jenkins
Reviewed-by: James Simmons
Reviewed-by: Chris Horn
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lnet/klnds/socklnd/socklnd.c    |  73 ++++++++++++-----------
 lnet/klnds/socklnd/socklnd_cb.c | 128 +++++++++++++++++++++-------------------
 2 files changed, 105 insertions(+), 96 deletions(-)

diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index c71b36a..c7f9184 100644
--- a/lnet/klnds/socklnd/socklnd.c
+++ b/lnet/klnds/socklnd/socklnd.c
@@ -1615,7 +1615,7 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
 void
 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
 {
-	int notify = 0;
+	bool notify = false;
 	time64_t last_alive = 0;
 
 	/* There has been a connection failure or comms error; but I'll only
@@ -1628,7 +1628,7 @@ ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
 	    list_empty(&peer_ni->ksnp_conns) &&
 	    peer_ni->ksnp_accepting == 0 &&
 	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
-		notify = 1;
+		notify = true;
 		last_alive = peer_ni->ksnp_last_alive;
 	}
 
@@ -1677,28 +1677,29 @@ ksocknal_finalize_zcreq(struct ksock_conn *conn)
 void
 ksocknal_terminate_conn(struct ksock_conn *conn)
 {
-        /* This gets called by the reaper (guaranteed thread context) to
-         * disengage the socket from its callbacks and close it.
-         * ksnc_refcount will eventually hit zero, and then the reaper will
-         * destroy it. */
+	/* This gets called by the reaper (guaranteed thread context) to
+	 * disengage the socket from its callbacks and close it.
+	 * ksnc_refcount will eventually hit zero, and then the reaper will
+	 * destroy it.
+	 */
 	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
 	struct ksock_sched *sched = conn->ksnc_scheduler;
-	int failed = 0;
+	bool failed = false;
 
-        LASSERT(conn->ksnc_closing);
+	LASSERT(conn->ksnc_closing);
 
-        /* wake up the scheduler to "send" all remaining packets to /dev/null */
+	/* wake up the scheduler to "send" all remaining packets to /dev/null */
 	spin_lock_bh(&sched->kss_lock);
 
-        /* a closing conn is always ready to tx */
-        conn->ksnc_tx_ready = 1;
+	/* a closing conn is always ready to tx */
+	conn->ksnc_tx_ready = 1;
 
-        if (!conn->ksnc_tx_scheduled &&
+	if (!conn->ksnc_tx_scheduled &&
 	    !list_empty(&conn->ksnc_tx_queue)) {
 		list_add_tail(&conn->ksnc_tx_list,
-			       &sched->kss_tx_conns);
-                conn->ksnc_tx_scheduled = 1;
-                /* extra ref for scheduler */
+			      &sched->kss_tx_conns);
+		conn->ksnc_tx_scheduled = 1;
+		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
 		wake_up (&sched->kss_waitq);
@@ -1709,30 +1710,32 @@ ksocknal_terminate_conn(struct ksock_conn *conn)
 	/* serialise with callbacks */
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
+	ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
 
-        /* OK, so this conn may not be completely disengaged from its
-         * scheduler yet, but it _has_ committed to terminate... */
-        conn->ksnc_scheduler->kss_nconns--;
+	/* OK, so this conn may not be completely disengaged from its
+	 * scheduler yet, but it _has_ committed to terminate...
+	 */
+	conn->ksnc_scheduler->kss_nconns--;
 
-        if (peer_ni->ksnp_error != 0) {
-                /* peer_ni's last conn closed in error */
+	if (peer_ni->ksnp_error != 0) {
+		/* peer_ni's last conn closed in error */
 		LASSERT(list_empty(&peer_ni->ksnp_conns));
-                failed = 1;
-                peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
-        }
+		failed = true;
+		peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
+	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-        if (failed)
-                ksocknal_peer_failed(peer_ni);
+	if (failed)
+		ksocknal_peer_failed(peer_ni);
 
-        /* The socket is closed on the final put; either here, or in
-         * ksocknal_{send,recv}msg(). Since we set up the linger2 option
-         * when the connection was established, this will close the socket
-         * immediately, aborting anything buffered in it. Any hung
-         * zero-copy transmits will therefore complete in finite time. */
-        ksocknal_connsock_decref(conn);
+	/* The socket is closed on the final put; either here, or in
+	 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
+	 * when the connection was established, this will close the socket
+	 * immediately, aborting anything buffered in it. Any hung
+	 * zero-copy transmits will therefore complete in finite time.
+	 */
+	ksocknal_connsock_decref(conn);
 }
 
 void
@@ -2603,7 +2606,7 @@ ksocknal_search_new_ipif(struct ksock_net *net)
 	for (i = 0; i < net->ksnn_ninterfaces; i++) {
 		char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
 		char *colon = strchr(ifnam, ':');
-		int found = 0;
+		bool found = false;
 		struct ksock_net *tmp;
 		int j;
 
@@ -2611,10 +2614,10 @@ ksocknal_search_new_ipif(struct ksock_net *net)
 			*colon = 0;
 
 		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
-				     ksnn_list) {
+				    ksnn_list) {
 			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
 				char *ifnam2 = &tmp->ksnn_interfaces[j].\
-						ksni_name[0];
+					ksni_name[0];
 				char *colon2 = strchr(ifnam2, ':');
 
 				if (colon2 != NULL)
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index 0097815..72bf35f 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -1464,7 +1464,7 @@ int ksocknal_scheduler(void *arg)
 	spin_lock_bh(&sched->kss_lock);
 
 	while (!ksocknal_data.ksnd_shuttingdown) {
-		int did_something = 0;
+		bool did_something = false;
 
 		/* Ensure I progress everything semi-fairly */
 
@@ -1510,7 +1510,7 @@ int ksocknal_scheduler(void *arg)
 				ksocknal_conn_decref(conn);
 			}
 
-			did_something = 1;
+			did_something = true;
 		}
 
 		if (!list_empty(&sched->kss_tx_conns)) {
@@ -1578,7 +1578,7 @@ int ksocknal_scheduler(void *arg)
 			ksocknal_conn_decref(conn);
 		}
 
-		did_something = 1;
+		did_something = true;
 	}
 	if (!did_something ||	/* nothing to do */
 	    need_resched()) {	/* hogging CPU? */
@@ -1896,26 +1896,26 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
 	return 0;
 }
 
-static int
+static bool
 ksocknal_connect(struct ksock_route *route)
 {
 	LIST_HEAD(zombies);
 	struct ksock_peer_ni *peer_ni = route->ksnr_peer;
-        int type;
-        int wanted;
-        struct socket *sock;
+	int type;
+	int wanted;
+	struct socket *sock;
 	time64_t deadline;
-        int retry_later = 0;
-        int rc = 0;
+	bool retry_later = false;
+	int rc = 0;
 
 	deadline = ktime_get_seconds() + ksocknal_timeout();
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	LASSERT (route->ksnr_scheduled);
-	LASSERT (!route->ksnr_connecting);
+	LASSERT(route->ksnr_scheduled);
+	LASSERT(!route->ksnr_connecting);
 
-        route->ksnr_connecting = 1;
+	route->ksnr_connecting = 1;
 
 	for (;;) {
 		wanted = ksocknal_route_mask() & ~route->ksnr_connected;
@@ -1924,7 +1924,7 @@ ksocknal_connect(struct ksock_route *route)
 		 * route got connected while queued */
 		if (peer_ni->ksnp_closing || route->ksnr_deleted ||
 		    wanted == 0) {
-			retry_later = 0;
+			retry_later = false;
 			break;
 		}
 
@@ -1933,7 +1933,7 @@ ksocknal_connect(struct ksock_route *route)
 			CDEBUG(D_NET, "peer_ni %s(%d) already connecting to me, retry later.\n",
 			       libcfs_nid2str(peer_ni->ksnp_id.nid),
 			       peer_ni->ksnp_accepting);
-			retry_later = 1;
+			retry_later = true;
 		}
 
 		if (retry_later) /* needs reschedule */
@@ -1987,35 +1987,37 @@ ksocknal_connect(struct ksock_route *route)
 		write_lock_bh(&ksocknal_data.ksnd_global_lock);
 	}
 
-        route->ksnr_scheduled = 0;
-        route->ksnr_connecting = 0;
+	route->ksnr_scheduled = 0;
+	route->ksnr_connecting = 0;
 
-        if (retry_later) {
-                /* re-queue for attention; this frees me up to handle
-                 * the peer_ni's incoming connection request */
+	if (retry_later) {
+		/* re-queue for attention; this frees me up to handle
+		 * the peer_ni's incoming connection request
+		 */
 
-                if (rc == EALREADY ||
-                    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
-                        /* We want to introduce a delay before next
-                         * attempt to connect if we lost conn race,
-                         * but the race is resolved quickly usually,
-                         * so min_reconnectms should be good heuristic */
+		if (rc == EALREADY ||
+		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
+			/* We want to introduce a delay before next
+			 * attempt to connect if we lost conn race, but
+			 * the race is resolved quickly usually, so
+			 * min_reconnectms should be good heuristic
+			 */
 			route->ksnr_retry_interval =
 				*ksocknal_tunables.ksnd_min_reconnectms / 1000;
 			route->ksnr_timeout = ktime_get_seconds() +
-					       route->ksnr_retry_interval;
-                }
+					      route->ksnr_retry_interval;
+		}
 
-                ksocknal_launch_connection_locked(route);
-        }
+		ksocknal_launch_connection_locked(route);
+	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-        return retry_later;
+	return retry_later;
 
  failed:
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
-        route->ksnr_scheduled = 0;
-        route->ksnr_connecting = 0;
+	route->ksnr_scheduled = 0;
+	route->ksnr_connecting = 0;
 
 	/* This is a retry rather than a new connection */
 	route->ksnr_retry_interval *= 2;
@@ -2030,22 +2032,24 @@ ksocknal_connect(struct ksock_route *route)
 	route->ksnr_timeout = ktime_get_seconds() +
 			      route->ksnr_retry_interval;
 	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
-            peer_ni->ksnp_accepting == 0 &&
-            ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
+	    peer_ni->ksnp_accepting == 0 &&
+	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
 		struct ksock_conn *conn;
 
-                /* ksnp_tx_queue is queued on a conn on successful
-                 * connection for V1.x and V2.x */
+		/* ksnp_tx_queue is queued on a conn on successful
+		 * connection for V1.x and V2.x
+		 */
 		if (!list_empty(&peer_ni->ksnp_conns)) {
 			conn = list_entry(peer_ni->ksnp_conns.next,
 					  struct ksock_conn, ksnc_list);
-                        LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
-                }
+			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
+		}
 
-                /* take all the blocked packets while I've got the lock and
-                 * complete below... */
+		/* take all the blocked packets while I've got the lock and
+		 * complete below...
+		 */
 		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
-        }
+	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -2201,7 +2205,7 @@ ksocknal_connd(void *arg)
 		struct ksock_route *route = NULL;
 		time64_t sec = ktime_get_real_seconds();
 		long timeout = MAX_SCHEDULE_TIMEOUT;
-		int dropped_lock = 0;
+		bool dropped_lock = false;
 
 		if (ksocknal_connd_check_stop(sec, &timeout)) {
 			/* wakeup another one to check stop */
@@ -2209,19 +2213,19 @@ ksocknal_connd(void *arg)
 			break;
 		}
 
-                if (ksocknal_connd_check_start(sec, &timeout)) {
-                        /* created new thread */
-                        dropped_lock = 1;
-                }
+		if (ksocknal_connd_check_start(sec, &timeout)) {
+			/* created new thread */
+			dropped_lock = true;
+		}
 
 		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
-                        /* Connection accepted by the listener */
+			/* Connection accepted by the listener */
 			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
 					struct ksock_connreq, ksncr_list);
 
 			list_del(&cr->ksncr_list);
 			spin_unlock_bh(connd_lock);
-			dropped_lock = 1;
+			dropped_lock = true;
 
 			ksocknal_create_conn(cr->ksncr_ni, NULL,
 					     cr->ksncr_sock, SOCKLND_CONN_NONE);
@@ -2229,20 +2233,21 @@ ksocknal_connd(void *arg)
 			LIBCFS_FREE(cr, sizeof(*cr));
 
 			spin_lock_bh(connd_lock);
-                }
+		}
 
-                /* Only handle an outgoing connection request if there
-                 * is a thread left to handle incoming connections and
-                 * create new connd */
-                if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
-                    ksocknal_data.ksnd_connd_running) {
-                        route = ksocknal_connd_get_route_locked(&timeout);
-                }
-                if (route != NULL) {
+		/* Only handle an outgoing connection request if there
+		 * is a thread left to handle incoming connections and
+		 * create new connd
+		 */
+		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
+		    ksocknal_data.ksnd_connd_running) {
+			route = ksocknal_connd_get_route_locked(&timeout);
+		}
+		if (route != NULL) {
 			list_del(&route->ksnr_connd_list);
-                        ksocknal_data.ksnd_connd_connecting++;
+			ksocknal_data.ksnd_connd_connecting++;
 			spin_unlock_bh(connd_lock);
-			dropped_lock = 1;
+			dropped_lock = true;
 
 			if (ksocknal_connect(route)) {
 				/* consecutive retry */
@@ -2255,7 +2260,7 @@ ksocknal_connd(void *arg)
 				cons_retry = 0;
 			}
 
-			ksocknal_route_decref(route);
+			ksocknal_route_decref(route);
 
 			spin_lock_bh(connd_lock);
 			ksocknal_data.ksnd_connd_connecting--;
@@ -2272,7 +2277,8 @@ ksocknal_connd(void *arg)
 
 		/* Nothing to do for 'timeout' */
 		set_current_state(TASK_INTERRUPTIBLE);
-		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+					 &wait);
 		spin_unlock_bh(connd_lock);
 
 		schedule_timeout(timeout);
-- 
1.8.3.1
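For reference, a minimal standalone sketch of the int-to-bool conversion pattern this patch applies throughout socklnd. It is illustrative only: the helper names and flags below are hypothetical and not part of the Lustre tree, and user-space stdbool.h stands in for the kernel's bool type.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper, before: an int used purely as a truth value. */
static int queue_needs_flush_old(int scheduled, int queue_len)
{
	int ready = 0;			/* 0/1 standing in for false/true */

	if (!scheduled && queue_len > 0)
		ready = 1;

	return ready;
}

/* Same logic after the conversion: bool states the intent directly and
 * keeps callers from doing arithmetic on what is really a flag. */
static bool queue_needs_flush_new(bool scheduled, int queue_len)
{
	bool ready = false;

	if (!scheduled && queue_len > 0)
		ready = true;

	return ready;
}

int main(void)
{
	printf("old=%d new=%d\n",
	       queue_needs_flush_old(0, 3),
	       queue_needs_flush_new(false, 3));
	return 0;
}

Fields such as ksnc_tx_ready and ksnr_connecting stay as 0/1 assignments in the patch because they are bitfields in their structs; only plain local variables and the ksocknal_connect() return type switch to bool.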