From 59c25356208706b4dd0920bc7754f21e2db14a0f Mon Sep 17 00:00:00 2001
From: James Simmons
Date: Tue, 23 May 2017 02:50:02 -0400
Subject: [PATCH] LU-9397 ksocklnd: move remaining time handling to 64 bits

Examination of the ksocklnd time handling revealed that the code only
requires second-level precision, so we can move away from jiffies to
time64_t. This makes the code independent of the HZ setting and makes
it clear which values are times, since they are time64_t rather than
unsigned long. In the process we remove many of the libcfs time
wrappers as well.

Change-Id: I968630ef94febd4bff703fb633e677996939f95b
Signed-off-by: James Simmons
Reviewed-on: https://review.whamcloud.com/26813
Reviewed-by: Doug Oucharek
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: Amir Shehata
Reviewed-by: Oleg Drokin
---
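
The conversion recipe, in miniature (an illustrative sketch, not code
from this patch; `timeout' stands in for *ksocknal_tunables.ksnd_timeout
and handle_timeout() is a hypothetical placeholder):

	/* Before: jiffies-based and HZ-dependent.  cfs_time_shift(t)
	 * expands to cfs_time_add(cfs_time_current(),
	 * cfs_time_seconds(t)), i.e. "now + t" measured in jiffies. */
	cfs_time_t jiffies_deadline = cfs_time_shift(timeout);

	if (cfs_time_aftereq(cfs_time_current(), jiffies_deadline))
		handle_timeout();	/* hypothetical placeholder */

	/* After: plain second arithmetic on the monotonic clock.  Note
	 * ktime_get_seconds() (monotonic), not ktime_get_real_seconds()
	 * (wall clock), so deadlines are immune to clock steps and
	 * ordinary comparison operators replace the cfs_time_*()
	 * wrappers. */
	time64_t secs_deadline = ktime_get_seconds() + timeout;

	if (ktime_get_seconds() >= secs_deadline)
		handle_timeout();	/* hypothetical placeholder */

Jiffies survive only at kernel and LNet boundaries, e.g.
schedule_timeout() and the *when out-parameter of ksocknal_query(),
where cfs_time_seconds() converts a second count back to jiffies.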
 lnet/klnds/socklnd/socklnd.c    |  38 +++++-----
 lnet/klnds/socklnd/socklnd.h    |  26 +++----
 lnet/klnds/socklnd/socklnd_cb.c | 151 ++++++++++++++++++----------------------
 3 files changed, 100 insertions(+), 115 deletions(-)

diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index 0fdedba..23f5490 100644
--- a/lnet/klnds/socklnd/socklnd.c
+++ b/lnet/klnds/socklnd/socklnd.c
@@ -1268,7 +1268,7 @@ ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
 	}
 
 	conn->ksnc_peer = peer_ni;	/* conn takes my ref on peer_ni */
-	peer_ni->ksnp_last_alive = ktime_get_real_seconds();
+	peer_ni->ksnp_last_alive = ktime_get_seconds();
 	peer_ni->ksnp_send_keepalive = 0;
 	peer_ni->ksnp_error = 0;
@@ -1285,10 +1285,11 @@ ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
 	sched->kss_nconns++;
 	conn->ksnc_scheduler = sched;
 
-	conn->ksnc_tx_last_post = ktime_get_real_seconds();
+	conn->ksnc_tx_last_post = ktime_get_seconds();
 	/* Set the deadline for the outgoing HELLO to drain */
 	conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
-	conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	conn->ksnc_tx_deadline = ktime_get_seconds() +
+				 *ksocknal_tunables.ksnd_timeout;
 	smp_mb();	/* order with adding to peer_ni's conn list */
 
 	list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
@@ -1516,7 +1517,7 @@ void
 ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
 {
 	int notify = 0;
-	cfs_time_t last_alive = 0;
+	time64_t last_alive = 0;
 
 	/* There has been a connection failure or comms error; but I'll only
 	 * tell LNET I think the peer_ni is dead if it's to another kernel and
@@ -1536,7 +1537,7 @@ ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
 
 	if (notify)
 		lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
-			    last_alive);
+			    cfs_time_seconds(last_alive)); /* to jiffies */
 }
 
 void
@@ -1651,9 +1652,9 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 }
 
 void
-ksocknal_destroy_conn (ksock_conn_t *conn)
+ksocknal_destroy_conn(ksock_conn_t *conn)
 {
-	cfs_time_t last_rcv;
+	time64_t last_rcv;
 
 	/* Final coup-de-grace of the reaper */
 	CDEBUG (D_NET, "connection %p\n", conn);
@@ -1670,15 +1671,14 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
 	switch (conn->ksnc_rx_state) {
 	case SOCKNAL_RX_LNET_PAYLOAD:
 		last_rcv = conn->ksnc_rx_deadline -
-			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
+			   *ksocknal_tunables.ksnd_timeout;
 		CERROR("Completing partial receive from %s[%d], "
 		       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
-		       "last alive is %ld secs ago\n",
+		       "last alive is %lld secs ago\n",
 		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
 		       &conn->ksnc_ipaddr, conn->ksnc_port,
 		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
-		       cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
-						     last_rcv)));
+		       ktime_get_seconds() - last_rcv);
 		lnet_finalize(conn->ksnc_cookie, -EIO);
 		break;
 	case SOCKNAL_RX_LNET_HEADER:
@@ -1822,7 +1822,7 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
 {
 	int connect = 1;
 	time64_t last_alive = 0;
-	time64_t now = ktime_get_real_seconds();
+	time64_t now = ktime_get_seconds();
 	ksock_peer_ni_t *peer_ni = NULL;
 	rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
 	struct lnet_process_id id = {
@@ -1842,10 +1842,10 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
 			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
 			bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
 
-			if (bufnob < conn->ksnc_tx_bufnob) {
-				/* something got ACKed */
-				conn->ksnc_tx_deadline =
-					cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+			if (bufnob < conn->ksnc_tx_bufnob) {
+				/* something got ACKed */
+				conn->ksnc_tx_deadline = ktime_get_seconds() +
+					*ksocknal_tunables.ksnd_timeout;
 				peer_ni->ksnp_last_alive = now;
 				conn->ksnc_tx_bufnob = bufnob;
 			}
@@ -1859,11 +1859,11 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
 	read_unlock(glock);
 
 	if (last_alive != 0)
-		*when = last_alive;
+		*when = cfs_time_seconds(last_alive);
 
-	CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
+	CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
 	       libcfs_nid2str(nid), peer_ni,
-	       last_alive ? cfs_duration_sec(now - last_alive) : -1,
+	       last_alive ? now - last_alive : -1,
 	       connect);
 
 	if (!connect)
diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h
index 3152a45..b882025 100644
--- a/lnet/klnds/socklnd/socklnd.h
+++ b/lnet/klnds/socklnd/socklnd.h
@@ -74,7 +74,7 @@
 #define SOCKNAL_PEER_HASH_SIZE	101	/* # peer_ni lists */
 #define SOCKNAL_RESCHED		100	/* # scheduler loops before reschedule */
 #define SOCKNAL_INSANITY_RECONN	5000	/* connd is trying on reconn infinitely */
-#define SOCKNAL_ENOMEM_RETRY	CFS_TICK /* jiffies between retries */
+#define SOCKNAL_ENOMEM_RETRY	1	/* seconds between retries */
 
 #define SOCKNAL_SINGLE_FRAG_TX	0	/* disable multi-fragment sends */
 #define SOCKNAL_SINGLE_FRAG_RX	0	/* disable multi-fragment receives */
@@ -218,7 +218,7 @@ typedef struct
 	/* reaper sleeps here */
 	wait_queue_head_t	ksnd_reaper_waitq;
 	/* when reaper will wake */
-	cfs_time_t		ksnd_reaper_waketime;
+	time64_t		ksnd_reaper_waketime;
 	/* serialise */
 	spinlock_t		ksnd_reaper_lock;
@@ -287,8 +287,8 @@ typedef struct			/* transmit packet */
 	lnet_kiov_t	*tx_kiov;	/* packet page frags */
 	struct ksock_conn *tx_conn;	/* owning conn */
 	struct lnet_msg	*tx_lnetmsg;	/* lnet message for lnet_finalize() */
-	cfs_time_t	tx_deadline;	/* when (in jiffies) tx times out */
-	struct ksock_msg tx_msg;	/* socklnd message buffer */
+	time64_t	tx_deadline;	/* when (in secs) tx times out */
+	struct ksock_msg tx_msg;	/* socklnd message buffer */
 	int		tx_desc_size;	/* size of this descriptor */
 	union {
 		struct {
@@ -344,7 +344,7 @@ typedef struct ksock_conn
 	/* where I enq waiting input or a forwarding descriptor */
 	struct list_head	ksnc_rx_list;
-	cfs_time_t		ksnc_rx_deadline; /* when (in jiffies) receive times out */
+	time64_t		ksnc_rx_deadline; /* when (in seconds) receive times out */
 	__u8			ksnc_rx_started;  /* started receiving a message */
 	__u8			ksnc_rx_ready;	  /* data ready to read */
 	__u8			ksnc_rx_scheduled;/* being progressed */
@@ -372,8 +372,8 @@ typedef struct ksock_conn
 	struct list_head	ksnc_tx_queue;
 	/* next TX that can carry a LNet message or ZC-ACK */
 	ksock_tx_t		*ksnc_tx_carrier;
-	/* when (in jiffies) tx times out */
-	cfs_time_t		ksnc_tx_deadline;
+	/* when (in seconds) tx times out */
+	time64_t		ksnc_tx_deadline;
 	/* send buffer marker */
 	int			ksnc_tx_bufnob;
 	/* # bytes queued */
@@ -383,7 +383,7 @@ typedef struct ksock_conn
 	/* being progressed */
 	int			ksnc_tx_scheduled;
 	/* time stamp of the last posted TX */
-	cfs_time_t		ksnc_tx_last_post;
+	time64_t		ksnc_tx_last_post;
 } ksock_conn_t;
 
 typedef struct ksock_route
@@ -392,8 +392,8 @@
 	struct list_head	ksnr_connd_list; /* chain on ksnr_connd_routes */
 	struct ksock_peer	*ksnr_peer;	 /* owning peer_ni */
 	atomic_t		ksnr_refcount;	 /* # users */
-	cfs_time_t		ksnr_timeout;	 /* when (in jiffies) reconnection can happen next */
-	cfs_duration_t		ksnr_retry_interval; /* how long between retries */
+	time64_t		ksnr_timeout;	 /* when (in secs) reconnection can happen next */
+	time64_t		ksnr_retry_interval; /* how long between retries */
 	__u32			ksnr_myipaddr;	 /* my IP */
 	__u32			ksnr_ipaddr;	 /* IP address to connect to */
 	int			ksnr_port;	 /* port to connect to */
@@ -410,7 +410,7 @@ typedef struct ksock_peer
 {
 	struct list_head	ksnp_list;	/* stash on global peer_ni list */
-	cfs_time_t		ksnp_last_alive; /* when (in jiffies) I was last alive */
+	time64_t		ksnp_last_alive;/* when (in seconds) I was last alive */
 	struct lnet_process_id	ksnp_id;	/* who's on the other end(s) */
 	atomic_t		ksnp_refcount;	/* # users */
 	int			ksnp_sharecount; /* lconf usage counter */
@@ -426,7 +426,7 @@ typedef struct ksock_peer
 	spinlock_t		ksnp_lock;	/* serialize, g_lock unsafe */
 	/* zero copy requests wait for ACK */
 	struct list_head	ksnp_zc_req_list;
-	cfs_time_t		ksnp_send_keepalive; /* time to send keepalive */
+	time64_t		ksnp_send_keepalive; /* time to send keepalive */
 	struct lnet_ni		*ksnp_ni;	/* which network */
 	int			ksnp_n_passive_ips; /* # of... */
 	__u32			ksnp_passive_ips[LNET_NUM_INTERFACES]; /* preferred local interfaces */
@@ -647,7 +647,7 @@ extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
 extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
 				 int error);
 extern void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
 extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
 extern void ksocknal_thread_fini (void);
 extern void ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni);
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index 3265b02..cabb246 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -221,9 +221,9 @@ ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 		if (bufnob < conn->ksnc_tx_bufnob) {
 			/* allocated send buffer bytes < computed; infer
 			 * something got ACKed */
-			conn->ksnc_tx_deadline =
-				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+			conn->ksnc_tx_deadline = ktime_get_seconds() +
+				*ksocknal_tunables.ksnd_timeout;
+			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
 			conn->ksnc_tx_bufnob = bufnob;
 			smp_mb();
 		}
@@ -269,9 +269,9 @@ ksocknal_recv_iov (ksock_conn_t *conn)
 	/* received something... */
 	nob = rc;
 
-	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-	conn->ksnc_rx_deadline =
-		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
+	conn->ksnc_rx_deadline = ktime_get_seconds() +
+				 *ksocknal_tunables.ksnd_timeout;
 	smp_mb();	/* order with setting rx_started */
 	conn->ksnc_rx_started = 1;
@@ -313,9 +313,9 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
 	/* received something... */
 	nob = rc;
 
-	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-	conn->ksnc_rx_deadline =
-		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
+	conn->ksnc_rx_deadline = ktime_get_seconds() +
+				 *ksocknal_tunables.ksnd_timeout;
 	smp_mb();	/* order with setting rx_started */
 	conn->ksnc_rx_started = 1;
@@ -463,8 +463,8 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 	spin_lock(&peer_ni->ksnp_lock);
 
 	/* ZC_REQ is going to be pinned to the peer_ni */
-	tx->tx_deadline =
-		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	tx->tx_deadline = ktime_get_seconds() +
+			  *ksocknal_tunables.ksnd_timeout;
 
 	LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
@@ -541,9 +541,8 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		LASSERT (conn->ksnc_tx_scheduled);
 		list_add_tail(&conn->ksnc_tx_list,
 			      &ksocknal_data.ksnd_enomem_conns);
-		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
-						   SOCKNAL_ENOMEM_RETRY),
-				      ksocknal_data.ksnd_reaper_waketime))
+		if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
+		    ksocknal_data.ksnd_reaper_waketime)
 			wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
 		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -651,7 +650,7 @@ ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
 		case SOCKNAL_MATCH_YES: /* typed connection */
 			if (typed == NULL || tnob > nob ||
 			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			     typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
 				typed = c;
 				tnob  = nob;
 			}
@@ -660,7 +659,7 @@ ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
 		case SOCKNAL_MATCH_MAY: /* fallback connection */
 			if (fallback == NULL || fnob > nob ||
 			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			     fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
 				fallback = c;
 				fnob     = nob;
 			}
@@ -672,7 +671,7 @@ ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
 	conn = (typed != NULL) ? typed : fallback;
 
 	if (conn != NULL)
-		conn->ksnc_tx_last_post = cfs_time_current();
+		conn->ksnc_tx_last_post = ktime_get_seconds();
 
 	return conn;
 }
@@ -729,10 +728,10 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
 		/* First packet starts the timeout */
-		conn->ksnc_tx_deadline =
-			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+		conn->ksnc_tx_deadline = ktime_get_seconds() +
+					 *ksocknal_tunables.ksnd_timeout;
 		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
-			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
 		conn->ksnc_tx_bufnob = 0;
 		smp_mb(); /* order with adding to tx_queue */
 	}
@@ -778,8 +777,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 ksock_route_t *
 ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
 {
-	cfs_time_t now = cfs_time_current();
-	struct list_head *tmp;
+	time64_t now = ktime_get_seconds();
+	struct list_head *tmp;
 	ksock_route_t *route;
 
 	list_for_each(tmp, &peer_ni->ksnp_routes) {
@@ -795,14 +794,14 @@ ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
 			continue;
 
 		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
-		      cfs_time_aftereq(now, route->ksnr_timeout))) {
+		      now >= route->ksnr_timeout)) {
 			CDEBUG(D_NET, "Too soon to retry route %pI4h "
-			       "(cnted %d, interval %ld, %ld secs later)\n",
+			       "(cnted %d, interval %lld, %lld secs later)\n",
 			       &route->ksnr_ipaddr,
 			       route->ksnr_connected,
 			       route->ksnr_retry_interval,
-			       cfs_duration_sec(route->ksnr_timeout - now));
+			       route->ksnr_timeout - now);
 			continue;
 		}
@@ -906,8 +905,8 @@ ksocknal_launch_packet(struct lnet_ni *ni, ksock_tx_t *tx,
 	if (peer_ni->ksnp_accepting > 0 ||
 	    ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
 		/* the message is going to be pinned to the peer_ni */
-		tx->tx_deadline =
-			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+		tx->tx_deadline = ktime_get_seconds() +
+				  *ksocknal_tunables.ksnd_timeout;
 
 		/* Queue the message until a connection is established */
 		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
@@ -1854,12 +1853,11 @@ ksocknal_connect (ksock_route_t *route)
 	int type;
 	int wanted;
 	struct socket *sock;
-	cfs_time_t deadline;
+	time64_t deadline;
 	int retry_later = 0;
 	int rc = 0;
 
-	deadline = cfs_time_add(cfs_time_current(),
-				cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+	deadline = ktime_get_seconds() + *ksocknal_tunables.ksnd_timeout;
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -1903,7 +1901,7 @@ ksocknal_connect (ksock_route_t *route)
 
 		write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-		if (cfs_time_aftereq(cfs_time_current(), deadline)) {
+		if (ktime_get_seconds() >= deadline) {
 			rc = -ETIMEDOUT;
 			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
 						   route->ksnr_ipaddr,
@@ -1948,10 +1946,9 @@ ksocknal_connect (ksock_route_t *route)
 		 * attempt to connect if we lost conn race,
 		 * but the race is resolved quickly usually,
 		 * so min_reconnectms should be good heuristic */
-		route->ksnr_retry_interval =
-			cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
-		route->ksnr_timeout = cfs_time_add(cfs_time_current(),
-						   route->ksnr_retry_interval);
+		route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
+		route->ksnr_timeout = ktime_get_seconds() +
+				      route->ksnr_retry_interval;
 	}
 
 	ksocknal_launch_connection_locked(route);
@@ -1966,18 +1963,17 @@ ksocknal_connect (ksock_route_t *route)
 	route->ksnr_scheduled = 0;
 	route->ksnr_connecting = 0;
 
-	/* This is a retry rather than a new connection */
-	route->ksnr_retry_interval *= 2;
-	route->ksnr_retry_interval =
-		MAX(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
-	route->ksnr_retry_interval =
-		MIN(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+	/* This is a retry rather than a new connection */
+	route->ksnr_retry_interval *= 2;
+	route->ksnr_retry_interval =
+		max_t(time64_t, route->ksnr_retry_interval,
+		      *ksocknal_tunables.ksnd_min_reconnectms / 1000);
+	route->ksnr_retry_interval =
+		min_t(time64_t, route->ksnr_retry_interval,
+		      *ksocknal_tunables.ksnd_max_reconnectms / 1000);
 
-	LASSERT (route->ksnr_retry_interval != 0);
-	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
-					   route->ksnr_retry_interval);
+	LASSERT(route->ksnr_retry_interval);
+	route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;
 
 	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
 	    peer_ni->ksnp_accepting == 0 &&
@@ -2112,22 +2108,20 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
 static ksock_route_t *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
+	time64_t now = ktime_get_seconds();
 	ksock_route_t *route;
-	cfs_time_t now;
-
-	now = cfs_time_current();
 
 	/* connd_routes can contain both pending and ordinary routes */
 	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
 			    ksnr_connd_list) {
 		if (route->ksnr_retry_interval == 0 ||
-		    cfs_time_aftereq(now, route->ksnr_timeout))
+		    now >= route->ksnr_timeout)
 			return route;
 
 		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
-		    (int)*timeout_p > (int)(route->ksnr_timeout - now))
-			*timeout_p = (int)(route->ksnr_timeout - now);
+		    *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
+			*timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
 	}
 
 	return NULL;
@@ -2295,8 +2289,7 @@ ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni)
 		}
 
 		if (conn->ksnc_rx_started &&
-		    cfs_time_aftereq(cfs_time_current(),
-				     conn->ksnc_rx_deadline)) {
+		    ktime_get_seconds() >= conn->ksnc_rx_deadline) {
 			/* Timed out incomplete incoming message */
 			ksocknal_conn_addref(conn);
 			CNETERR("Timeout receiving from %s (%pI4h:%d), "
@@ -2312,8 +2305,7 @@ ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni)
 
 		if ((!list_empty(&conn->ksnc_tx_queue) ||
 		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
-		    cfs_time_aftereq(cfs_time_current(),
-				     conn->ksnc_tx_deadline)) {
+		    ktime_get_seconds() >= conn->ksnc_tx_deadline) {
 			/* Timed out messages queued for sending or
 			 * buffered in the socket's send buffer */
 			ksocknal_conn_addref(conn);
@@ -2340,8 +2332,7 @@ ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
 		tx = list_entry(peer_ni->ksnp_tx_queue.next,
 				ksock_tx_t, tx_list);
 
-		if (!cfs_time_aftereq(cfs_time_current(),
-				      tx->tx_deadline))
+		if (ktime_get_seconds() < tx->tx_deadline)
 			break;
 
 		list_del(&tx->tx_list);
@@ -2369,18 +2360,16 @@ __must_hold(&ksocknal_data.ksnd_global_lock)
 		return 0;
 
 	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
-	    cfs_time_before(cfs_time_current(),
-			    cfs_time_add(peer_ni->ksnp_last_alive,
-					 cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
+				  *ksocknal_tunables.ksnd_keepalive)
 		return 0;
 
-	if (cfs_time_before(cfs_time_current(),
-			    peer_ni->ksnp_send_keepalive))
+	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
 		return 0;
 
 	/* retry 10 secs later, so we wouldn't put pressure
 	 * on this peer_ni if we failed to send keepalive this time */
-	peer_ni->ksnp_send_keepalive = cfs_time_shift(10);
+	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;
 
 	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
 	if (conn != NULL) {
@@ -2433,9 +2422,9 @@ ksocknal_check_peer_timeouts (int idx)
 
 	list_for_each_entry(peer_ni, peers, ksnp_list) {
 		ksock_tx_t *tx_stale;
-		cfs_time_t deadline = 0;
-		int resid = 0;
-		int n = 0;
+		time64_t deadline = 0;
+		int resid = 0;
+		int n = 0;
 
 		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2463,9 +2452,7 @@ ksocknal_check_peer_timeouts (int idx)
 				list_entry(peer_ni->ksnp_tx_queue.next,
 					   ksock_tx_t, tx_list);
 
-			if (cfs_time_aftereq(cfs_time_current(),
-					     tx->tx_deadline)) {
-
+			if (ktime_get_seconds() >= tx->tx_deadline) {
 				ksocknal_peer_addref(peer_ni);
 				read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2482,8 +2469,7 @@ ksocknal_check_peer_timeouts (int idx)
 		tx_stale = NULL;
 		spin_lock(&peer_ni->ksnp_lock);
 		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
-			if (!cfs_time_aftereq(cfs_time_current(),
-					      tx->tx_deadline))
+			if (ktime_get_seconds() < tx->tx_deadline)
 				break;
 			/* ignore the TX if connection is being closed */
 			if (tx->tx_conn->ksnc_closing)
@@ -2507,10 +2493,10 @@ ksocknal_check_peer_timeouts (int idx)
 		read_unlock(&ksocknal_data.ksnd_global_lock);
 
 		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
-		       "oldest(%p) timed out %ld secs ago, "
+		       "oldest(%p) timed out %lld secs ago, "
 		       "resid: %d, wmem: %d\n",
 		       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
-		       cfs_duration_sec(cfs_time_current() - deadline),
+		       ktime_get_seconds() - deadline,
 		       resid, conn->ksnc_sock->sk->sk_wmem_queued);
 
 		ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
@@ -2528,10 +2514,10 @@ int ksocknal_reaper(void *arg)
 	ksock_sched_t *sched;
 	struct list_head enomem_conns;
 	int nenomem_conns;
-	cfs_duration_t timeout;
+	time64_t timeout;
 	int i;
 	int peer_index = 0;
-	cfs_time_t deadline = cfs_time_current();
+	time64_t deadline = ktime_get_seconds();
 
 	cfs_block_allsigs ();
@@ -2600,8 +2586,7 @@ int ksocknal_reaper(void *arg)
 		}
 
 		/* careful with the jiffy wrap... */
-		while ((timeout = cfs_time_sub(deadline,
-					       cfs_time_current())) <= 0) {
+		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
 			const int n = 4;
 			const int p = 1;
 			int chunk = ksocknal_data.ksnd_peer_hash_size;
@@ -2625,7 +2610,7 @@ int ksocknal_reaper(void *arg)
 					ksocknal_data.ksnd_peer_hash_size;
 			}
 
-			deadline = cfs_time_add(deadline, cfs_time_seconds(p));
+			deadline += p;
 		}
 
 		if (nenomem_conns != 0) {
@@ -2634,16 +2619,16 @@ int ksocknal_reaper(void *arg)
 			 * if any go back on my enomem list. */
 			timeout = SOCKNAL_ENOMEM_RETRY;
 		}
-		ksocknal_data.ksnd_reaper_waketime =
-			cfs_time_add(cfs_time_current(), timeout);
+		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
+						     timeout;
 
-		set_current_state(TASK_INTERRUPTIBLE);
+		set_current_state(TASK_INTERRUPTIBLE);
 		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		if (!ksocknal_data.ksnd_shuttingdown &&
 		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
 		    list_empty(&ksocknal_data.ksnd_zombie_conns))
-			schedule_timeout(timeout);
+			schedule_timeout(cfs_time_seconds(timeout));
 
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
-- 
1.8.3.1