Examination of the ksocklnd time handling revealed that the
code only requires second level precision. Since this is
the case we can move away from using jiffies to time64_t.
This allows us to be independent of the HZ settings in
addition to making it clear what is time handling, using
time64_t versus unsigned long. In the process we remove
many of the various libcfs time wrappers as well.
Change-Id: I968630ef94febd4bff703fb633e677996939f95b
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/26813
Reviewed-by: Doug Oucharek <doug.s.oucharek@intel.com>
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Amir Shehata <amir.shehata@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
}
conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
}
conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
- peer_ni->ksnp_last_alive = ktime_get_real_seconds();
+ peer_ni->ksnp_last_alive = ktime_get_seconds();
peer_ni->ksnp_send_keepalive = 0;
peer_ni->ksnp_error = 0;
peer_ni->ksnp_send_keepalive = 0;
peer_ni->ksnp_error = 0;
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
- conn->ksnc_tx_last_post = ktime_get_real_seconds();
+ conn->ksnc_tx_last_post = ktime_get_seconds();
/* Set the deadline for the outgoing HELLO to drain */
conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
/* Set the deadline for the outgoing HELLO to drain */
conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
- conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_tx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
smp_mb(); /* order with adding to peer_ni's conn list */
list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
smp_mb(); /* order with adding to peer_ni's conn list */
list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
{
int notify = 0;
ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
{
int notify = 0;
- cfs_time_t last_alive = 0;
+ time64_t last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer_ni is dead if it's to another kernel and
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer_ni is dead if it's to another kernel and
if (notify)
lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
if (notify)
lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
+ cfs_time_seconds(last_alive)); /* to jiffies */
-ksocknal_destroy_conn (ksock_conn_t *conn)
+ksocknal_destroy_conn(ksock_conn_t *conn)
/* Final coup-de-grace of the reaper */
CDEBUG (D_NET, "connection %p\n", conn);
/* Final coup-de-grace of the reaper */
CDEBUG (D_NET, "connection %p\n", conn);
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_LNET_PAYLOAD:
last_rcv = conn->ksnc_rx_deadline -
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_LNET_PAYLOAD:
last_rcv = conn->ksnc_rx_deadline -
- cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
+ *ksocknal_tunables.ksnd_timeout;
CERROR("Completing partial receive from %s[%d], "
"ip %pI4h:%d, with error, wanted: %d, left: %d, "
CERROR("Completing partial receive from %s[%d], "
"ip %pI4h:%d, with error, wanted: %d, left: %d, "
- "last alive is %ld secs ago\n",
+ "last alive is %lld secs ago\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
- cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
- last_rcv)));
+ ktime_get_seconds() - last_rcv);
lnet_finalize(conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
lnet_finalize(conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
{
int connect = 1;
time64_t last_alive = 0;
{
int connect = 1;
time64_t last_alive = 0;
- time64_t now = ktime_get_real_seconds();
+ time64_t now = ktime_get_seconds();
ksock_peer_ni_t *peer_ni = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = {
ksock_peer_ni_t *peer_ni = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = {
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ if (bufnob < conn->ksnc_tx_bufnob) {
+ /* something got ACKed */
+ conn->ksnc_tx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
peer_ni->ksnp_last_alive = now;
conn->ksnc_tx_bufnob = bufnob;
}
peer_ni->ksnp_last_alive = now;
conn->ksnc_tx_bufnob = bufnob;
}
read_unlock(glock);
if (last_alive != 0)
read_unlock(glock);
if (last_alive != 0)
+ *when = cfs_time_seconds(last_alive);
- CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
+ CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
libcfs_nid2str(nid), peer_ni,
libcfs_nid2str(nid), peer_ni,
- last_alive ? cfs_duration_sec(now - last_alive) : -1,
+ last_alive ? now - last_alive : -1,
#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer_ni lists */
#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer_ni lists */
#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
-#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */
+#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */
#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
/* reaper sleeps here */
wait_queue_head_t ksnd_reaper_waitq;
/* when reaper will wake */
/* reaper sleeps here */
wait_queue_head_t ksnd_reaper_waitq;
/* when reaper will wake */
- cfs_time_t ksnd_reaper_waketime;
+ time64_t ksnd_reaper_waketime;
/* serialise */
spinlock_t ksnd_reaper_lock;
/* serialise */
spinlock_t ksnd_reaper_lock;
lnet_kiov_t *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize() */
lnet_kiov_t *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize() */
- cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
- struct ksock_msg tx_msg; /* socklnd message buffer */
+ time64_t tx_deadline; /* when (in secs) tx times out */
+ struct ksock_msg tx_msg; /* socklnd message buffer */
int tx_desc_size; /* size of this descriptor */
union {
struct {
int tx_desc_size; /* size of this descriptor */
union {
struct {
/* where I enq waiting input or a forwarding descriptor */
struct list_head ksnc_rx_list;
/* where I enq waiting input or a forwarding descriptor */
struct list_head ksnc_rx_list;
- cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
+ time64_t ksnc_rx_deadline; /* when (in seconds) receive times out */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled;/* being progressed */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled;/* being progressed */
struct list_head ksnc_tx_queue;
/* next TX that can carry a LNet message or ZC-ACK */
ksock_tx_t *ksnc_tx_carrier;
struct list_head ksnc_tx_queue;
/* next TX that can carry a LNet message or ZC-ACK */
ksock_tx_t *ksnc_tx_carrier;
- /* when (in jiffies) tx times out */
- cfs_time_t ksnc_tx_deadline;
+ /* when (in seconds) tx times out */
+ time64_t ksnc_tx_deadline;
/* send buffer marker */
int ksnc_tx_bufnob;
/* # bytes queued */
/* send buffer marker */
int ksnc_tx_bufnob;
/* # bytes queued */
/* being progressed */
int ksnc_tx_scheduled;
/* time stamp of the last posted TX */
/* being progressed */
int ksnc_tx_scheduled;
/* time stamp of the last posted TX */
- cfs_time_t ksnc_tx_last_post;
+ time64_t ksnc_tx_last_post;
} ksock_conn_t;
typedef struct ksock_route
} ksock_conn_t;
typedef struct ksock_route
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer_ni */
atomic_t ksnr_refcount; /* # users */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer_ni */
atomic_t ksnr_refcount; /* # users */
- cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
- cfs_duration_t ksnr_retry_interval; /* how long between retries */
+ time64_t ksnr_timeout; /* when (in secs) reconnection can happen next */
+ time64_t ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
typedef struct ksock_peer
{
struct list_head ksnp_list; /* stash on global peer_ni list */
typedef struct ksock_peer
{
struct list_head ksnp_list; /* stash on global peer_ni list */
- cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
+ time64_t ksnp_last_alive;/* when (in seconds) I was last alive */
struct lnet_process_id ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
struct lnet_process_id ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
/* zero copy requests wait for ACK */
struct list_head ksnp_zc_req_list;
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
/* zero copy requests wait for ACK */
struct list_head ksnp_zc_req_list;
- cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
+ time64_t ksnp_send_keepalive; /* time to send keepalive */
struct lnet_ni *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_NUM_INTERFACES]; /* preferred local interfaces */
struct lnet_ni *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_NUM_INTERFACES]; /* preferred local interfaces */
extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini (void);
extern void ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini (void);
extern void ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni);
if (bufnob < conn->ksnc_tx_bufnob) {
/* allocated send buffer bytes < computed; infer
* something got ACKed */
if (bufnob < conn->ksnc_tx_bufnob) {
/* allocated send buffer bytes < computed; infer
* something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
+ conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = bufnob;
smp_mb();
}
conn->ksnc_tx_bufnob = bufnob;
smp_mb();
}
/* received something... */
nob = rc;
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
+ conn->ksnc_rx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
/* received something... */
nob = rc;
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
+ conn->ksnc_rx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
spin_lock(&peer_ni->ksnp_lock);
/* ZC_REQ is going to be pinned to the peer_ni */
spin_lock(&peer_ni->ksnp_lock);
/* ZC_REQ is going to be pinned to the peer_ni */
- tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ tx->tx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
LASSERT (conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
LASSERT (conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
- SOCKNAL_ENOMEM_RETRY),
- ksocknal_data.ksnd_reaper_waketime))
+ if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
+ ksocknal_data.ksnd_reaper_waketime)
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
case SOCKNAL_MATCH_YES: /* typed connection */
if (typed == NULL || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
case SOCKNAL_MATCH_YES: /* typed connection */
if (typed == NULL || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+ typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
case SOCKNAL_MATCH_MAY: /* fallback connection */
if (fallback == NULL || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
case SOCKNAL_MATCH_MAY: /* fallback connection */
if (fallback == NULL || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+ fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
fallback = c;
fnob = nob;
}
fallback = c;
fnob = nob;
}
conn = (typed != NULL) ? typed : fallback;
if (conn != NULL)
conn = (typed != NULL) ? typed : fallback;
if (conn != NULL)
- conn->ksnc_tx_last_post = cfs_time_current();
+ conn->ksnc_tx_last_post = ktime_get_seconds();
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_tx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = 0;
smp_mb(); /* order with adding to tx_queue */
}
conn->ksnc_tx_bufnob = 0;
smp_mb(); /* order with adding to tx_queue */
}
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
{
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
{
- cfs_time_t now = cfs_time_current();
- struct list_head *tmp;
+ time64_t now = ktime_get_seconds();
+ struct list_head *tmp;
ksock_route_t *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
ksock_route_t *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
continue;
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
continue;
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
- cfs_time_aftereq(now, route->ksnr_timeout))) {
+ now >= route->ksnr_timeout)) {
CDEBUG(D_NET,
"Too soon to retry route %pI4h "
CDEBUG(D_NET,
"Too soon to retry route %pI4h "
- "(cnted %d, interval %ld, %ld secs later)\n",
+ "(cnted %d, interval %lld, %lld secs later)\n",
&route->ksnr_ipaddr,
route->ksnr_connected,
route->ksnr_retry_interval,
&route->ksnr_ipaddr,
route->ksnr_connected,
route->ksnr_retry_interval,
- cfs_duration_sec(route->ksnr_timeout - now));
+ route->ksnr_timeout - now);
if (peer_ni->ksnp_accepting > 0 ||
ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
if (peer_ni->ksnp_accepting > 0 ||
ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
- tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ tx->tx_deadline = ktime_get_seconds() +
+ *ksocknal_tunables.ksnd_timeout;
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
int type;
int wanted;
struct socket *sock;
int type;
int wanted;
struct socket *sock;
int retry_later = 0;
int rc = 0;
int retry_later = 0;
int rc = 0;
- deadline = cfs_time_add(cfs_time_current(),
- cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+ deadline = ktime_get_seconds() + *ksocknal_tunables.ksnd_timeout;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- if (cfs_time_aftereq(cfs_time_current(), deadline)) {
+ if (ktime_get_seconds() >= deadline) {
rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
route->ksnr_ipaddr,
rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
route->ksnr_ipaddr,
* attempt to connect if we lost conn race,
* but the race is resolved quickly usually,
* so min_reconnectms should be good heuristic */
* attempt to connect if we lost conn race,
* but the race is resolved quickly usually,
* so min_reconnectms should be good heuristic */
- route->ksnr_retry_interval =
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
- route->ksnr_retry_interval);
+ route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
+ route->ksnr_timeout = ktime_get_seconds() +
+ route->ksnr_retry_interval;
}
ksocknal_launch_connection_locked(route);
}
ksocknal_launch_connection_locked(route);
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
- /* This is a retry rather than a new connection */
- route->ksnr_retry_interval *= 2;
- route->ksnr_retry_interval =
- MAX(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
- route->ksnr_retry_interval =
- MIN(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+ /* This is a retry rather than a new connection */
+ route->ksnr_retry_interval *= 2;
+ route->ksnr_retry_interval =
+ max_t(time64_t, route->ksnr_retry_interval,
+ *ksocknal_tunables.ksnd_min_reconnectms / 1000);
+ route->ksnr_retry_interval =
+ min_t(time64_t, route->ksnr_retry_interval,
+ *ksocknal_tunables.ksnd_max_reconnectms / 1000);
- LASSERT (route->ksnr_retry_interval != 0);
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
- route->ksnr_retry_interval);
+ LASSERT(route->ksnr_retry_interval);
+ route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
+ time64_t now = ktime_get_seconds();
- cfs_time_t now;
-
- now = cfs_time_current();
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
ksnr_connd_list) {
if (route->ksnr_retry_interval == 0 ||
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
ksnr_connd_list) {
if (route->ksnr_retry_interval == 0 ||
- cfs_time_aftereq(now, route->ksnr_timeout))
+ now >= route->ksnr_timeout)
return route;
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
return route;
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
- (int)*timeout_p > (int)(route->ksnr_timeout - now))
- *timeout_p = (int)(route->ksnr_timeout - now);
+ *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
+ *timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
}
if (conn->ksnc_rx_started &&
}
if (conn->ksnc_rx_started &&
- cfs_time_aftereq(cfs_time_current(),
- conn->ksnc_rx_deadline)) {
+ ktime_get_seconds() >= conn->ksnc_rx_deadline) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
CNETERR("Timeout receiving from %s (%pI4h:%d), "
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
CNETERR("Timeout receiving from %s (%pI4h:%d), "
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
- cfs_time_aftereq(cfs_time_current(),
- conn->ksnc_tx_deadline)) {
+ ktime_get_seconds() >= conn->ksnc_tx_deadline) {
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
tx = list_entry(peer_ni->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
tx = list_entry(peer_ni->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
- if (!cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline))
+ if (ktime_get_seconds() < tx->tx_deadline)
break;
list_del(&tx->tx_list);
break;
list_del(&tx->tx_list);
return 0;
if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
return 0;
if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
- cfs_time_before(cfs_time_current(),
- cfs_time_add(peer_ni->ksnp_last_alive,
- cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+ ktime_get_seconds() < peer_ni->ksnp_last_alive +
+ *ksocknal_tunables.ksnd_keepalive)
- if (cfs_time_before(cfs_time_current(),
- peer_ni->ksnp_send_keepalive))
+ if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
return 0;
/* retry 10 secs later, so we wouldn't put pressure
* on this peer_ni if we failed to send keepalive this time */
return 0;
/* retry 10 secs later, so we wouldn't put pressure
* on this peer_ni if we failed to send keepalive this time */
- peer_ni->ksnp_send_keepalive = cfs_time_shift(10);
+ peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;
conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
if (conn != NULL) {
conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
if (conn != NULL) {
list_for_each_entry(peer_ni, peers, ksnp_list) {
ksock_tx_t *tx_stale;
list_for_each_entry(peer_ni, peers, ksnp_list) {
ksock_tx_t *tx_stale;
- cfs_time_t deadline = 0;
- int resid = 0;
- int n = 0;
+ time64_t deadline = 0;
+ int resid = 0;
+ int n = 0;
if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
list_entry(peer_ni->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
list_entry(peer_ni->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
- if (cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline)) {
-
+ if (ktime_get_seconds() >= tx->tx_deadline) {
ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
tx_stale = NULL;
spin_lock(&peer_ni->ksnp_lock);
list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
tx_stale = NULL;
spin_lock(&peer_ni->ksnp_lock);
list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
- if (!cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline))
+ if (ktime_get_seconds() < tx->tx_deadline)
break;
/* ignore the TX if connection is being closed */
if (tx->tx_conn->ksnc_closing)
break;
/* ignore the TX if connection is being closed */
if (tx->tx_conn->ksnc_closing)
read_unlock(&ksocknal_data.ksnd_global_lock);
CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
read_unlock(&ksocknal_data.ksnd_global_lock);
CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
- "oldest(%p) timed out %ld secs ago, "
+ "oldest(%p) timed out %lld secs ago, "
"resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
"resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
- cfs_duration_sec(cfs_time_current() - deadline),
+ ktime_get_seconds() - deadline,
resid, conn->ksnc_sock->sk->sk_wmem_queued);
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
resid, conn->ksnc_sock->sk->sk_wmem_queued);
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
ksock_sched_t *sched;
struct list_head enomem_conns;
int nenomem_conns;
ksock_sched_t *sched;
struct list_head enomem_conns;
int nenomem_conns;
- cfs_duration_t timeout;
int i;
int peer_index = 0;
int i;
int peer_index = 0;
- cfs_time_t deadline = cfs_time_current();
+ time64_t deadline = ktime_get_seconds();
}
/* careful with the jiffy wrap... */
}
/* careful with the jiffy wrap... */
- while ((timeout = cfs_time_sub(deadline,
- cfs_time_current())) <= 0) {
+ while ((timeout = deadline - ktime_get_seconds()) <= 0) {
const int n = 4;
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
const int n = 4;
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
ksocknal_data.ksnd_peer_hash_size;
}
ksocknal_data.ksnd_peer_hash_size;
}
- deadline = cfs_time_add(deadline, cfs_time_seconds(p));
}
if (nenomem_conns != 0) {
}
if (nenomem_conns != 0) {
* if any go back on my enomem list. */
timeout = SOCKNAL_ENOMEM_RETRY;
}
* if any go back on my enomem list. */
timeout = SOCKNAL_ENOMEM_RETRY;
}
- ksocknal_data.ksnd_reaper_waketime =
- cfs_time_add(cfs_time_current(), timeout);
+ ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
+ timeout;
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
list_empty(&ksocknal_data.ksnd_zombie_conns))
add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
list_empty(&ksocknal_data.ksnd_zombie_conns))
- schedule_timeout(timeout);
+ schedule_timeout(cfs_duration_sec(timeout));
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);