static void
kiblnd_debug_tx (kib_tx_t *tx)
{
- CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
+ CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lld "
"cookie %#llx msg %s%s type %x cred %d\n",
tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
- tx->tx_status, tx->tx_deadline, tx->tx_cookie,
+ tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie,
tx->tx_lntmsg[0] == NULL ? "-" : "!",
tx->tx_lntmsg[1] == NULL ? "-" : "!",
tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
if (rc)
goto out_fpo;
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
+ fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
+ fpo->fpo_owner = fps;
*pp_fpo = fpo;
return 0;
}
static int
-kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
+kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, time64_t now)
{
if (fpo->fpo_map_count != 0) /* still in use */
return 0;
if (fpo->fpo_failed)
return 1;
- return cfs_time_aftereq(now, fpo->fpo_deadline);
+ return now >= fpo->fpo_deadline;
}
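The same idiom recurs across the pool code: deadlines are stored as absolute monotonic seconds taken from ktime_get_seconds(), so arming a deadline is plain addition and the expiry test is a plain comparison, with no jiffies wrap-around to reason about. A minimal sketch of the pattern, using hypothetical struct and helper names (not Lustre symbols):

/* Illustrative only: a generic seconds-based deadline, not a Lustre type. */
#include <linux/ktime.h>
#include <linux/types.h>

struct example_pool {
	time64_t deadline;		/* absolute monotonic seconds */
};

static void example_arm(struct example_pool *p, time64_t lifetime)
{
	/* cfs_time_shift(n) meant "now + n" in jiffies; the replacement
	 * is plain seconds arithmetic on the monotonic clock. */
	p->deadline = ktime_get_seconds() + lifetime;
}

static bool example_expired(const struct example_pool *p)
{
	/* cfs_time_aftereq(now, deadline) becomes a direct comparison */
	return ktime_get_seconds() >= p->deadline;
}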
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
struct list_head zombies = LIST_HEAD_INIT(zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
+ kib_fmr_pool_t *fpo = fmr->fmr_pool;
kib_fmr_poolset_t *fps;
- cfs_time_t now = cfs_time_current();
- kib_fmr_pool_t *tmp;
- int rc;
+ time64_t now = ktime_get_seconds();
+ kib_fmr_pool_t *tmp;
+ int rc;
if (!fpo)
return;
spin_lock(&fps->fps_lock);
version = fps->fps_version;
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
fpo->fpo_map_count++;
if (fpo->fpo_is_fmr) {
}
- if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
+ if (ktime_get_seconds() < fps->fps_next_retry) {
/* someone failed recently */
spin_unlock(&fps->fps_lock);
return -EAGAIN;
fps->fps_version++;
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ fps->fps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
}
spin_unlock(&fps->fps_lock);
memset(pool, 0, sizeof(kib_pool_t));
INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- pool->po_owner = ps;
- pool->po_size = size;
+ pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
+ pool->po_owner = ps;
+ pool->po_size = size;
}
static void
}
static int
-kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
+kiblnd_pool_is_idle(kib_pool_t *pool, time64_t now)
{
if (pool->po_allocated != 0) /* still in use */
return 0;
if (pool->po_failed)
return 1;
- return cfs_time_aftereq(now, pool->po_deadline);
+ return now >= pool->po_deadline;
}
void
kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
{
struct list_head zombies = LIST_HEAD_INIT(zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
- cfs_time_t now = cfs_time_current();
+ kib_poolset_t *ps = pool->po_owner;
+ kib_pool_t *tmp;
+ time64_t now = ktime_get_seconds();
spin_lock(&ps->ps_lock);
kib_pool_t *pool;
int rc;
unsigned int interval = 1;
- cfs_time_t time_before;
- unsigned int trips = 0;
+ ktime_t time_before;
+ unsigned int trips = 0;
again:
spin_lock(&ps->ps_lock);
continue;
pool->po_allocated++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_deadline = ktime_get_seconds() +
+ IBLND_POOL_DEADLINE;
node = pool->po_free_list.next;
list_del(node);
goto again;
}
- if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
+ if (ktime_get_seconds() < ps->ps_next_retry) {
/* someone failed recently */
spin_unlock(&ps->ps_lock);
return NULL;
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
- time_before = cfs_time_current();
+ time_before = ktime_get();
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
- CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
- cfs_time_current() - time_before);
+ CDEBUG(D_NET, "ps_pool_create took %lld ms to complete",
+ ktime_ms_delta(ktime_get(), time_before));
spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
if (rc == 0) {
list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ ps->ps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
CERROR("Can't allocate new %s pool because out of memory\n",
ps->ps_name);
}
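Two clocks appear in this hunk: the duration of ps_pool_create() is measured with ktime_t and reported in milliseconds, while ps_next_retry stays in whole seconds because a retry backoff needs no finer resolution. A hedged sketch of the elapsed-time idiom, where do_work() is a hypothetical stand-in for the call being timed:

/* Illustrative timing sketch; do_work() is a stand-in, not a Lustre symbol. */
#include <linux/ktime.h>
#include <linux/printk.h>

static void do_work(void)
{
	/* stands in for ps_pool_create() or any slow call being measured */
}

static void example_timed_call(void)
{
	ktime_t before = ktime_get();

	do_work();

	/* ktime_ms_delta() yields a signed 64-bit millisecond difference,
	 * which is why the CDEBUG above prints it with %lld. */
	pr_debug("do_work took %lld ms\n",
		 ktime_ms_delta(ktime_get(), before));
}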
char ibd_ifname[KIB_IFNAME_SIZE];
int ibd_nnets; /* # nets extant */
- cfs_time_t ibd_next_failover;
+ time64_t ibd_next_failover;
/* # failover failures */
int ibd_failed_failover;
/* failover in progress */
/* failed pool list */
struct list_head ps_failed_pool_list;
/* time stamp for retry if failed to allocate */
- cfs_time_t ps_next_retry;
+ time64_t ps_next_retry;
/* is allocating new pool */
int ps_increasing;
/* new pool size */
/* pool_set of this pool */
kib_poolset_t *po_owner;
/* deadline of this pool */
- cfs_time_t po_deadline;
+ time64_t po_deadline;
/* # of elements in use */
int po_allocated;
/* pool is created on failed HCA */
/* is allocating new pool */
int fps_increasing;
/* time stamp for retry if failed to allocate */
- cfs_time_t fps_next_retry;
+ time64_t fps_next_retry;
} kib_fmr_poolset_t;
#ifndef HAVE_IB_RDMA_WR
int fpo_pool_size;
} fast_reg;
};
- cfs_time_t fpo_deadline; /* deadline of this pool */
+ time64_t fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
bool fpo_is_fmr; /* True if FMR pools allocated */
/* LNET completion status */
int tx_status;
/* completion deadline */
- unsigned long tx_deadline;
+ ktime_t tx_deadline;
/* completion cookie */
__u64 tx_cookie;
/* lnet msgs to finalize on completion */
/* CQ callback fired */
unsigned int ibc_ready:1;
/* time of last send */
- unsigned long ibc_last_send;
+ ktime_t ibc_last_send;
/** link chain for kiblnd_check_conns only */
struct list_head ibc_connd_list;
/** rxs completed before ESTABLISHED */
struct list_head ibp_tx_queue;
/* incarnation of peer_ni */
__u64 ibp_incarnation;
- /* when (in jiffies) I was last alive */
+ /* when (in seconds) I was last alive */
time64_t ibp_last_alive;
/* # users */
atomic_t ibp_refcount;
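Taken together, the field conversions above follow two conventions; the block below is a reading aid added alongside the patch, not code from it:

/*
 * Illustrative note on the time types used by this conversion:
 *
 *   time64_t fields (fpo_deadline, po_deadline, fps_next_retry,
 *   ps_next_retry, ibd_next_failover, ibp_last_alive) hold absolute
 *   monotonic seconds, typically from ktime_get_seconds(), and are
 *   compared with plain relational operators.
 *
 *   ktime_t fields (tx_deadline, ibc_last_send) hold nanosecond
 *   resolution stamps from ktime_get() and are compared with
 *   ktime_after()/ktime_compare().
 */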
static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
+ s64 keepalive_ns = *kiblnd_tunables.kib_keepalive * NSEC_PER_SEC;
+
return (*kiblnd_tunables.kib_keepalive > 0) &&
- cfs_time_after(jiffies, conn->ibc_last_send +
- msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
- MSEC_PER_SEC));
+ ktime_after(ktime_get(),
+ ktime_add_ns(conn->ibc_last_send, keepalive_ns));
}
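With ibc_last_send now a ktime_t, the keepalive test becomes ktime arithmetic instead of a jiffies comparison. A self-contained sketch of the same check, with hypothetical parameter names (last_send, keepalive_secs):

/* Illustrative only; the names are hypothetical, not Lustre symbols. */
#include <linux/ktime.h>
#include <linux/types.h>

static bool example_needs_keepalive(ktime_t last_send, int keepalive_secs)
{
	/* widen to s64 before multiplying so the nanosecond count cannot
	 * overflow a 32-bit long on 32-bit kernels */
	s64 keepalive_ns = (s64)keepalive_secs * NSEC_PER_SEC;

	/* true once more than keepalive_secs have elapsed since last_send */
	return keepalive_secs > 0 &&
	       ktime_after(ktime_get(), ktime_add_ns(last_send, keepalive_ns));
}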
static inline int
rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
}
- conn->ibc_last_send = jiffies;
+ conn->ibc_last_send = ktime_get();
if (rc == 0)
return 0;
kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
struct list_head *q;
+ s64 timeout_ns;
LASSERT(tx->tx_nwrq > 0); /* work items set up */
LASSERT(!tx->tx_queued); /* not queued for sending already */
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ timeout_ns = *kiblnd_tunables.kib_timeout * NSEC_PER_SEC;
tx->tx_queued = 1;
- tx->tx_deadline = jiffies +
- msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
- MSEC_PER_SEC);
+ tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
if (tx->tx_conn == NULL) {
kiblnd_conn_addref(conn);
/* connection established */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- conn->ibc_last_send = jiffies;
+ conn->ibc_last_send = ktime_get();
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer_ni);
LASSERT(tx->tx_waiting || tx->tx_sending != 0);
}
- if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
- CERROR("Timed out tx: %s, %lu seconds\n",
+ if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+ CERROR("Timed out tx: %s, %lld seconds\n",
kiblnd_queue2str(conn, txs),
- cfs_duration_sec(jiffies - tx->tx_deadline));
+ ktime_ms_delta(ktime_get(),
+ tx->tx_deadline) / MSEC_PER_SEC);
return 1;
}
}
/* Check tx_deadline */
list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
- if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
- CWARN("Timed out tx for %s: %lu seconds\n",
+ if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+ CWARN("Timed out tx for %s: %lld seconds\n",
libcfs_nid2str(peer_ni->ibp_nid),
- cfs_duration_sec(jiffies - tx->tx_deadline));
+ ktime_ms_delta(ktime_get(),
+ tx->tx_deadline) / MSEC_PER_SEC);
list_move(&tx->tx_list, &timedout_txs);
}
}
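Both timeout messages convert the overdue interval to seconds by taking a millisecond delta and dividing by MSEC_PER_SEC; ktime.h provides ktime_ms_delta() but no seconds-granularity delta helper, hence the division. A small sketch of the reporting pattern, with a hypothetical deadline argument:

/* Illustrative only; 'deadline' is a hypothetical ktime_t deadline. */
#include <linux/ktime.h>
#include <linux/printk.h>

static void example_report_overdue(ktime_t deadline)
{
	if (ktime_compare(ktime_get(), deadline) >= 0)
		pr_warn("timed out %lld seconds ago\n",
			ktime_ms_delta(ktime_get(), deadline) / MSEC_PER_SEC);
}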
list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
ibd_fail_list) {
- if (cfs_time_before(cfs_time_current(),
- dev->ibd_next_failover))
+ if (ktime_get_seconds() < dev->ibd_next_failover)
continue;
do_failover = 1;
break;
LASSERT (dev->ibd_failover);
dev->ibd_failover = 0;
if (rc >= 0) { /* Device is OK or failover succeed */
- dev->ibd_next_failover = cfs_time_shift(3);
+ dev->ibd_next_failover = ktime_get_seconds() + 3;
continue;
}
/* failed to failover, retry later */
- dev->ibd_next_failover =
- cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ dev->ibd_next_failover = ktime_get_seconds() +
+ min(dev->ibd_failed_failover, 10);
if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
list_empty(&ksocknal_data.ksnd_zombie_conns))
- schedule_timeout(cfs_duration_sec(timeout));
+ schedule_timeout(cfs_time_seconds(timeout));
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
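The socklnd reaper fix goes in the opposite direction: schedule_timeout() takes jiffies, so a seconds value must be converted up with cfs_time_seconds() (seconds to jiffies, roughly n * HZ), whereas the old cfs_duration_sec() converts jiffies down to seconds and would make the reaper under-sleep whenever HZ > 1. A minimal sketch of the intended sleep in plain kernel primitives, assuming the timeout is a count of seconds as the fix implies:

/* Illustrative only: sleep for 'secs' seconds via schedule_timeout*(),
 * which expects jiffies; cfs_time_seconds() is the libcfs wrapper for
 * the same seconds-to-jiffies conversion. */
#include <linux/jiffies.h>
#include <linux/sched.h>

static void example_sleep_seconds(unsigned int secs)
{
	schedule_timeout_interruptible(secs * HZ);
}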