extern int portal_rotor;
int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive,
- cfs_time_t when);
+ time64_t when);
void lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
- cfs_time_t when);
+ time64_t when);
int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
unsigned int priority);
int lnet_check_routes(void);
static inline void
lnet_peer_set_alive(struct lnet_peer_ni *lp)
{
- lp->lpni_last_alive = lp->lpni_last_query = cfs_time_current();
+ /* Stamp both last_alive and last_query with the current time in
+  * seconds (ktime_get_seconds), replacing the jiffies timestamp. */
+ lp->lpni_last_alive = ktime_get_seconds();
+ lp->lpni_last_query = lp->lpni_last_alive;
if (!lp->lpni_alive)
+ /* Dead -> alive transition: per the lnet_notify_locked() prototype
+  * above, args are (lp, notifylnd=0, alive=1, when). */
lnet_notify_locked(lp, 0, 1, lp->lpni_last_alive);
}
void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
/* query of peer aliveness */
- void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, cfs_time_t *when);
+ void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, time64_t *when);
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
int **ni_refs;
/* when I was last alive */
- long ni_last_alive;
+ time64_t ni_last_alive;
/* pointer to parent network */
struct lnet_net *ni_net;
/* # times router went dead<->alive. Protected with lpni_lock */
int lpni_alive_count;
/* time of last aliveness news */
- cfs_time_t lpni_timestamp;
+ time64_t lpni_timestamp;
/* time of last ping attempt */
- cfs_time_t lpni_ping_timestamp;
+ time64_t lpni_ping_timestamp;
/* != 0 if ping reply expected */
- cfs_time_t lpni_ping_deadline;
+ time64_t lpni_ping_deadline;
/* when I was last alive */
- cfs_time_t lpni_last_alive;
+ time64_t lpni_last_alive;
/* when lpni_ni was queried last time */
- cfs_time_t lpni_last_query;
+ time64_t lpni_last_query;
/* network peer is on */
struct lnet_net *lpni_net;
/* peer's NID */
}
void
-kgnilnd_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
+kgnilnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
kgn_net_t *net = ni->ni_data;
kgn_tx_t *tx;
/* LIE if in a quiesce - we will update the timeouts after,
* but we don't want sends failing during it */
if (kgnilnd_data.kgn_quiesce_trigger) {
- *when = jiffies;
+ *when = ktime_get_seconds();
read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
GOTO(out, 0);
}
* - if it was, we lie to LNet because we believe a TX would complete
* on reconnect */
if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
- *when = jiffies;
+ *when = ktime_get_seconds();
}
/* we still want to fire a TX and new conn in this case */
} else {
kgnilnd_launch_tx(tx, net, &id);
}
out:
- CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
+ CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lld\n", peer,
libcfs_nid2str(nid), *when);
EXIT;
}
void kgnilnd_free_phys_fmablk(kgn_device_t *device);
int kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg);
-void kgnilnd_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+void kgnilnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when);
int kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int kgnilnd_eager_recv(struct lnet_ni *ni, void *private,
struct lnet_msg *lntmsg, void **new_private);
}
static void
-kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
+kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
- cfs_time_t last_alive = 0;
- cfs_time_t now = cfs_time_current();
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_ni_t *peer_ni;
- unsigned long flags;
+ time64_t last_alive = 0;
+ time64_t now = ktime_get_seconds();
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_peer_ni_t *peer_ni;
+ unsigned long flags;
read_lock_irqsave(glock, flags);
if (peer_ni == NULL)
kiblnd_launch_tx(ni, NULL, nid);
- CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago\n",
+ CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago\n",
libcfs_nid2str(nid), peer_ni,
- last_alive ? cfs_duration_sec(now - last_alive) : -1);
+ last_alive ? now - last_alive : -1);
return;
}
/* incarnation of peer_ni */
__u64 ibp_incarnation;
-	/* when (in jiffies) I was last alive */
+	/* when (in seconds, ktime_get_seconds) I was last alive */
- cfs_time_t ibp_last_alive;
+ time64_t ibp_last_alive;
/* # users */
atomic_t ibp_refcount;
/* version of peer_ni */
static void
kiblnd_peer_alive (kib_peer_ni_t *peer_ni)
{
+ /* Record that peer_ni was just heard from, in monotonic seconds. */
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer_ni->ibp_last_alive = cfs_time_current();
+ /* This is racy, but everyone's only writing ktime_get_seconds() */
+ peer_ni->ibp_last_alive = ktime_get_seconds();
+ /* Full barrier so the new timestamp is visible before later
+  * accesses; lockless readers of ibp_last_alive presumably rely on
+  * this ordering -- TODO confirm against the reader side. */
smp_mb();
}
kiblnd_peer_notify (kib_peer_ni_t *peer_ni)
{
int error = 0;
- cfs_time_t last_alive = 0;
+ time64_t last_alive = 0;
unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): "
+ CERROR("Timed out RDMA with %s (%lld): "
"c: %u, oc: %u, rc: %u\n",
libcfs_nid2str(peer_ni->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer_ni->ibp_last_alive),
+ ktime_get_seconds() - peer_ni->ibp_last_alive,
conn->ibc_credits,
conn->ibc_outstanding_credits,
conn->ibc_reserved_credits);
}
ksocknal_peer_decref(peer_ni);
- /* NB peer_ni unlinks itself when last conn/route is removed */
+ /* NB peer_ni unlinks itself when last conn/route is removed */
}
static int
}
void
-ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
+ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
int connect = 1;
time64_t last_alive = 0;
read_unlock(glock);
if (last_alive != 0)
- *when = cfs_time_seconds(last_alive);
+ *when = last_alive;
CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
libcfs_nid2str(nid), peer_ni,
extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini(void);
extern void ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni);
}
case IOC_LIBCFS_NOTIFY_ROUTER: {
-	unsigned long jiffies_passed;
-
-	jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
-	jiffies_passed = cfs_time_seconds(jiffies_passed);
+	/* ioc_u64[0] is a CLOCK_REALTIME timestamp; map the elapsed
+	 * interval onto the monotonic ktime_get_seconds() timeline that
+	 * lnet_notify() compares "when" against, mirroring the old
+	 * "jiffies - jiffies_passed" computation. */
+	time64_t when = ktime_get_seconds() -
+			(ktime_get_real_seconds() - data->ioc_u64[0]);
	return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
-			   jiffies - jiffies_passed);
+			   when);
}
case IOC_LIBCFS_LNET_DIST:
static void
lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
{
- cfs_time_t last_alive = 0;
+ time64_t last_alive = 0;
int cpt = lnet_cpt_of_nid_locked(lp->lpni_nid, ni);
LASSERT(lnet_peer_aliveness_enabled(lp));
(ni->ni_net->net_lnd->lnd_query)(ni, lp->lpni_nid, &last_alive);
lnet_net_lock(cpt);
- lp->lpni_last_query = cfs_time_current();
+ lp->lpni_last_query = ktime_get_seconds();
if (last_alive != 0) /* NI has updated timestamp */
lp->lpni_last_alive = last_alive;
/* NB: always called with lnet_net_lock held */
static inline int
-lnet_peer_is_alive (struct lnet_peer_ni *lp, cfs_time_t now)
+lnet_peer_is_alive(struct lnet_peer_ni *lp, time64_t now)
{
- int alive;
- cfs_time_t deadline;
+ int alive;
+ time64_t deadline;
LASSERT (lnet_peer_aliveness_enabled(lp));
*/
spin_lock(&lp->lpni_lock);
if (!lp->lpni_alive && lp->lpni_alive_count > 0 &&
- cfs_time_aftereq(lp->lpni_timestamp, lp->lpni_last_alive)) {
+ lp->lpni_timestamp >= lp->lpni_last_alive) {
spin_unlock(&lp->lpni_lock);
return 0;
}
- deadline =
- cfs_time_add(lp->lpni_last_alive,
- cfs_time_seconds(lp->lpni_net->net_tunables.
- lct_peer_timeout));
- alive = cfs_time_after(deadline, now);
+ deadline = lp->lpni_last_alive +
+ lp->lpni_net->net_tunables.lct_peer_timeout;
+ alive = deadline > now;
/*
* Update obsolete lp_alive except for routers assumed to be dead
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
static int
-lnet_peer_alive_locked (struct lnet_ni *ni, struct lnet_peer_ni *lp)
+lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
{
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
if (!lnet_peer_aliveness_enabled(lp))
return -ENODEV;
*/
if (lp->lpni_last_query != 0) {
static const int lnet_queryinterval = 1;
+ time64_t next_query;
- cfs_time_t next_query =
- cfs_time_add(lp->lpni_last_query,
- cfs_time_seconds(lnet_queryinterval));
+ next_query = lp->lpni_last_query + lnet_queryinterval;
- if (cfs_time_before(now, next_query)) {
+ if (now < next_query) {
if (lp->lpni_alive)
CWARN("Unexpected aliveness of peer %s: "
- "%d < %d (%d/%d)\n",
+ "%lld < %lld (%d/%d)\n",
libcfs_nid2str(lp->lpni_nid),
- (int)now, (int)next_query,
+ now, next_query,
lnet_queryinterval,
lp->lpni_net->net_tunables.lct_peer_timeout);
return 0;
/**
* seconds to drop the next message, it's exclusive with dr_drop_at
*/
- cfs_time_t dr_drop_time;
+ time64_t dr_drop_time;
/** baseline to caculate dr_drop_time */
- cfs_time_t dr_time_base;
+ time64_t dr_time_base;
/** statistic of dropped messages */
struct lnet_fault_stat dr_stat;
};
rule->dr_attr = *attr;
if (attr->u.drop.da_interval != 0) {
- rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
- rule->dr_drop_time = cfs_time_shift(cfs_rand() %
- attr->u.drop.da_interval);
+ rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
+ rule->dr_drop_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.drop.da_interval;
} else {
rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
}
if (attr->u.drop.da_rate != 0) {
rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
} else {
- rule->dr_drop_time = cfs_time_shift(cfs_rand() %
- attr->u.drop.da_interval);
- rule->dr_time_base = cfs_time_shift(attr->u.drop.
- da_interval);
+ rule->dr_drop_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.drop.da_interval;
+ rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
}
spin_unlock(&rule->dr_lock);
}
/* match this rule, check drop rate now */
spin_lock(&rule->dr_lock);
if (rule->dr_drop_time != 0) { /* time based drop */
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
rule->dr_stat.fs_count++;
- drop = cfs_time_aftereq(now, rule->dr_drop_time);
+ drop = now >= rule->dr_drop_time;
if (drop) {
- if (cfs_time_after(now, rule->dr_time_base))
+ if (now > rule->dr_time_base)
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
- cfs_time_seconds(cfs_rand() %
- attr->u.drop.da_interval);
- rule->dr_time_base += cfs_time_seconds(attr->u.drop.
- da_interval);
+ cfs_rand() % attr->u.drop.da_interval;
+ rule->dr_time_base += attr->u.drop.da_interval;
- CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %ld\n",
+ CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst),
rule->dr_drop_time);
/**
* seconds to delay the next message, it's exclusive with dl_delay_at
*/
- cfs_time_t dl_delay_time;
+ time64_t dl_delay_time;
/** baseline to caculate dl_delay_time */
- cfs_time_t dl_time_base;
+ time64_t dl_time_base;
-	/** jiffies to send the next delayed message */
+	/** seconds (ktime_get_seconds) to send the next delayed message */
unsigned long dl_msg_send;
/** delayed message list */
static struct delay_daemon_data delay_dd;
-static cfs_time_t
-round_timeout(cfs_time_t timeout)
-{
- return cfs_time_seconds((unsigned int)
- cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
-}
-
static void
delay_rule_decref(struct lnet_delay_rule *rule)
{
/* match this rule, check delay rate now */
spin_lock(&rule->dl_lock);
if (rule->dl_delay_time != 0) { /* time based delay */
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
rule->dl_stat.fs_count++;
- delay = cfs_time_aftereq(now, rule->dl_delay_time);
+ delay = now >= rule->dl_delay_time;
if (delay) {
- if (cfs_time_after(now, rule->dl_time_base))
+ if (now > rule->dl_time_base)
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
- cfs_time_seconds(cfs_rand() %
- attr->u.delay.la_interval);
- rule->dl_time_base += cfs_time_seconds(attr->u.delay.
- la_interval);
+ cfs_rand() % attr->u.delay.la_interval;
+ rule->dl_time_base += attr->u.delay.la_interval;
- CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %ld\n",
+ CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst),
rule->dl_delay_time);
rule->dl_stat.u.delay.ls_delayed++;
list_add_tail(&msg->msg_list, &rule->dl_msg_list);
- msg->msg_delay_send = round_timeout(
- cfs_time_shift(attr->u.delay.la_latency));
+ msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
if (rule->dl_msg_send == -1) {
rule->dl_msg_send = msg->msg_delay_send;
-	mod_timer(&rule->dl_timer, rule->dl_msg_send);
+	/* dl_msg_send is now in seconds but mod_timer() takes a
+	 * jiffies expiry; convert the latency explicitly. */
+	mod_timer(&rule->dl_timer,
+		  jiffies + cfs_time_seconds(attr->u.delay.la_latency));
{
struct lnet_msg *msg;
struct lnet_msg *tmp;
- unsigned long now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
-	if (!all && rule->dl_msg_send > now)
+	/* dl_msg_send now stores msg_delay_send in seconds, the same
+	 * unit as "now" -- compare directly, no cfs_time_seconds(). */
+	if (!all && rule->dl_msg_send > now)
return;
spin_lock(&rule->dl_lock);
rule->dl_attr = *attr;
if (attr->u.delay.la_interval != 0) {
- rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
- rule->dl_delay_time = cfs_time_shift(cfs_rand() %
- attr->u.delay.la_interval);
+ rule->dl_time_base = ktime_get_seconds() +
+ attr->u.delay.la_interval;
+ rule->dl_delay_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.delay.la_interval;
} else {
rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
}
if (attr->u.delay.la_rate != 0) {
rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
} else {
- rule->dl_delay_time = cfs_time_shift(cfs_rand() %
- attr->u.delay.la_interval);
- rule->dl_time_base = cfs_time_shift(attr->u.delay.
- la_interval);
+ rule->dl_delay_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.delay.la_interval;
+ rule->dl_time_base = ktime_get_seconds() +
+ attr->u.delay.la_interval;
}
spin_unlock(&rule->dl_lock);
}
spin_lock_init(&lpni->lpni_lock);
lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
- lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
+ lpni->lpni_last_alive = ktime_get_seconds(); /* assumes alive */
lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
lpni->lpni_nid = nid;
lpni->lpni_cpt = cpt;
void
lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
- cfs_time_t when)
+ time64_t when)
{
- if (cfs_time_before(when, lp->lpni_timestamp)) { /* out of date information */
+ if (lp->lpni_timestamp > when) { /* out of date information */
CDEBUG(D_NET, "Out of date\n");
return;
}
*/
spin_lock(&lp->lpni_lock);
- lp->lpni_timestamp = when; /* update timestamp */
+ lp->lpni_timestamp = when; /* update timestamp */
lp->lpni_ping_deadline = 0; /* disable ping timeout */
if (lp->lpni_alive_count != 0 && /* got old news */
* we ping alive routers to try to detect router death before
* apps get burned). */
- lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
+ lnet_notify_locked(lp, 1, !event->status, ktime_get_seconds());
/* The router checker will wake up very shortly and do the
* actual notification.
* XXX If 'lp' stops being a router before then, it will still
{
struct lnet_ni *ni = NULL;
time64_t now;
- int timeout;
+ time64_t timeout;
LASSERT(the_lnet.ln_routing);
LASSERT(ni->ni_status != NULL);
if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
- CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
+ CDEBUG(D_NET, "NI(%s:%lld) status changed to down\n",
libcfs_nid2str(ni->ni_nid), timeout);
/* NB: so far, this is the only place to set
* NI status to "down" */
lnet_ping_router_locked(struct lnet_peer_ni *rtr)
{
struct lnet_rc_data *rcd = NULL;
- cfs_time_t now = cfs_time_current();
- int secs;
- struct lnet_ni *ni;
+ time64_t now = ktime_get_seconds();
+ time64_t secs;
+ struct lnet_ni *ni;
lnet_peer_ni_addref_locked(rtr);
if (rtr->lpni_ping_deadline != 0 && /* ping timed out? */
- cfs_time_after(now, rtr->lpni_ping_deadline))
+ now > rtr->lpni_ping_deadline)
lnet_notify_locked(rtr, 1, 0, now);
/* Run any outstanding notifications */
secs = lnet_router_check_interval(rtr);
CDEBUG(D_NET,
- "rtr %s %d: deadline %lu ping_notsent %d alive %d "
- "alive_count %d lpni_ping_timestamp %lu\n",
+ "rtr %s %lld: deadline %lld ping_notsent %d alive %d "
+ "alive_count %d lpni_ping_timestamp %lld\n",
libcfs_nid2str(rtr->lpni_nid), secs,
rtr->lpni_ping_deadline, rtr->lpni_ping_notsent,
rtr->lpni_alive, rtr->lpni_alive_count, rtr->lpni_ping_timestamp);
if (secs != 0 && !rtr->lpni_ping_notsent &&
- cfs_time_after(now, cfs_time_add(rtr->lpni_ping_timestamp,
- cfs_time_seconds(secs)))) {
+ now > rtr->lpni_ping_timestamp + secs) {
int rc;
struct lnet_process_id id;
struct lnet_handle_md mdh;
mdh = rcd->rcd_mdh;
if (rtr->lpni_ping_deadline == 0) {
- rtr->lpni_ping_deadline =
- cfs_time_shift(router_ping_timeout);
+ rtr->lpni_ping_deadline = ktime_get_seconds() +
+ router_ping_timeout;
}
lnet_net_unlock(rtr->lpni_cpt);
}
int
-lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, cfs_time_t when)
+lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, time64_t when)
{
struct lnet_peer_ni *lp = NULL;
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
int cpt = lnet_cpt_of_nid(nid, ni);
LASSERT (!in_interrupt ());
}
/* can't do predictions... */
- if (cfs_time_after(when, now)) {
+ if (when > now) {
CWARN("Ignoring prediction from %s of %s %s "
- "%ld seconds in the future\n",
+ "%lld seconds in the future\n",
(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid), alive ? "up" : "down",
- cfs_duration_sec(cfs_time_sub(when, now)));
+ libcfs_nid2str(nid), alive ? "up" : "down", when - now);
return -EINVAL;
}
if (peer != NULL) {
lnet_nid_t nid = peer->lpni_nid;
- cfs_time_t now = cfs_time_current();
- cfs_time_t deadline = peer->lpni_ping_deadline;
+ time64_t now = ktime_get_seconds();
+ time64_t deadline = peer->lpni_ping_deadline;
int nrefs = atomic_read(&peer->lpni_refcount);
int nrtrrefs = peer->lpni_rtr_refcount;
int alive_cnt = peer->lpni_alive_count;
int alive = peer->lpni_alive;
int pingsent = !peer->lpni_ping_notsent;
- int last_ping = cfs_duration_sec(cfs_time_sub(now,
- peer->lpni_ping_timestamp));
+ time64_t last_ping = now - peer->lpni_ping_timestamp;
int down_ni = 0;
struct lnet_route *rtr;
if (deadline == 0)
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
+ "%-4d %7d %9d %6s %12lld %9d %8s %7d %s\n",
nrefs, nrtrrefs, alive_cnt,
alive ? "up" : "down", last_ping,
pingsent, "NA", down_ni,
libcfs_nid2str(nid));
else
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4d %7d %9d %6s %12d %9d %8lu %7d %s\n",
+ "%-4d %7d %9d %6s %12lld %9d %8lld %7d %s\n",
nrefs, nrtrrefs, alive_cnt,
alive ? "up" : "down", last_ping,
pingsent,
- cfs_duration_sec(cfs_time_sub(deadline, now)),
+ deadline - now,
down_ni, libcfs_nid2str(nid));
LASSERT(tmpstr + tmpsiz - s > 0);
}
if (peer != NULL) {
lnet_nid_t nid = peer->lpni_nid;
int nrefs = atomic_read(&peer->lpni_refcount);
- int lastalive = -1;
+ time64_t lastalive = -1;
char *aliveness = "NA";
int maxcr = (peer->lpni_net) ?
peer->lpni_net->net_tunables.lct_peer_tx_credits : 0;
aliveness = peer->lpni_alive ? "up" : "down";
if (lnet_peer_aliveness_enabled(peer)) {
- cfs_time_t now = cfs_time_current();
- cfs_duration_t delta;
+ time64_t now = ktime_get_seconds();
- delta = cfs_time_sub(now, peer->lpni_last_alive);
- lastalive = cfs_duration_sec(delta);
+ lastalive = now - peer->lpni_last_alive;
/* No need to mess up peers contents with
* arbitrarily long integers - it suffices to
lnet_net_unlock(cpt);
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n",
+ "%-24s %4d %5s %5lld %5d %5d %5d %5d %5d %d\n",
libcfs_nid2str(nid), nrefs, aliveness,
lastalive, maxcr, rtrcr, minrtrcr, txcr,
mintxcr, txqnob);
ni = lnet_get_ni_idx_locked(skip);
if (ni != NULL) {
- struct lnet_tx_queue *tq;
- char *stat;
+ struct lnet_tx_queue *tq;
+ char *stat;
time64_t now = ktime_get_real_seconds();
- int last_alive = -1;
- int i;
- int j;
+ time64_t last_alive = -1;
+ int i;
+ int j;
if (the_lnet.ln_routing)
last_alive = now - ni->ni_last_alive;
lnet_net_lock(i);
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
+ "%-24s %6s %5lld %4d %4d %4d %5d %5d %5d\n",
libcfs_nid2str(ni->ni_nid), stat,
last_alive, *ni->ni_refs[i],
ni->ni_net->net_tunables.lct_peer_tx_credits,