cfs_time_current||jiffies
cfs_time_current_64||ktime_get
cfs_time_current_sec||ktime_get_real_seconds
+msecs_to_jiffies||cfs_time_seconds
DEFINE_TIMER||CFS_DEFINE_TIMER
DN_MAX_BONUSLEN||DN_BONUS_SIZE(dnodesize)
DN_OLD_MAX_BONUSLEN||DN_BONUS_SIZE(DNODE_MIN_SIZE)
lnet_ni_status_t||struct lnet_ni_status
lnet_ping_info_t||struct lnet_ping_info
lnet_process_id_packed_t||struct lnet_process_id_packed
+HZ||cfs_time_seconds
LPD64||%lld
LPLD||%ld
LPLU||%lu
CERROR("cfs_fail_timeout id %x sleeping for %dms\n", id, ms);
while (ktime_before(ktime_get(), till)) {
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(1000) / 10);
+ schedule_timeout(cfs_time_seconds(1) / 10);
set_current_state(TASK_RUNNING);
if (!cfs_fail_loc) {
CERROR("cfs_fail_timeout interrupted\n");
memset(buf, 0xAD, PAGE_SIZE);
kunmap(page);
- for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC / 4),
+ for (start = jiffies, end = start + cfs_time_seconds(1) / 4,
bcount = 0; time_before(jiffies, end) && err == 0; bcount++) {
struct ahash_request *req;
int i;
kiblnd_data.kib_peer_hash_size;
}
- deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+ deadline += cfs_time_seconds(p);
spin_lock_irqsave(lock, flags);
}
/* If timeout is negative then set default of 3 minutes */
if (((s32)data->ioc_u32[1] <= 0) ||
data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
- timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
+ timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
else
- timeout = msecs_to_jiffies(data->ioc_u32[1]);
+ timeout = nsecs_to_jiffies((u64)data->ioc_u32[1] * NSEC_PER_MSEC);
rc = lnet_ping(id, timeout, data->ioc_pbuf1,
data->ioc_plen1 / sizeof(struct lnet_process_id));
/* If timeout is negative then set default of 3 minutes */
if (((s32)ping->op_param) <= 0 ||
ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
- timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
+ timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
else
- timeout = msecs_to_jiffies(ping->op_param);
+ timeout = nsecs_to_jiffies((u64)ping->op_param * NSEC_PER_MSEC);
rc = lnet_ping(ping->ping_id, timeout,
ping->ping_buf,
int which;
int unlinked = 0;
int replied = 0;
- const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
+ const signed long a_long_time = cfs_time_seconds(60);
struct lnet_ping_buffer *pbuf;
struct lnet_process_id tmpid;
int i;
lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
{
int rc;
- long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
+ long jiffies_left = cfs_time_seconds(timeout);
unsigned long then;
struct timeval tv;
if (timeout != 0) {
/* Set send timeout to remaining time */
- tv = (struct timeval) {
- .tv_sec = jiffies_left /
- msecs_to_jiffies(MSEC_PER_SEC),
- .tv_usec = ((jiffies_left %
- msecs_to_jiffies(MSEC_PER_SEC)) *
- USEC_PER_SEC) /
- msecs_to_jiffies(MSEC_PER_SEC)
- };
-
+ jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
(char *)&tv, sizeof(tv));
if (rc != 0) {
lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
{
int rc;
- long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
+ long jiffies_left = cfs_time_seconds(timeout);
unsigned long then;
struct timeval tv;
};
/* Set receive timeout to remaining time */
- tv = (struct timeval) {
- .tv_sec = jiffies_left / msecs_to_jiffies(MSEC_PER_SEC),
- .tv_usec = ((jiffies_left %
- msecs_to_jiffies(MSEC_PER_SEC)) *
- USEC_PER_SEC) /
- msecs_to_jiffies(MSEC_PER_SEC)
- };
+ jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
(char *)&tv, sizeof(tv));
if (rc != 0) {
while (wait_event_timeout(obd->obd_next_transno_waitq,
check_routine(lut),
- msecs_to_jiffies(60 * MSEC_PER_SEC)) == 0)
+ cfs_time_seconds(60)) == 0)
; /* wait indefinitely for event, but don't trigger watchdog */
if (obd->obd_abort_recovery) {
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
if (force)
- lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
- MSEC_PER_SEC) / 4, NULL, NULL);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout) / 4,
+ NULL, NULL);
rc = l_wait_event(ns->ns_waitq,
atomic_read(&ns->ns_bref) == 0, &lwi);
bool dirty = false;
if (limit != LFSCK_SPEED_NO_LIMIT) {
- if (limit > msecs_to_jiffies(MSEC_PER_SEC)) {
- lfsck->li_sleep_rate = limit /
- msecs_to_jiffies(MSEC_PER_SEC);
+ if (limit > cfs_time_seconds(1)) {
+ lfsck->li_sleep_rate = limit / cfs_time_seconds(1);
lfsck->li_sleep_jif = 1;
} else {
lfsck->li_sleep_rate = 1;
- lfsck->li_sleep_jif = msecs_to_jiffies(MSEC_PER_SEC) /
- limit;
+ lfsck->li_sleep_jif = cfs_time_seconds(1) / limit;
}
} else {
lfsck->li_sleep_jif = 0;
if (unlikely(rc == -EINPROGRESS)) {
retry = true;
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
+ schedule_timeout(cfs_time_seconds(1));
set_current_state(TASK_RUNNING);
if (!signal_pending(current) &&
thread_is_running(&lfsck->li_thread))
/* wait running statahead threads to quit */
while (atomic_read(&sbi->ll_sa_running) > 0) {
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
+ schedule_timeout(cfs_time_seconds(1) >> 3);
}
}
* safely because statahead RPC will access sai data */
while (sai->sai_sent != sai->sai_replied) {
/* in case we're not woken up, timeout wait */
- lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
- NULL, NULL);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(1) >> 3, NULL, NULL);
l_wait_event(sa_thread->t_ctl_waitq,
sai->sai_sent == sai->sai_replied, &lwi);
}
++io->ci_ndelay_tried;
if (io->ci_ndelay && io->ci_ndelay_tried >= comp->lo_mirror_count) {
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC)); /* 10ms */
+ schedule_timeout(cfs_time_seconds(1)); /* 1s; NOTE: stale comment said 10ms — confirm intended delay */
if (signal_pending(current))
RETURN(-EINTR);
cfs_fail_val ==
tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) {
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(3 * MSEC_PER_SEC));
+ schedule_timeout(cfs_time_seconds(3));
}
return tgt_connect(tsi);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
random so everyone doesn't try to reconnect at once. */
- to = msecs_to_jiffies(MGC_TIMEOUT_MIN_SECONDS * MSEC_PER_SEC);
+ to = cfs_time_seconds(MGC_TIMEOUT_MIN_SECONDS);
/* rand is centi-seconds */
- to += msecs_to_jiffies(rand * MSEC_PER_SEC / 100);
+ to += cfs_time_seconds(rand) / 100;
lwi = LWI_TIMEOUT(to, NULL, NULL);
l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
&lwi);
memset(buf, 0xAD, PAGE_SIZE);
kunmap(page);
- for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC / 4),
+ for (start = jiffies, end = start + cfs_time_seconds(1) / 4,
bcount = 0; time_before(jiffies, end) && rc == 0; bcount++) {
rc = __obd_t10_performance_test(obd_name, cksum_type, page,
buf_len / PAGE_SIZE);
/* The connection with MGS is not established.
* Try again after 2 seconds. Interruptable. */
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(
- msecs_to_jiffies(MSEC_PER_SEC) * 2);
+ schedule_timeout(cfs_time_seconds(2));
set_current_state(TASK_RUNNING);
if (!signal_pending(current))
goto again;
if (client_gtd.gtd_stopped == 1)
return;
- if (next_shrink > ktime_get_seconds())
- schedule_delayed_work(&work, msecs_to_jiffies(
- (next_shrink - ktime_get_seconds()) *
- MSEC_PER_SEC));
- else
+ if (next_shrink > ktime_get_seconds()) {
+ time64_t delay = next_shrink - ktime_get_seconds();
+
+ schedule_delayed_work(&work, cfs_time_seconds(delay));
+ } else {
schedule_work(&work.work);
+ }
}
void osc_schedule_grant_work(void)
*/
if (last_credits != oh->ot_credits &&
time_after(jiffies, last_printed +
- msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
+ cfs_time_seconds(60)) &&
osd_transaction_size(dev) > 512) {
CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
oh->ot_credits, osd_transaction_size(dev));
*/
rc = osp_precreate_cleanup_orphans(&env, d);
if (rc != 0) {
- schedule_timeout_interruptible(
- msecs_to_jiffies(MSEC_PER_SEC));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
continue;
}
}
read_unlock(&rsi_cache.hash_lock);
if (valid == 0) {
- unsigned long jiffies;
- jiffies = msecs_to_jiffies(MSEC_PER_SEC *
- GSS_SVC_UPCALL_TIMEOUT);
- schedule_timeout(jiffies);
+ unsigned long timeout;
+
+ timeout = cfs_time_seconds(GSS_SVC_UPCALL_TIMEOUT);
+ schedule_timeout(timeout);
}
cache_get(&rsip->h);
goto cache_check;
if (atomic_read(&rsi_cache.readers) > 0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(msecs_to_jiffies(MSEC_PER_SEC / 4) > 0);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
+ schedule_timeout(cfs_time_seconds(1) / 4);
}
if (atomic_read(&rsi_cache.readers) == 0)
newctx, newctx->cc_flags);
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
+ schedule_timeout(cfs_time_seconds(1));
} else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
== 0)) {
/*
req->rq_restart = 0;
spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
ctx_refresh_timeout,
ctx_refresh_interrupt, req);
rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);