if (osp == NULL)
return -EINVAL;
- seq_printf(m, "%u\n", osp->opd_statfs_maxage);
+ seq_printf(m, "%lld\n", osp->opd_statfs_maxage);
return 0;
}
static int osp_sync(const struct lu_env *env, struct dt_device *dev)
{
struct osp_device *d = dt2osp_dev(dev);
- cfs_time_t expire;
struct l_wait_info lwi = { 0 };
+ time64_t start = ktime_get_seconds();
int recs, rc = 0;
- unsigned long start = cfs_time_current();
- __u64 old;
+ u64 old;
ENTRY;
atomic_read(&d->opd_async_updates_count));
/* make sure the connection is fine */
- expire = cfs_time_shift(obd_timeout);
- lwi = LWI_TIMEOUT(expire - cfs_time_current(), osp_sync_timeout, d);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout), osp_sync_timeout, d);
rc = l_wait_event(d->opd_sync_barrier_waitq,
atomic_read(&d->opd_async_updates_count) == 0,
&lwi);
while (atomic64_read(&d->opd_sync_processed_recs) < old + recs) {
__u64 last = atomic64_read(&d->opd_sync_processed_recs);
/* make sure the connection is fine */
- expire = cfs_time_shift(obd_timeout);
- lwi = LWI_TIMEOUT(expire - cfs_time_current(),
+ lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
osp_sync_timeout, d);
l_wait_event(d->opd_sync_barrier_waitq,
atomic64_read(&d->opd_sync_processed_recs)
while (atomic_read(&d->opd_sync_rpcs_in_flight) > 0) {
old = atomic_read(&d->opd_sync_rpcs_in_flight);
- expire = cfs_time_shift(obd_timeout);
- lwi = LWI_TIMEOUT(expire - cfs_time_current(),
+ lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
osp_sync_timeout, d);
l_wait_event(d->opd_sync_barrier_waitq,
atomic_read(&d->opd_sync_rpcs_in_flight) == 0,
atomic_dec(&d->opd_sync_barrier);
osp_sync_check_for_work(d);
- CDEBUG(D_CACHE, "%s: done in %lu: rc = %d\n", d->opd_obd->obd_name,
- cfs_time_current() - start, rc);
+ CDEBUG(D_CACHE, "%s: done in %lld: rc = %d\n", d->opd_obd->obd_name,
+ ktime_get_seconds() - start, rc);
RETURN(rc);
}
atomic_t opd_sync_barrier;
wait_queue_head_t opd_sync_barrier_waitq;
/* last generated id */
- cfs_time_t opd_sync_next_commit_cb;
+ ktime_t opd_sync_next_commit_cb;
atomic_t opd_commits_registered;
/*
* statfs related fields: OSP maintains it on its own
*/
struct obd_statfs opd_statfs;
- cfs_time_t opd_statfs_fresh_till;
- struct timer_list opd_statfs_timer;
+ ktime_t opd_statfs_fresh_till;
+ struct timer_list opd_statfs_timer;
int opd_statfs_update_in_progress;
/* how often to update statfs data */
- int opd_statfs_maxage;
+ time64_t opd_statfs_maxage;
struct proc_dir_entry *opd_symlink;
*/
static inline int osp_statfs_need_update(struct osp_device *d)
{
- return !cfs_time_before(cfs_time_current(),
- d->opd_statfs_fresh_till);
+ return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
}
/*
struct ptlrpc_request *req,
union ptlrpc_async_args *aa, int rc)
{
- struct obd_import *imp = req->rq_import;
- struct obd_statfs *msfs;
- struct osp_device *d;
+ struct obd_import *imp = req->rq_import;
+ struct obd_statfs *msfs;
+ struct osp_device *d;
+ u64 maxage_ns;
ENTRY;
osp_pre_update_status(d, rc);
/* schedule next update */
- d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
- mod_timer(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
+ maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
+ d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
+ mod_timer(&d->opd_statfs_timer,
+ jiffies + cfs_time_seconds(d->opd_statfs_maxage));
d->opd_statfs_update_in_progress = 0;
CDEBUG(D_CACHE, "updated statfs %p\n", d);
*/
static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
{
+ u64 expire = (u64)obd_timeout * 1000 * NSEC_PER_SEC;
struct ptlrpc_request *req;
struct obd_import *imp;
union ptlrpc_async_args *aa;
- int rc;
+ int rc;
ENTRY;
* no updates till reply
*/
del_timer(&d->opd_statfs_timer);
- d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
+ d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
d->opd_statfs_update_in_progress = 1;
ptlrpcd_add_req(req);
/* we still want to sync changes if no new changes are coming */
- if (cfs_time_before(cfs_time_current(), d->opd_sync_next_commit_cb))
+ if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
GOTO(out, rc);
if (atomic_read(&d->opd_sync_changes)) {
* then we should poll OST immediately once object destroy
* is replied
*/
- d->opd_statfs_fresh_till = cfs_time_shift(-1);
+ d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
del_timer(&d->opd_statfs_timer);
wake_up(&d->opd_pre_waitq);
}
*/
int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
{
- struct l_wait_info lwi;
- cfs_time_t expire = cfs_time_shift(obd_timeout);
- int precreated, rc, synced = 0;
+ time64_t expire = ktime_get_seconds() + obd_timeout;
+ struct l_wait_info lwi;
+ int precreated, rc, synced = 0;
ENTRY;
/* XXX: don't wake up if precreation is in progress */
wake_up(&d->opd_pre_waitq);
- lwi = LWI_TIMEOUT(expire - cfs_time_current(),
- osp_precreate_timeout_condition, d);
- if (cfs_time_aftereq(cfs_time_current(), expire)) {
+ lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
+ osp_precreate_timeout_condition, d);
+ if (ktime_get_seconds() >= expire) {
rc = -ETIMEDOUT;
break;
}
/*
* Initialize statfs-related things
*/
- d->opd_statfs_maxage = 5; /* default update interval */
- d->opd_statfs_fresh_till = cfs_time_shift(-1000);
- CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
- (unsigned long long)cfs_time_current(),
- (unsigned long long)d->opd_statfs_fresh_till);
+ d->opd_statfs_maxage = 5; /* default update interval */
+ d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
+ 1000 * NSEC_PER_SEC);
+ CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
+ ktime_get_ns(),
+ ktime_to_ns(d->opd_statfs_fresh_till));
setup_timer(&d->opd_statfs_timer, osp_statfs_timer_cb,
(unsigned long)d);
* put a mark in the llog till which we'll be processing
* old records restless
*/
- d->opd_sync_generation.mnt_cnt = cfs_time_current();
- d->opd_sync_generation.conn_cnt = cfs_time_current();
+ d->opd_sync_generation.mnt_cnt = ktime_get_ns();
+ d->opd_sync_generation.conn_cnt = ktime_get_ns();
osi->osi_hdr.lrh_type = LLOG_GEN_REC;
osi->osi_hdr.lrh_len = sizeof(osi->osi_gen);
spin_unlock(&d->opd_sync_lock);
rc = dt_trans_cb_add(th, dcb);
- CDEBUG(D_HA, "%s: add commit cb at %llu, next at %llu, rc = %d\n",
- d->opd_obd->obd_name, (unsigned long long) cfs_time_current(),
- (unsigned long long) d->opd_sync_next_commit_cb, rc);
+ CDEBUG(D_HA, "%s: add commit cb at %lluns, next at %lluns, rc = %d\n",
+ d->opd_obd->obd_name, ktime_get_ns(),
+ ktime_to_ns(d->opd_sync_next_commit_cb), rc);
if (likely(rc == 0)) {
lu_device_get(osp2lu_dev(d));
int osp_sync_add_commit_cb_1s(const struct lu_env *env, struct osp_device *d,
struct thandle *th)
{
+ ktime_t now = ktime_get();
bool add = false;
/* fast path */
- if (cfs_time_before(cfs_time_current(), d->opd_sync_next_commit_cb))
+ if (ktime_before(now, d->opd_sync_next_commit_cb))
return 0;
spin_lock(&d->opd_sync_lock);
- if (cfs_time_aftereq(cfs_time_current(), d->opd_sync_next_commit_cb)) {
+ if (!ktime_after(d->opd_sync_next_commit_cb, now)) {
add = true;
- d->opd_sync_next_commit_cb = cfs_time_shift(1);
+ d->opd_sync_next_commit_cb = ktime_add_ns(now, NSEC_PER_SEC);
}
spin_unlock(&d->opd_sync_lock);