* This allows the client to start caching negative dentries
* for a directory and may save an RPC for a later stat.
*/
- unsigned int ns_ctime_age_limit;
+ time64_t ns_ctime_age_limit;
/**
* Used to rate-limit ldlm_namespace_dump calls.
* \see ldlm_namespace_dump. Increased by 10 seconds every time
* it is called.
*/
- cfs_time_t ns_next_dump;
+ time64_t ns_next_dump;
/** "policy" function that does actual lock conflict determination */
ldlm_res_policy ns_policy;
* The resources in this namespace remember contended state during
* \a ns_contention_time, in seconds.
*/
- unsigned ns_contention_time;
+ time64_t ns_contention_time;
/**
* Limit size of contended extent locks, in bytes.
* under this lock.
* \see ost_rw_prolong_locks
*/
- cfs_time_t l_callback_timeout;
+ time64_t l_callback_timeout;
/** Local PID of process which created this lock. */
__u32 l_pid;
union {
/**
* When the resource was considered as contended,
- * used only on server side. */
- cfs_time_t lr_contention_time;
+ * used only on server side.
+ */
+ time64_t lr_contention_time;
/**
* Associated inode, used only on client side.
*/
struct ldlm_res_id lpa_resid;
struct ldlm_extent lpa_extent;
enum ldlm_mode lpa_mode;
- int lpa_timeout;
+ time64_t lpa_timeout;
int lpa_locks_cnt;
int lpa_blocks_cnt;
};
/** @} ldlm_handlers */
void ldlm_revoke_export_locks(struct obd_export *exp);
-unsigned int ldlm_bl_timeout(struct ldlm_lock *lock);
+time64_t ldlm_bl_timeout(struct ldlm_lock *lock);
#endif
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout);
int ldlm_get_ref(void);
void ldlm_put_ref(void);
int ldlm_init_export(struct obd_export *exp);
/** For bulk requests on client only: bulk descriptor */
struct ptlrpc_bulk_desc *cr_bulk;
/** optional time limit for send attempts */
- cfs_duration_t cr_delay_limit;
+ time64_t cr_delay_limit;
/** time request was first queued */
- cfs_time_t cr_queued_time;
+ time64_t cr_queued_time;
/** request sent in nanoseconds */
ktime_t cr_sent_ns;
/** time for request really sent out */
/**
* service time estimate (secs)
* If the request is not served by this time, it is marked as timed out.
+	 * Kept as time_t rather than time64_t: the value is transmitted
+	 * over the wire as a 32-bit quantity and must stay within 32 bits.
*/
- int rq_timeout;
+ time_t rq_timeout;
/**
* when request/reply sent (secs), or time when request should be sent
*/
static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
{
if (req->rq_delay_limit != 0 &&
- cfs_time_before(cfs_time_add(req->rq_queued_time,
- cfs_time_seconds(req->rq_delay_limit)),
- cfs_time_current())) {
+ req->rq_queued_time + req->rq_delay_limit < ktime_get_seconds())
return 1;
- }
return 0;
}
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
struct ldlm_resource *res = lock->l_resource;
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
return 1;
CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
res->lr_contention_time = now;
- return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
- cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
+
+ return now < res->lr_contention_time +
+ ldlm_res_to_ns(res)->ns_contention_time;
}
struct ldlm_extent_compat_args {
void ldlm_lock_prolong_one(struct ldlm_lock *lock,
struct ldlm_prolong_args *arg)
{
- int timeout;
+ time64_t timeout;
if (arg->lpa_export != lock->l_export ||
lock->l_flags & LDLM_FL_DESTROYED)
*/
timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
- LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
+ LDLM_DEBUG(lock, "refreshed to %llds.\n", timeout);
arg->lpa_blocks_cnt++;
{
struct obd_device *target;
struct lustre_handle *hdl;
- cfs_time_t now;
- cfs_time_t deadline;
- int timeout;
+ time64_t deadline;
+ time64_t timeout;
+ time64_t now;
int rc = 0;
- ENTRY;
+ ENTRY;
hdl = &exp->exp_imp_reverse->imp_remote_handle;
if (!exp->exp_connection || !lustre_handle_is_used(hdl)) {
conn->cookie = exp->exp_handle.h_cookie;
GOTO(out_already, rc);
}
- now = cfs_time_current();
- deadline = target->obd_recovery_timer.expires;
- if (cfs_time_before(now, deadline)) {
- struct target_distribute_txn_data *tdtd =
- class_exp2tgt(exp)->lut_tdtd;
+	now = ktime_get_seconds();
+	/* timer.expires is absolute jiffies (offset by INITIAL_JIFFIES);
+	 * compute the remaining interval in jiffies before converting to
+	 * seconds, instead of converting the absolute value directly.
+	 */
+	deadline = now + cfs_duration_sec(target->obd_recovery_timer.expires -
+					  jiffies);
+ if (now < deadline) {
+ struct target_distribute_txn_data *tdtd;
int size = 0;
int count = 0;
char *buf = NULL;
- timeout = cfs_duration_sec(cfs_time_sub(deadline, now));
+ timeout = deadline - now;
+ tdtd = class_exp2tgt(exp)->lut_tdtd;
if (tdtd && tdtd->tdtd_show_update_logs_retrievers)
buf = tdtd->tdtd_show_update_logs_retrievers(
tdtd->tdtd_show_retrievers_cbdata,
if (count > 0)
LCONSOLE_WARN("%s: Recovery already passed deadline "
- "%d:%.02d. It is due to DNE recovery "
+ "%lld:%.02lld. It is due to DNE recovery "
"failed/stuck on the %d MDT(s):%s. "
"Please wait until all MDTs recovered "
"or abort the recovery by force.\n",
buf ? buf : "unknown (not enough RAM)");
else
LCONSOLE_WARN("%s: Recovery already passed deadline "
- "%d:%.02d. If you do not want to wait "
+ "%lld:%.02lld. If you do not want to wait "
"more, please abort the recovery by "
"force.\n", target->obd_name,
timeout / 60, timeout % 60);
if (buf != NULL)
OBD_FREE(buf, size);
} else {
- timeout = cfs_duration_sec(cfs_time_sub(now, deadline));
+ timeout = now - deadline;
LCONSOLE_WARN("%s: Recovery already passed deadline"
- " %d:%.02d, It is most likely due to DNE"
+ " %lld:%.02lld, It is most likely due to DNE"
" recovery is failed or stuck, please wait a"
" few more minutes or abort the recovery.\n",
target->obd_name, timeout / 60, timeout % 60);
/* allow "new" MDT to be connected during recovery, since we
* need retrieve recovery update records from it */
if (target->obd_recovering && !lw_client && !mds_mds_conn) {
- cfs_time_t t;
- int c; /* connected */
- int i; /* in progress */
- int k; /* known */
- int s; /* stale/evicted */
+ time64_t t;
+ int c; /* connected */
+ int i; /* in progress */
+ int k; /* known */
+ int s; /* stale/evicted */
c = atomic_read(&target->obd_connected_clients);
i = atomic_read(&target->obd_lock_replay_clients);
k = target->obd_max_recoverable_clients;
s = target->obd_stale_clients;
t = target->obd_recovery_timer.expires;
- t = cfs_time_sub(t, cfs_time_current());
- t = cfs_duration_sec(t);
+		/* t holds timer.expires (absolute jiffies); subtract jiffies
+		 * first so we convert a duration, not an absolute value.
+		 */
+		t = cfs_duration_sec(t - jiffies);
LCONSOLE_WARN("%s: Denying connection for new client %s"
"(at %s), waiting for %d known clients "
"(%d recovered, %d in progress, and %d "
- "evicted) to recover in %d:%.02d\n",
+ "evicted) to recover in %lld:%.02lld\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid), k,
- c - i, i, s, (int)t / 60,
- (int)t % 60);
+ c - i, i, s, t / 60, t % 60);
rc = -EBUSY;
} else {
dont_check_exports:
}
mod_timer(&obd->obd_recovery_timer,
- cfs_time_shift(obd->obd_recovery_timeout));
+ jiffies + cfs_time_seconds(obd->obd_recovery_timeout));
obd->obd_recovery_start = ktime_get_real_seconds();
spin_unlock(&obd->obd_dev_lock);
* if @extend is true, extend recovery window to have @drt remaining at least;
* otherwise, make sure the recovery timeout value is not less than @drt.
*/
-static void extend_recovery_timer(struct obd_device *obd, int drt,
+static void extend_recovery_timer(struct obd_device *obd, time64_t drt,
bool extend)
{
time64_t now;
obd->obd_recovery_timeout = to;
end = obd->obd_recovery_start + to;
mod_timer(&obd->obd_recovery_timer,
- cfs_time_shift(end - now));
+ jiffies + cfs_time_seconds(end - now));
}
spin_unlock(&obd->obd_dev_lock);
struct ptlrpc_request *req,
int new_client)
{
- int service_time = lustre_msg_get_service_time(req->rq_reqmsg);
+ time64_t service_time = lustre_msg_get_service_time(req->rq_reqmsg);
struct obd_device_target *obt = &obd->u.obt;
if (!new_client && service_time)
target_start_recovery_timer(obd);
/* Convert the service time to RPC timeout,
- * and reuse service_time to limit stack usage. */
+ * and reuse service_time to limit stack usage.
+ */
service_time = at_est2timeout(service_time);
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_SLUGGISH_NET) &&
/* don't reset timer for final stage */
if (!exp_finished(req->rq_export)) {
- int to = obd_timeout;
+ time64_t to = obd_timeout;
/**
* Add request timeout to the recovery time so next request from
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
struct l_wait_info *lwi)
{
- struct ptlrpc_request *req = desc->bd_req;
- time_t start = cfs_time_current_sec();
- time_t deadline;
- int rc = 0;
+ struct ptlrpc_request *req = desc->bd_req;
+ time64_t start = ktime_get_real_seconds();
+ time64_t deadline;
+ int rc = 0;
ENTRY;
deadline = req->rq_deadline;
do {
- long timeoutl = deadline - cfs_time_current_sec();
- cfs_duration_t timeout = timeoutl <= 0 ?
- CFS_TICK : cfs_time_seconds(timeoutl);
- time_t rq_deadline;
+ time64_t timeoutl = deadline - ktime_get_real_seconds();
+ long timeout_jiffies = timeoutl <= 0 ?
+ 1 : cfs_time_seconds(timeoutl);
+ time64_t rq_deadline;
- *lwi = LWI_TIMEOUT_INTERVAL(timeout, cfs_time_seconds(1),
+ *lwi = LWI_TIMEOUT_INTERVAL(timeout_jiffies,
+ cfs_time_seconds(1),
target_bulk_timeout, desc);
rc = l_wait_event(desc->bd_waitq,
!ptlrpc_server_bulk_active(desc) ||
deadline = start + bulk_timeout;
if (deadline > rq_deadline)
deadline = rq_deadline;
- } while ((rc == -ETIMEDOUT) &&
- (deadline > cfs_time_current_sec()));
+ } while (rc == -ETIMEDOUT &&
+ deadline > ktime_get_real_seconds());
if (rc == -ETIMEDOUT) {
- DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %ld%+lds",
+ DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %lld%+llds",
bulk2type(req), deadline - start,
- cfs_time_current_sec() - deadline);
+ ktime_get_real_seconds() - deadline);
ptlrpc_abort_bulk(desc);
} else if (exp->exp_failed) {
DEBUG_REQ(D_ERROR, req, "Eviction on bulk %s",
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
"res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s "
- "remote: %#llx expref: %d pid: %u timeout: %lu "
+ "remote: %#llx expref: %d pid: %u timeout: %lld "
"lvb_type: %d\n",
lock,
lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
"res: "DLDLMRES" rrc: %d type: %s [%llu->%llu] "
"(req %llu->%llu) flags: %#llx nid: %s remote: "
- "%#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ "%#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
"res: "DLDLMRES" rrc: %d type: %s pid: %d "
"[%llu->%llu] flags: %#llx nid: %s "
- "remote: %#llx expref: %d pid: %u timeout: %lu\n",
+ "remote: %#llx expref: %d pid: %u timeout: %lld\n",
ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
"res: "DLDLMRES" bits %#llx/%#llx rrc: %d type: %s "
"flags: %#llx nid: %s remote: %#llx expref: %d "
- "pid: %u timeout: %lu lvb_type: %d\n",
+ "pid: %u timeout: %lld lvb_type: %d\n",
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
atomic_read(&lock->l_refc),
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
"res: "DLDLMRES" rrc: %d type: %s flags: %#llx "
"nid: %s remote: %#llx expref: %d pid: %u "
- "timeout: %lu lvb_type: %d\n",
+ "timeout: %lld lvb_type: %d\n",
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
atomic_read(&lock->l_refc),
static struct ldlm_state *ldlm_state;
-static inline cfs_time_t round_timeout(cfs_time_t timeout)
-{
- return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
-}
-
-/* timeout for initial callback (AST) reply (bz10399) */
-static inline unsigned int ldlm_get_rq_timeout(void)
+/* timeout for initial callback (AST) reply (bz10399)
+ * Due to having to send a 32 bit time value over the
+ * wire return it as time_t instead of time64_t
+ */
+static inline time_t ldlm_get_rq_timeout(void)
{
- /* Non-AT value */
- unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
+ /* Non-AT value */
+ time_t timeout = min(ldlm_timeout, obd_timeout / 3);
- return timeout < 1 ? 1 : timeout;
+ return timeout < 1 ? 1 : timeout;
}
struct ldlm_bl_pool {
}
static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
-static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds);
+static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds);
/**
* Check if there is a request in the export request list
spin_lock_bh(&waiting_locks_spinlock);
while (!list_empty(&waiting_locks_list)) {
lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
- l_pending_chain);
- if (cfs_time_after(lock->l_callback_timeout,
- cfs_time_current()) ||
- (lock->l_req_mode == LCK_GROUP))
- break;
+ l_pending_chain);
+ if (lock->l_callback_timeout > ktime_get_seconds() ||
+ lock->l_req_mode == LCK_GROUP)
+ break;
/* Check if we need to prolong timeout */
if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
wake_up(&expired_lock_wait_queue);
}
- /*
- * Make sure the timer will fire again if we have any locks
- * left.
- */
+ /*
+ * Make sure the timer will fire again if we have any locks
+ * left.
+ */
if (!list_empty(&waiting_locks_list)) {
- cfs_time_t timeout_rounded;
+ unsigned long timeout_jiffies;
+
lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
- l_pending_chain);
- timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
- mod_timer(&waiting_locks_timer, timeout_rounded);
- }
+ l_pending_chain);
+		/* l_callback_timeout is absolute ktime_get_seconds() time;
+		 * rebase onto jiffies before arming the timer (jiffies do
+		 * not start at zero at boot).
+		 */
+		timeout_jiffies = jiffies +
+			cfs_time_seconds(lock->l_callback_timeout -
+					 ktime_get_seconds());
+		mod_timer(&waiting_locks_timer, timeout_jiffies);
+ }
spin_unlock_bh(&waiting_locks_spinlock);
}
*
* Called with the namespace lock held.
*/
-static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds)
+static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds)
{
- cfs_time_t timeout;
- cfs_time_t timeout_rounded;
+ unsigned long timeout_jiffies;
+ time64_t timeout;
if (!list_empty(&lock->l_pending_chain))
return 0;
OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
seconds = 1;
- timeout = cfs_time_shift(seconds);
- if (likely(cfs_time_after(timeout, lock->l_callback_timeout)))
+ timeout = ktime_get_seconds() + seconds;
+ if (likely(timeout > lock->l_callback_timeout))
lock->l_callback_timeout = timeout;
- timeout_rounded = round_timeout(lock->l_callback_timeout);
+	/* rebase the absolute seconds deadline onto the jiffies clock */
+	timeout_jiffies = jiffies + cfs_time_seconds(lock->l_callback_timeout -
+						     ktime_get_seconds());
- if (cfs_time_before(timeout_rounded, waiting_locks_timer.expires) ||
- !timer_pending(&waiting_locks_timer)) {
- mod_timer(&waiting_locks_timer, timeout_rounded);
- }
- /* if the new lock has a shorter timeout than something earlier on
- the list, we'll wait the longer amount of time; no big deal. */
- /* FIFO */
+ if (time_before(timeout_jiffies, waiting_locks_timer.expires) ||
+ !timer_pending(&waiting_locks_timer))
+ mod_timer(&waiting_locks_timer, timeout_jiffies);
+
+ /* if the new lock has a shorter timeout than something earlier on
+ * the list, we'll wait the longer amount of time; no big deal.
+ */
+ /* FIFO */
list_add_tail(&lock->l_pending_chain, &waiting_locks_list);
- return 1;
+ return 1;
}
static void ldlm_add_blocked_lock(struct ldlm_lock *lock)
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
+ time64_t timeout = ldlm_bl_timeout(lock);
int ret;
- int timeout = ldlm_bl_timeout(lock);
/* NB: must be called with hold of lock_res_and_lock() */
LASSERT(ldlm_is_res_locked(lock));
}
if (ldlm_is_destroyed(lock)) {
- static cfs_time_t next;
+ static time64_t next;
spin_unlock_bh(&waiting_locks_spinlock);
LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
- if (cfs_time_after(cfs_time_current(), next)) {
- next = cfs_time_shift(14400);
+ if (ktime_get_seconds() > next) {
+ next = ktime_get_seconds() + 14400;
libcfs_debug_dumpstack(NULL);
}
return 0;
if (ret)
ldlm_add_blocked_lock(lock);
- LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
+ LDLM_DEBUG(lock, "%sadding to wait list(timeout: %lld, AT: %s)",
ret == 0 ? "not re-" : "", timeout,
AT_OFF ? "off" : "on");
return ret;
del_timer(&waiting_locks_timer);
} else {
struct ldlm_lock *next;
+
next = list_entry(list_next, struct ldlm_lock,
- l_pending_chain);
+ l_pending_chain);
mod_timer(&waiting_locks_timer,
- round_timeout(next->l_callback_timeout));
+			  jiffies +
+			  cfs_time_seconds(next->l_callback_timeout -
+					   ktime_get_seconds()));
}
}
list_del_init(&lock->l_pending_chain);
*
* Called with namespace lock held.
*/
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
{
if (lock->l_export == NULL) {
/* We don't have a "waiting locks list" on clients. */
RETURN(0);
}
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
{
RETURN(0);
}
*
* \retval timeout in seconds to wait for the client reply
*/
-unsigned int ldlm_bl_timeout(struct ldlm_lock *lock)
+time64_t ldlm_bl_timeout(struct ldlm_lock *lock)
{
- unsigned int timeout;
+ time64_t timeout;
if (AT_OFF)
return obd_timeout / 2;
* It would be nice to have some kind of "early reply" mechanism for
* lock callbacks too... */
timeout = at_get(&lock->l_export->exp_bl_lock_at);
- return max(timeout + (timeout >> 1), ldlm_enqueue_min);
+ return max(timeout + (timeout >> 1), (time64_t)ldlm_enqueue_min);
}
EXPORT_SYMBOL(ldlm_bl_timeout);
static void ldlm_update_resend(struct ptlrpc_request *req, void *data)
{
- struct ldlm_cb_async_args *ca = data;
- struct ldlm_lock *lock = ca->ca_lock;
+ struct ldlm_cb_async_args *ca = data;
+ struct ldlm_lock *lock = ca->ca_lock;
ldlm_refresh_waiting_lock(lock, ldlm_bl_timeout(lock));
}
INIT_LIST_HEAD(&ast_list);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
- int to = cfs_time_seconds(1);
+ long to = cfs_time_seconds(1);
+
while (to > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
ENTRY;
if (lock->l_conn_export == NULL) {
- static cfs_time_t next_dump = 0, last_dump = 0;
+ static time64_t next_dump, last_dump;
LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); "
"not entering recovery in server code, just going back to sleep",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
- if (cfs_time_after(cfs_time_current(), next_dump)) {
+ if (ktime_get_seconds() > next_dump) {
last_dump = next_dump;
- next_dump = cfs_time_shift(300);
+ next_dump = ktime_get_seconds() + 300;
ldlm_namespace_dump(D_DLMTRACE,
ldlm_lock_to_ns(lock));
if (last_dump == 0)
/* We use the same basis for both server side and client side functions
from a single node. */
-static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock)
+static time64_t ldlm_cp_timeout(struct ldlm_lock *lock)
{
- unsigned int timeout;
+ time64_t timeout;
if (AT_OFF)
return obd_timeout;
* lock from another client. Server will evict the other client if it
* doesn't respond reasonably, and then give us the lock. */
timeout = at_get(ldlm_lock_to_ns_at(lock));
- return max(3 * timeout, ldlm_enqueue_min);
+ return max(3 * timeout, (time64_t) ldlm_enqueue_min);
}
/**
struct obd_device *obd;
struct obd_import *imp = NULL;
struct l_wait_info lwi;
- __u32 timeout;
+ time64_t timeout;
int rc = 0;
ENTRY;
timeout = ldlm_cp_timeout(lock);
lwd.lwd_lock = lock;
- lock->l_last_activity = cfs_time_current_sec();
+ lock->l_last_activity = ktime_get_real_seconds();
if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lock->l_export = NULL;
lock->l_blocking_ast = einfo->ei_cb_bl;
lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
- lock->l_last_activity = cfs_time_current_sec();
+ lock->l_last_activity = ktime_get_real_seconds();
/* lock not sent to server yet */
if (reqp == NULL || *reqp == NULL) {
int scale = NSEC_PER_MSEC;
unsigned long long tmp;
char *buf;
- int err;
/* Did the user ask in seconds or milliseconds. Default is in ms */
buf = strstr(buffer, "ms");
if (buf)
*buf = '\0';
- err = kstrtoull(buffer, 10, &tmp);
- if (err != 0)
+ if (kstrtoull(buffer, 10, &tmp))
return -EINVAL;
ns->ns_max_age = ktime_set(0, tmp * scale);
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%u\n", ns->ns_ctime_age_limit);
+	return sprintf(buf, "%lld\n", ns->ns_ctime_age_limit);
}
static ssize_t ctime_age_limit_store(struct kobject *kobj,
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- unsigned long tmp;
- int err;
+ unsigned long long tmp;
- err = kstrtoul(buffer, 10, &tmp);
- if (err != 0)
+ if (kstrtoull(buffer, 10, &tmp))
return -EINVAL;
ns->ns_ctime_age_limit = tmp;
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%u\n", ns->ns_contention_time);
+	return sprintf(buf, "%lld\n", ns->ns_contention_time);
}
static ssize_t contention_seconds_store(struct kobject *kobj,
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- unsigned long tmp;
- int err;
+ unsigned long long tmp;
- err = kstrtoul(buffer, 10, &tmp);
- if (err != 0)
+ if (kstrtoull(buffer, 10, &tmp))
return -EINVAL;
ns->ns_contention_time = tmp;
ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
ns_is_client(ns) ? "client" : "server");
- if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
+ if (ktime_get_seconds() < ns->ns_next_dump)
return;
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_hash_dump,
(void *)(unsigned long)level, 0);
spin_lock(&ns->ns_lock);
- ns->ns_next_dump = cfs_time_shift(10);
+ ns->ns_next_dump = ktime_get_seconds() + 10;
spin_unlock(&ns->ns_lock);
}
*
* \retval amount of time to extend the timeout with
*/
-static inline int prolong_timeout(struct ptlrpc_request *req)
+static inline time64_t prolong_timeout(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- time_t req_timeout;
+ time64_t req_timeout;
if (AT_OFF)
return obd_timeout / 2;
req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec;
- return max_t(time_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+ return max_t(time64_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
req_timeout);
}
list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = ktime_get_seconds();
if (req->rq_reqmsg != NULL)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
* The set takes over the caller's request reference.
*/
req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = ktime_get_seconds();
list_add_tail(&req->rq_set_chain, &set->set_new_requests);
count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
lustre_msg_add_op_flags(request->rq_reqmsg,
MSG_CONNECT_TRANSNO);
- DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
+ DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %ld)",
request->rq_timeout);
ptlrpcd_add_req(request);
rc = 0;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
req->rq_set = new;
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = ktime_get_seconds();
}
spin_lock(&new->set_new_req_lock);
}
if (ldlm_is_ast_sent(lock)) {
- struct ptlrpc_service_part *svc;
- unsigned int timeout;
+ struct ptlrpc_service_part *svc;
+ time64_t timeout;
svc = req->rq_rqbd->rqbd_svcpt;
timeout = at_est2timeout(at_get(&svc->scp_at_estimate));