From e920be6814512b1aa8696ea36d697d3b698c13e8 Mon Sep 17 00:00:00 2001 From: James Simmons Date: Wed, 18 Oct 2017 13:26:00 -0400 Subject: [PATCH] LU-9019 ldlm: migrate the rest of the code to 64 bit time Replace the last cfs_time_current_sec() to avoid the overflow issues in 2038 with ktime_get_real_seconds(). Reduce the jiffies usage to the bare minimum which is used for mod_timer() and schedule_timeout(). This makes the ldlm totally 64 bit time compliant. Change-Id: Iaee92c17d51fdfc55bd26e9e813e30a6ce794856 Signed-off-by: James Simmons Reviewed-on: https://review.whamcloud.com/29295 Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Dmitry Eremin Reviewed-by: Andreas Dilger Reviewed-by: John L. Hammond Reviewed-by: Oleg Drokin --- lustre/include/lustre_dlm.h | 19 ++++---- lustre/include/lustre_net.h | 12 ++--- lustre/ldlm/ldlm_extent.c | 11 +++-- lustre/ldlm/ldlm_lib.c | 87 ++++++++++++++++++----------------- lustre/ldlm/ldlm_lock.c | 10 ++-- lustre/ldlm/ldlm_lockd.c | 109 ++++++++++++++++++++++---------------------- lustre/ldlm/ldlm_request.c | 18 ++++---- lustre/ldlm/ldlm_resource.c | 24 ++++------ lustre/ofd/ofd_dev.c | 6 +-- lustre/ptlrpc/client.c | 4 +- lustre/ptlrpc/import.c | 2 +- lustre/ptlrpc/ptlrpcd.c | 2 +- lustre/quota/qmt_handler.c | 4 +- 13 files changed, 152 insertions(+), 156 deletions(-) diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h index 17b51ae..2feb5e2 100644 --- a/lustre/include/lustre_dlm.h +++ b/lustre/include/lustre_dlm.h @@ -438,14 +438,14 @@ struct ldlm_namespace { * This allows the client to start caching negative dentries * for a directory and may save an RPC for a later stat. */ - unsigned int ns_ctime_age_limit; + time64_t ns_ctime_age_limit; /** * Used to rate-limit ldlm_namespace_dump calls. * \see ldlm_namespace_dump. Increased by 10 seconds every time * it is called. 
*/ - cfs_time_t ns_next_dump; + time64_t ns_next_dump; /** "policy" function that does actual lock conflict determination */ ldlm_res_policy ns_policy; @@ -483,7 +483,7 @@ struct ldlm_namespace { * The resources in this namespace remember contended state during * \a ns_contention_time, in seconds. */ - unsigned ns_contention_time; + time64_t ns_contention_time; /** * Limit size of contended extent locks, in bytes. @@ -871,7 +871,7 @@ struct ldlm_lock { * under this lock. * \see ost_rw_prolong_locks */ - cfs_time_t l_callback_timeout; + time64_t l_callback_timeout; /** Local PID of process which created this lock. */ __u32 l_pid; @@ -980,8 +980,9 @@ struct ldlm_resource { union { /** * When the resource was considered as contended, - * used only on server side. */ - cfs_time_t lr_contention_time; + * used only on server side. + */ + time64_t lr_contention_time; /** * Associated inode, used only on client side. */ @@ -1263,7 +1264,7 @@ struct ldlm_prolong_args { struct ldlm_res_id lpa_resid; struct ldlm_extent lpa_extent; enum ldlm_mode lpa_mode; - int lpa_timeout; + time64_t lpa_timeout; int lpa_locks_cnt; int lpa_blocks_cnt; }; @@ -1312,10 +1313,10 @@ int ldlm_request_cancel(struct ptlrpc_request *req, /** @} ldlm_handlers */ void ldlm_revoke_export_locks(struct obd_export *exp); -unsigned int ldlm_bl_timeout(struct ldlm_lock *lock); +time64_t ldlm_bl_timeout(struct ldlm_lock *lock); #endif int ldlm_del_waiting_lock(struct ldlm_lock *lock); -int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout); +int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout); int ldlm_get_ref(void); void ldlm_put_ref(void); int ldlm_init_export(struct obd_export *exp); diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index 3325ad6..b10ac91 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -784,9 +784,9 @@ struct ptlrpc_cli_req { /** For bulk requests on client only: bulk descriptor */ struct ptlrpc_bulk_desc 
*cr_bulk; /** optional time limit for send attempts */ - cfs_duration_t cr_delay_limit; + time64_t cr_delay_limit; /** time request was first queued */ - cfs_time_t cr_queued_time; + time64_t cr_queued_time; /** request sent in nanoseconds */ ktime_t cr_sent_ns; /** time for request really sent out */ @@ -1115,8 +1115,9 @@ struct ptlrpc_request { /** * service time estimate (secs) * If the request is not served by this time, it is marked as timed out. + * Do not change to time64_t since this is transmitted over the wire. */ - int rq_timeout; + time_t rq_timeout; /** * when request/reply sent (secs), or time when request should be sent */ @@ -2592,11 +2593,8 @@ static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req) static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) { if (req->rq_delay_limit != 0 && - cfs_time_before(cfs_time_add(req->rq_queued_time, - cfs_time_seconds(req->rq_delay_limit)), - cfs_time_current())) { + req->rq_queued_time + req->rq_delay_limit < ktime_get_seconds()) return 1; - } return 0; } diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c index 5001b66..4c1ac07 100644 --- a/lustre/ldlm/ldlm_extent.c +++ b/lustre/ldlm/ldlm_extent.c @@ -311,7 +311,7 @@ static void ldlm_extent_policy(struct ldlm_resource *res, static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks) { struct ldlm_resource *res = lock->l_resource; - cfs_time_t now = cfs_time_current(); + time64_t now = ktime_get_seconds(); if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION)) return 1; @@ -319,8 +319,9 @@ static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks) CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks); if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks) res->lr_contention_time = now; - return cfs_time_before(now, cfs_time_add(res->lr_contention_time, - cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time))); + + return now < res->lr_contention_time + + 
ldlm_res_to_ns(res)->ns_contention_time; } struct ldlm_extent_compat_args { @@ -675,7 +676,7 @@ destroylock: void ldlm_lock_prolong_one(struct ldlm_lock *lock, struct ldlm_prolong_args *arg) { - int timeout; + time64_t timeout; if (arg->lpa_export != lock->l_export || lock->l_flags & LDLM_FL_DESTROYED) @@ -693,7 +694,7 @@ void ldlm_lock_prolong_one(struct ldlm_lock *lock, */ timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1); - LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout); + LDLM_DEBUG(lock, "refreshed to %llds.\n", timeout); arg->lpa_blocks_cnt++; diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c index 59bc608..0b0d778 100644 --- a/lustre/ldlm/ldlm_lib.c +++ b/lustre/ldlm/ldlm_lib.c @@ -742,12 +742,12 @@ static int target_handle_reconnect(struct lustre_handle *conn, { struct obd_device *target; struct lustre_handle *hdl; - cfs_time_t now; - cfs_time_t deadline; - int timeout; + time64_t deadline; + time64_t timeout; + time64_t now; int rc = 0; - ENTRY; + ENTRY; hdl = &exp->exp_imp_reverse->imp_remote_handle; if (!exp->exp_connection || !lustre_handle_is_used(hdl)) { conn->cookie = exp->exp_handle.h_cookie; @@ -781,16 +781,16 @@ static int target_handle_reconnect(struct lustre_handle *conn, GOTO(out_already, rc); } - now = cfs_time_current(); - deadline = target->obd_recovery_timer.expires; - if (cfs_time_before(now, deadline)) { - struct target_distribute_txn_data *tdtd = - class_exp2tgt(exp)->lut_tdtd; + now = ktime_get_seconds(); + deadline = cfs_duration_sec(target->obd_recovery_timer.expires); + if (now < deadline) { + struct target_distribute_txn_data *tdtd; int size = 0; int count = 0; char *buf = NULL; - timeout = cfs_duration_sec(cfs_time_sub(deadline, now)); + timeout = deadline - now; + tdtd = class_exp2tgt(exp)->lut_tdtd; if (tdtd && tdtd->tdtd_show_update_logs_retrievers) buf = tdtd->tdtd_show_update_logs_retrievers( tdtd->tdtd_show_retrievers_cbdata, @@ -798,7 +798,7 @@ static int target_handle_reconnect(struct lustre_handle 
*conn, if (count > 0) LCONSOLE_WARN("%s: Recovery already passed deadline " - "%d:%.02d. It is due to DNE recovery " + "%lld:%.02lld. It is due to DNE recovery " "failed/stuck on the %d MDT(s):%s. " "Please wait until all MDTs recovered " "or abort the recovery by force.\n", @@ -807,7 +807,7 @@ static int target_handle_reconnect(struct lustre_handle *conn, buf ? buf : "unknown (not enough RAM)"); else LCONSOLE_WARN("%s: Recovery already passed deadline " - "%d:%.02d. If you do not want to wait " + "%lld:%.02lld. If you do not want to wait " "more, please abort the recovery by " "force.\n", target->obd_name, timeout / 60, timeout % 60); @@ -815,9 +815,9 @@ static int target_handle_reconnect(struct lustre_handle *conn, if (buf != NULL) OBD_FREE(buf, size); } else { - timeout = cfs_duration_sec(cfs_time_sub(now, deadline)); + timeout = now - deadline; LCONSOLE_WARN("%s: Recovery already passed deadline" - " %d:%.02d, It is most likely due to DNE" + " %lld:%.02lld, It is most likely due to DNE" " recovery is failed or stuck, please wait a" " few more minutes or abort the recovery.\n", target->obd_name, timeout / 60, timeout % 60); @@ -1238,27 +1238,26 @@ no_export: /* allow "new" MDT to be connected during recovery, since we * need retrieve recovery update records from it */ if (target->obd_recovering && !lw_client && !mds_mds_conn) { - cfs_time_t t; - int c; /* connected */ - int i; /* in progress */ - int k; /* known */ - int s; /* stale/evicted */ + time64_t t; + int c; /* connected */ + int i; /* in progress */ + int k; /* known */ + int s; /* stale/evicted */ c = atomic_read(&target->obd_connected_clients); i = atomic_read(&target->obd_lock_replay_clients); k = target->obd_max_recoverable_clients; s = target->obd_stale_clients; t = target->obd_recovery_timer.expires; - t = cfs_time_sub(t, cfs_time_current()); - t = cfs_duration_sec(t); + t = cfs_duration_sec(target->obd_recovery_timer.expires); + t -= ktime_get_seconds(); LCONSOLE_WARN("%s: Denying connection for 
new client %s" "(at %s), waiting for %d known clients " "(%d recovered, %d in progress, and %d " - "evicted) to recover in %d:%.02d\n", + "evicted) to recover in %lld:%.02lld\n", target->obd_name, cluuid.uuid, libcfs_nid2str(req->rq_peer.nid), k, - c - i, i, s, (int)t / 60, - (int)t % 60); + c - i, i, s, t / 60, t % 60); rc = -EBUSY; } else { dont_check_exports: @@ -1697,7 +1696,7 @@ static void target_start_recovery_timer(struct obd_device *obd) } mod_timer(&obd->obd_recovery_timer, - cfs_time_shift(obd->obd_recovery_timeout)); + jiffies + cfs_time_seconds(obd->obd_recovery_timeout)); obd->obd_recovery_start = ktime_get_real_seconds(); spin_unlock(&obd->obd_dev_lock); @@ -1716,7 +1715,7 @@ static void target_start_recovery_timer(struct obd_device *obd) * if @extend is true, extend recovery window to have @drt remaining at least; * otherwise, make sure the recovery timeout value is not less than @drt. */ -static void extend_recovery_timer(struct obd_device *obd, int drt, +static void extend_recovery_timer(struct obd_device *obd, time64_t drt, bool extend) { time64_t now; @@ -1752,7 +1751,7 @@ static void extend_recovery_timer(struct obd_device *obd, int drt, obd->obd_recovery_timeout = to; end = obd->obd_recovery_start + to; mod_timer(&obd->obd_recovery_timer, - cfs_time_shift(end - now)); + jiffies + cfs_time_seconds(end - now)); } spin_unlock(&obd->obd_dev_lock); @@ -1776,7 +1775,7 @@ check_and_start_recovery_timer(struct obd_device *obd, struct ptlrpc_request *req, int new_client) { - int service_time = lustre_msg_get_service_time(req->rq_reqmsg); + time64_t service_time = lustre_msg_get_service_time(req->rq_reqmsg); struct obd_device_target *obt = &obd->u.obt; if (!new_client && service_time) @@ -1788,7 +1787,8 @@ check_and_start_recovery_timer(struct obd_device *obd, target_start_recovery_timer(obd); /* Convert the service time to RPC timeout, - * and reuse service_time to limit stack usage. */ + * and reuse service_time to limit stack usage. 
+ */ service_time = at_est2timeout(service_time); if (OBD_FAIL_CHECK(OBD_FAIL_TGT_SLUGGISH_NET) && @@ -2131,7 +2131,7 @@ static void handle_recovery_req(struct ptlrpc_thread *thread, /* don't reset timer for final stage */ if (!exp_finished(req->rq_export)) { - int to = obd_timeout; + time64_t to = obd_timeout; /** * Add request timeout to the recovery time so next request from @@ -3123,10 +3123,10 @@ static inline const char *bulk2type(struct ptlrpc_request *req) int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc, struct l_wait_info *lwi) { - struct ptlrpc_request *req = desc->bd_req; - time_t start = cfs_time_current_sec(); - time_t deadline; - int rc = 0; + struct ptlrpc_request *req = desc->bd_req; + time64_t start = ktime_get_real_seconds(); + time64_t deadline; + int rc = 0; ENTRY; @@ -3173,12 +3173,13 @@ int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc, deadline = req->rq_deadline; do { - long timeoutl = deadline - cfs_time_current_sec(); - cfs_duration_t timeout = timeoutl <= 0 ? - CFS_TICK : cfs_time_seconds(timeoutl); - time_t rq_deadline; + time64_t timeoutl = deadline - ktime_get_real_seconds(); + long timeout_jiffies = timeoutl <= 0 ? 
+ 1 : cfs_time_seconds(timeoutl); + time64_t rq_deadline; - *lwi = LWI_TIMEOUT_INTERVAL(timeout, cfs_time_seconds(1), + *lwi = LWI_TIMEOUT_INTERVAL(timeout_jiffies, + cfs_time_seconds(1), target_bulk_timeout, desc); rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc) || @@ -3192,13 +3193,13 @@ int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc, deadline = start + bulk_timeout; if (deadline > rq_deadline) deadline = rq_deadline; - } while ((rc == -ETIMEDOUT) && - (deadline > cfs_time_current_sec())); + } while (rc == -ETIMEDOUT && + deadline > ktime_get_real_seconds()); if (rc == -ETIMEDOUT) { - DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %ld%+lds", + DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %lld%+llds", bulk2type(req), deadline - start, - cfs_time_current_sec() - deadline); + ktime_get_real_seconds() - deadline); ptlrpc_abort_bulk(desc); } else if (exp->exp_failed) { DEBUG_REQ(D_ERROR, req, "Eviction on bulk %s", diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c index 8d427ec..472dde5 100644 --- a/lustre/ldlm/ldlm_lock.c +++ b/lustre/ldlm/ldlm_lock.c @@ -2774,7 +2774,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, libcfs_debug_vmsg2(msgdata, fmt, args, " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s " "res: \?\? rrc=\?\? type: \?\?\? 
flags: %#llx nid: %s " - "remote: %#llx expref: %d pid: %u timeout: %lu " + "remote: %#llx expref: %d pid: %u timeout: %lld " "lvb_type: %d\n", lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc), @@ -2794,7 +2794,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s " "res: "DLDLMRES" rrc: %d type: %s [%llu->%llu] " "(req %llu->%llu) flags: %#llx nid: %s remote: " - "%#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", + "%#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n", ldlm_lock_to_ns_name(lock), lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers, @@ -2817,7 +2817,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s " "res: "DLDLMRES" rrc: %d type: %s pid: %d " "[%llu->%llu] flags: %#llx nid: %s " - "remote: %#llx expref: %d pid: %u timeout: %lu\n", + "remote: %#llx expref: %d pid: %u timeout: %lld\n", ldlm_lock_to_ns_name(lock), lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers, @@ -2839,7 +2839,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s " "res: "DLDLMRES" bits %#llx/%#llx rrc: %d type: %s " "flags: %#llx nid: %s remote: %#llx expref: %d " - "pid: %u timeout: %lu lvb_type: %d\n", + "pid: %u timeout: %lld lvb_type: %d\n", ldlm_lock_to_ns_name(lock), lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc), @@ -2862,7 +2862,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s " "res: "DLDLMRES" rrc: %d type: %s flags: %#llx " "nid: %s remote: %#llx expref: %d pid: %u " - "timeout: %lu lvb_type: %d\n", + "timeout: %lld lvb_type: %d\n", ldlm_lock_to_ns_name(lock), lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc), diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c index 5d3b781..0584a0f 100644 --- a/lustre/ldlm/ldlm_lockd.c +++ 
b/lustre/ldlm/ldlm_lockd.c @@ -64,18 +64,16 @@ struct kset *ldlm_svc_kset; static struct ldlm_state *ldlm_state; -static inline cfs_time_t round_timeout(cfs_time_t timeout) -{ - return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1); -} - -/* timeout for initial callback (AST) reply (bz10399) */ -static inline unsigned int ldlm_get_rq_timeout(void) +/* timeout for initial callback (AST) reply (bz10399) + * Due to having to send a 32 bit time value over the + * wire return it as time_t instead of time64_t + */ +static inline time_t ldlm_get_rq_timeout(void) { - /* Non-AT value */ - unsigned int timeout = min(ldlm_timeout, obd_timeout / 3); + /* Non-AT value */ + time_t timeout = min(ldlm_timeout, obd_timeout / 3); - return timeout < 1 ? 1 : timeout; + return timeout < 1 ? 1 : timeout; } struct ldlm_bl_pool { @@ -259,7 +257,7 @@ static int expired_lock_main(void *arg) } static int ldlm_add_waiting_lock(struct ldlm_lock *lock); -static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds); +static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds); /** * Check if there is a request in the export request list @@ -296,11 +294,10 @@ static void waiting_locks_callback(unsigned long unused) spin_lock_bh(&waiting_locks_spinlock); while (!list_empty(&waiting_locks_list)) { lock = list_entry(waiting_locks_list.next, struct ldlm_lock, - l_pending_chain); - if (cfs_time_after(lock->l_callback_timeout, - cfs_time_current()) || - (lock->l_req_mode == LCK_GROUP)) - break; + l_pending_chain); + if (lock->l_callback_timeout > ktime_get_seconds() || + lock->l_req_mode == LCK_GROUP) + break; /* Check if we need to prolong timeout */ if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) && @@ -348,17 +345,18 @@ static void waiting_locks_callback(unsigned long unused) wake_up(&expired_lock_wait_queue); } - /* - * Make sure the timer will fire again if we have any locks - * left. 
- */ + /* + * Make sure the timer will fire again if we have any locks + * left. + */ if (!list_empty(&waiting_locks_list)) { - cfs_time_t timeout_rounded; + unsigned long timeout_jiffies; + lock = list_entry(waiting_locks_list.next, struct ldlm_lock, - l_pending_chain); - timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout); - mod_timer(&waiting_locks_timer, timeout_rounded); - } + l_pending_chain); + timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout); + mod_timer(&waiting_locks_timer, timeout_jiffies); + } spin_unlock_bh(&waiting_locks_spinlock); } @@ -374,10 +372,10 @@ static void waiting_locks_callback(unsigned long unused) * * Called with the namespace lock held. */ -static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds) +static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds) { - cfs_time_t timeout; - cfs_time_t timeout_rounded; + unsigned long timeout_jiffies; + time64_t timeout; if (!list_empty(&lock->l_pending_chain)) return 0; @@ -386,21 +384,22 @@ static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds) OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT)) seconds = 1; - timeout = cfs_time_shift(seconds); - if (likely(cfs_time_after(timeout, lock->l_callback_timeout))) + timeout = ktime_get_seconds() + seconds; + if (likely(timeout > lock->l_callback_timeout)) lock->l_callback_timeout = timeout; - timeout_rounded = round_timeout(lock->l_callback_timeout); + timeout_jiffies = cfs_time_seconds(lock->l_callback_timeout); - if (cfs_time_before(timeout_rounded, waiting_locks_timer.expires) || - !timer_pending(&waiting_locks_timer)) { - mod_timer(&waiting_locks_timer, timeout_rounded); - } - /* if the new lock has a shorter timeout than something earlier on - the list, we'll wait the longer amount of time; no big deal. 
*/ - /* FIFO */ + if (time_before(timeout_jiffies, waiting_locks_timer.expires) || + !timer_pending(&waiting_locks_timer)) + mod_timer(&waiting_locks_timer, timeout_jiffies); + + /* if the new lock has a shorter timeout than something earlier on + * the list, we'll wait the longer amount of time; no big deal. + */ + /* FIFO */ list_add_tail(&lock->l_pending_chain, &waiting_locks_list); - return 1; + return 1; } static void ldlm_add_blocked_lock(struct ldlm_lock *lock) @@ -427,8 +426,8 @@ static void ldlm_add_blocked_lock(struct ldlm_lock *lock) static int ldlm_add_waiting_lock(struct ldlm_lock *lock) { + time64_t timeout = ldlm_bl_timeout(lock); int ret; - int timeout = ldlm_bl_timeout(lock); /* NB: must be called with hold of lock_res_and_lock() */ LASSERT(ldlm_is_res_locked(lock)); @@ -447,12 +446,12 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock) } if (ldlm_is_destroyed(lock)) { - static cfs_time_t next; + static time64_t next; spin_unlock_bh(&waiting_locks_spinlock); LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)"); - if (cfs_time_after(cfs_time_current(), next)) { - next = cfs_time_shift(14400); + if (ktime_get_seconds() > next) { + next = ktime_get_seconds() + 14400; libcfs_debug_dumpstack(NULL); } return 0; @@ -471,7 +470,7 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock) if (ret) ldlm_add_blocked_lock(lock); - LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)", + LDLM_DEBUG(lock, "%sadding to wait list(timeout: %lld, AT: %s)", ret == 0 ? "not re-" : "", timeout, AT_OFF ? 
"off" : "on"); return ret; @@ -501,10 +500,11 @@ static int __ldlm_del_waiting_lock(struct ldlm_lock *lock) del_timer(&waiting_locks_timer); } else { struct ldlm_lock *next; + next = list_entry(list_next, struct ldlm_lock, - l_pending_chain); + l_pending_chain); mod_timer(&waiting_locks_timer, - round_timeout(next->l_callback_timeout)); + cfs_time_seconds(next->l_callback_timeout)); } } list_del_init(&lock->l_pending_chain); @@ -547,7 +547,7 @@ int ldlm_del_waiting_lock(struct ldlm_lock *lock) * * Called with namespace lock held. */ -int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout) +int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout) { if (lock->l_export == NULL) { /* We don't have a "waiting locks list" on clients. */ @@ -587,7 +587,7 @@ int ldlm_del_waiting_lock(struct ldlm_lock *lock) RETURN(0); } -int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout) +int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout) { RETURN(0); } @@ -605,9 +605,9 @@ int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout) * * \retval timeout in seconds to wait for the client reply */ -unsigned int ldlm_bl_timeout(struct ldlm_lock *lock) +time64_t ldlm_bl_timeout(struct ldlm_lock *lock) { - unsigned int timeout; + time64_t timeout; if (AT_OFF) return obd_timeout / 2; @@ -617,7 +617,7 @@ unsigned int ldlm_bl_timeout(struct ldlm_lock *lock) * It would be nice to have some kind of "early reply" mechanism for * lock callbacks too... 
*/ timeout = at_get(&lock->l_export->exp_bl_lock_at); - return max(timeout + (timeout >> 1), ldlm_enqueue_min); + return max(timeout + (timeout >> 1), (time64_t)ldlm_enqueue_min); } EXPORT_SYMBOL(ldlm_bl_timeout); @@ -773,8 +773,8 @@ static int ldlm_cb_interpret(const struct lu_env *env, static void ldlm_update_resend(struct ptlrpc_request *req, void *data) { - struct ldlm_cb_async_args *ca = data; - struct ldlm_lock *lock = ca->ca_lock; + struct ldlm_cb_async_args *ca = data; + struct ldlm_lock *lock = ca->ca_lock; ldlm_refresh_waiting_lock(lock, ldlm_bl_timeout(lock)); } @@ -1773,7 +1773,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, INIT_LIST_HEAD(&ast_list); if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) { - int to = cfs_time_seconds(1); + long to = cfs_time_seconds(1); + while (to > 0) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(to); diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c index fb67a1e..43da254 100644 --- a/lustre/ldlm/ldlm_request.c +++ b/lustre/ldlm/ldlm_request.c @@ -120,16 +120,16 @@ int ldlm_expired_completion_wait(void *data) ENTRY; if (lock->l_conn_export == NULL) { - static cfs_time_t next_dump = 0, last_dump = 0; + static time64_t next_dump, last_dump; LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); " "not entering recovery in server code, just going back to sleep", (s64)lock->l_last_activity, (s64)(ktime_get_real_seconds() - lock->l_last_activity)); - if (cfs_time_after(cfs_time_current(), next_dump)) { + if (ktime_get_seconds() > next_dump) { last_dump = next_dump; - next_dump = cfs_time_shift(300); + next_dump = ktime_get_seconds() + 300; ldlm_namespace_dump(D_DLMTRACE, ldlm_lock_to_ns(lock)); if (last_dump == 0) @@ -161,9 +161,9 @@ int ldlm_expired_completion_wait(void *data) /* We use the same basis for both server side and client side functions from a single node. 
*/ -static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock) +static time64_t ldlm_cp_timeout(struct ldlm_lock *lock) { - unsigned int timeout; + time64_t timeout; if (AT_OFF) return obd_timeout; @@ -172,7 +172,7 @@ static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock) * lock from another client. Server will evict the other client if it * doesn't respond reasonably, and then give us the lock. */ timeout = at_get(ldlm_lock_to_ns_at(lock)); - return max(3 * timeout, ldlm_enqueue_min); + return max(3 * timeout, (time64_t) ldlm_enqueue_min); } /** @@ -255,7 +255,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) struct obd_device *obd; struct obd_import *imp = NULL; struct l_wait_info lwi; - __u32 timeout; + time64_t timeout; int rc = 0; ENTRY; @@ -284,7 +284,7 @@ noreproc: timeout = ldlm_cp_timeout(lock); lwd.lwd_lock = lock; - lock->l_last_activity = cfs_time_current_sec(); + lock->l_last_activity = ktime_get_real_seconds(); if (ldlm_is_no_timeout(lock)) { LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT"); @@ -946,7 +946,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lock->l_export = NULL; lock->l_blocking_ast = einfo->ei_cb_bl; lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL)); - lock->l_last_activity = cfs_time_current_sec(); + lock->l_last_activity = ktime_get_real_seconds(); /* lock not sent to server yet */ if (reqp == NULL || *reqp == NULL) { diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c index 41e7c1e..ccb832b 100644 --- a/lustre/ldlm/ldlm_resource.c +++ b/lustre/ldlm/ldlm_resource.c @@ -409,7 +409,6 @@ static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr, int scale = NSEC_PER_MSEC; unsigned long long tmp; char *buf; - int err; /* Did the user ask in seconds or milliseconds. 
Default is in ms */ buf = strstr(buffer, "ms"); @@ -422,8 +421,7 @@ static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr, if (buf) *buf = '\0'; - err = kstrtoull(buffer, 10, &tmp); - if (err != 0) + if (kstrtoull(buffer, 10, &tmp)) return -EINVAL; ns->ns_max_age = ktime_set(0, tmp * scale); @@ -471,7 +469,7 @@ static ssize_t ctime_age_limit_show(struct kobject *kobj, struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, ns_kobj); - return sprintf(buf, "%u\n", ns->ns_ctime_age_limit); + return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit); } static ssize_t ctime_age_limit_store(struct kobject *kobj, @@ -480,11 +478,9 @@ static ssize_t ctime_age_limit_store(struct kobject *kobj, { struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, ns_kobj); - unsigned long tmp; - int err; + unsigned long long tmp; - err = kstrtoul(buffer, 10, &tmp); - if (err != 0) + if (kstrtoull(buffer, 10, &tmp)) return -EINVAL; ns->ns_ctime_age_limit = tmp; @@ -537,7 +533,7 @@ static ssize_t contention_seconds_show(struct kobject *kobj, struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, ns_kobj); - return sprintf(buf, "%u\n", ns->ns_contention_time); + return sprintf(buf, "%llu\n", ns->ns_contention_time); } static ssize_t contention_seconds_store(struct kobject *kobj, @@ -546,11 +542,9 @@ static ssize_t contention_seconds_store(struct kobject *kobj, { struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, ns_kobj); - unsigned long tmp; - int err; + unsigned long long tmp; - err = kstrtoul(buffer, 10, &tmp); - if (err != 0) + if (kstrtoull(buffer, 10, &tmp)) return -EINVAL; ns->ns_contention_time = tmp; @@ -1654,14 +1648,14 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns) ldlm_ns_name(ns), atomic_read(&ns->ns_bref), ns_is_client(ns) ? 
"client" : "server"); - if (cfs_time_before(cfs_time_current(), ns->ns_next_dump)) + if (ktime_get_seconds() < ns->ns_next_dump) return; cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_res_hash_dump, (void *)(unsigned long)level, 0); spin_lock(&ns->ns_lock); - ns->ns_next_dump = cfs_time_shift(10); + ns->ns_next_dump = ktime_get_seconds() + 10; spin_unlock(&ns->ns_lock); } diff --git a/lustre/ofd/ofd_dev.c b/lustre/ofd/ofd_dev.c index 02cf82a..4cf5e53 100644 --- a/lustre/ofd/ofd_dev.c +++ b/lustre/ofd/ofd_dev.c @@ -2364,16 +2364,16 @@ static int ofd_quotactl(struct tgt_session_info *tsi) * * \retval amount of time to extend the timeout with */ -static inline int prolong_timeout(struct ptlrpc_request *req) +static inline time64_t prolong_timeout(struct ptlrpc_request *req) { struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; - time_t req_timeout; + time64_t req_timeout; if (AT_OFF) return obd_timeout / 2; req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec; - return max_t(time_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)), + return max_t(time64_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)), req_timeout); } diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 2766186..1c05e76 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -1088,7 +1088,7 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, list_add_tail(&req->rq_set_chain, &set->set_requests); req->rq_set = set; atomic_inc(&set->set_remaining); - req->rq_queued_time = cfs_time_current(); + req->rq_queued_time = ktime_get_seconds(); if (req->rq_reqmsg != NULL) lustre_msg_set_jobid(req->rq_reqmsg, NULL); @@ -1119,7 +1119,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, * The set takes over the caller's request reference. 
*/ req->rq_set = set; - req->rq_queued_time = cfs_time_current(); + req->rq_queued_time = ktime_get_seconds(); list_add_tail(&req->rq_set_chain, &set->set_new_requests); count = atomic_inc_return(&set->set_new_count); spin_unlock(&set->set_new_req_lock); diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c index c31004a..46908ba 100644 --- a/lustre/ptlrpc/import.c +++ b/lustre/ptlrpc/import.c @@ -764,7 +764,7 @@ int ptlrpc_connect_import(struct obd_import *imp) lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_TRANSNO); - DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)", + DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %ld)", request->rq_timeout); ptlrpcd_add_req(request); rc = 0; diff --git a/lustre/ptlrpc/ptlrpcd.c b/lustre/ptlrpc/ptlrpcd.c index dc9f948..8a9f049 100644 --- a/lustre/ptlrpc/ptlrpcd.c +++ b/lustre/ptlrpc/ptlrpcd.c @@ -212,7 +212,7 @@ void ptlrpcd_add_rqset(struct ptlrpc_request_set *set) LASSERT(req->rq_phase == RQ_PHASE_NEW); req->rq_set = new; - req->rq_queued_time = cfs_time_current(); + req->rq_queued_time = ktime_get_seconds(); } spin_lock(&new->set_new_req_lock); diff --git a/lustre/quota/qmt_handler.c b/lustre/quota/qmt_handler.c index e0c99ad..52963e1 100644 --- a/lustre/quota/qmt_handler.c +++ b/lustre/quota/qmt_handler.c @@ -638,8 +638,8 @@ static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld, } if (ldlm_is_ast_sent(lock)) { - struct ptlrpc_service_part *svc; - unsigned int timeout; + struct ptlrpc_service_part *svc; + time64_t timeout; svc = req->rq_rqbd->rqbd_svcpt; timeout = at_est2timeout(at_get(&svc->scp_at_estimate)); -- 1.8.3.1