Replace cfs_time_current_sec() with ktime_get_real_seconds()
to avoid the overflow issues in 2038. Migrate the rest of the
gss code to time64_t to avoid the same 2038 overflow issue.
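
As a minimal sketch of the conversion pattern (illustrative
only; the helper below is hypothetical, not code from this
patch):

    #include <linux/ktime.h>

    /* time64_t and ktime_get_real_seconds() are 64 bits wide
     * even on 32-bit kernels, where a 32-bit seconds counter
     * eventually wraps.
     */
    static time64_t example_expiry(time64_t lifetime)
    {
            return ktime_get_real_seconds() + lifetime;
    }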
Currently in encrypt_page_pools we report the jiffy cycles
for "max wait time", which not only doesn't make sense but
can also vary from platform to platform. Instead we now
report in milliseconds, which requires changing
epp_st_max_wait to ktime_t since we need better than second
precision. Lastly, the "last access" and "last shrink" times
in encrypt_page_pools were showing up negative. This was due
to the epp_last_* fields being set to the number of seconds
since the epoch instead of the number of seconds since the
node booted. Setting the epp_last_* fields with
ktime_get_seconds() instead of ktime_get_real_seconds()
resolves this problem.
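
To illustrate that last point (a hedged sketch; the helper is
hypothetical): ktime_get_real_seconds() returns wall-clock
seconds since the epoch, while ktime_get_seconds() returns
monotonic seconds since boot, so a since-boot interval must be
computed against the monotonic clock:

    #include <linux/ktime.h>

    /* 'last_access' is assumed to be recorded with
     * ktime_get_seconds(); mixing in ktime_get_real_seconds()
     * here would yield the bogus (negative) intervals this
     * patch fixes.
     */
    static time64_t example_idle_time(time64_t last_access)
    {
            return ktime_get_seconds() - last_access;
    }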
Test-Parameters: trivial
Change-Id: Ia2d559454287675699a067121760543a2e6877da
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/29859
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
14 files changed:
]) # LIBCFS_KTIME_GET_REAL_SECONDS
#
+# Kernel version 3.17 created ktime_get_ns wrapper
+#
+AC_DEFUN([LIBCFS_KTIME_GET_NS],[
+LB_CHECK_COMPILE([does function 'ktime_get_ns' exist],
+ktime_get_ns, [
+ #include <linux/ktime.h>
+],[
+ u64 nanoseconds;
+
+ nanoseconds = ktime_get_ns();
+],[],[
+ AC_DEFINE(NEED_KTIME_GET_NS, 1,
+ ['ktime_get_ns' is not available])
+])
+]) # LIBCFS_KTIME_GET_NS
+
+#
# Kernel version 3.17 created ktime_get_real_ns wrapper
#
AC_DEFUN([LIBCFS_KTIME_GET_REAL_NS],[
# 3.17
LIBCFS_HLIST_ADD_AFTER
LIBCFS_TIMESPEC64
+LIBCFS_KTIME_GET_NS
LIBCFS_KTIME_GET_REAL_TS64
LIBCFS_KTIME_GET_REAL_SECONDS
LIBCFS_KTIME_GET_REAL_NS
time64_t ktime_get_seconds(void);
#endif /* HAVE_KTIME_GET_SECONDS */
+#ifdef NEED_KTIME_GET_NS
+static inline u64 ktime_get_ns(void)
+{
+ return ktime_to_ns(ktime_get());
+}
+#endif /* NEED_KTIME_GET_NS */
+
#ifdef NEED_KTIME_GET_REAL_NS
static inline u64 ktime_get_real_ns(void)
{
atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
- cfs_time_t cc_expire; /* in seconds */
+ time64_t cc_expire; /* in seconds */
unsigned int cc_early_expire:1;
unsigned long cc_flags;
struct vfs_cred cc_vcred;
struct gss_ctx **ctx_new);
__u32 lgss_inquire_context(
struct gss_ctx *ctx,
- unsigned long *endtime);
+ time64_t *endtime);
__u32 lgss_get_mic(
struct gss_ctx *ctx,
int msgcnt,
struct gss_ctx *ctx_new);
__u32 (*gss_inquire_context)(
struct gss_ctx *ctx,
- unsigned long *endtime);
+ time64_t *endtime);
__u32 (*gss_get_mic)(
struct gss_ctx *ctx,
int msgcnt,
*/
#define GSS_GC_INTERVAL (60 * 60) /* 60 minutes */
-static inline
-unsigned long gss_round_ctx_expiry(unsigned long expiry,
- unsigned long sec_flags)
+static inline time64_t gss_round_ctx_expiry(time64_t expiry,
+ unsigned long sec_flags)
- if (sec_flags & PTLRPC_SEC_FL_REVERSE)
- return expiry;
+ if (sec_flags & PTLRPC_SEC_FL_REVERSE)
+ return expiry;
- if (get_seconds() + __TIMEOUT_DELTA <= expiry)
- return expiry - __TIMEOUT_DELTA;
+ if (ktime_get_real_seconds() + __TIMEOUT_DELTA <= expiry)
+ return expiry - __TIMEOUT_DELTA;
key_revoke_locked(key);
}
-static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
+static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, time64_t timeout)
{
struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
- struct timer_list *timer = gctx_kr->gck_timer;
+ struct timer_list *timer = gctx_kr->gck_timer;
- CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
- timeout = msecs_to_jiffies(timeout * MSEC_PER_SEC) +
- cfs_time_current();
+ CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout);
- timer->expires = timeout;
+ timer->expires = cfs_time_seconds(timeout) + jiffies;
timer->data = (unsigned long ) ctx;
timer->function = ctx_upcall_timeout_kr;
- ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
+ ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT;
clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
atomic_inc(&ctx->cc_refcount); /* for the caller */
struct ptlrpc_cli_ctx *new_ctx,
struct key *key)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_node __maybe_unused *hnode;
- struct ptlrpc_cli_ctx *ctx;
- cfs_time_t now;
- ENTRY;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct hlist_node __maybe_unused *hnode;
+ struct ptlrpc_cli_ctx *ctx;
+ time64_t now;
- LASSERT(sec_is_reverse(sec));
+ ENTRY;
+ LASSERT(sec_is_reverse(sec));
spin_lock(&sec->ps_lock);
- now = cfs_time_current_sec();
+ now = ktime_get_real_seconds();
/* set all existing ctxs short expiry */
cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_node __maybe_unused *pos, *next;
- struct ptlrpc_cli_ctx *ctx;
- struct gss_cli_ctx *gctx;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct hlist_node __maybe_unused *pos, *next;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx;
+ time64_t now = ktime_get_real_seconds();
spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
snprintf(mech, sizeof(mech), "N/A");
mech[sizeof(mech) - 1] = '\0';
- seq_printf(seq, "%p: uid %u, ref %d, expire %lu(%+ld), fl %s, "
- "seq %d, win %u, key %08x(ref %d), "
- "hdl %#llx:%#llx, mech: %s\n",
+ seq_printf(seq,
+ "%p: uid %u, ref %d, expire %lld(%+lld), fl %s, seq %d, win %u, key %08x(ref %d), hdl %#llx:%#llx, mech: %s\n",
ctx, ctx->cc_vcred.vc_uid,
atomic_read(&ctx->cc_refcount),
ctx->cc_expire,
kc_cfx:1,
kc_seed_init:1,
kc_have_acceptor_subkey:1;
__u8 kc_seed[16];
__u64 kc_seq_send;
__u64 kc_seq_recv;
gss_get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
goto out_err;
- /* end time */
- if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
+ /* end time. While kc_endtime might be 64 bit the krb5 API
+ * still uses 32 bits. To delay the 2038 bug, treat the incoming
+ * value as a u32, which gives us until 2106. See the link for
+ * details:
+ *
+ * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
+ */
+ if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
goto out_err;
/* seq send */
{
unsigned int tmp_uint, keysize;
- /* end time */
- if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
+ /* end time. While kc_endtime might be 64 bit the krb5 API
+ * still uses 32 bits. To delay the 2038 bug, treat the incoming
+ * value as a u32, which gives us until 2106. See the link for
+ * details:
+ *
+ * http://web.mit.edu/kerberos/www/krb5-current/doc/appdev/y2038.html
+ */
+ if (gss_get_bytes(&p, end, &kctx->kc_endtime, sizeof(u32)))
goto out_err;
/* flags */
static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
- unsigned long *endtime)
+ time64_t *endtime)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
- *endtime = (unsigned long)((__u32) kctx->kc_endtime);
+ *endtime = kctx->kc_endtime;
* this interface is much simplified, currently we only need endtime.
*/
__u32 lgss_inquire_context(struct gss_ctx *context_handle,
- unsigned long *endtime)
+ time64_t *endtime)
{
LASSERT(context_handle);
LASSERT(context_handle->mech_type);
static
__u32 gss_inquire_context_null(struct gss_ctx *gss_context,
- unsigned long *endtime)
+ time64_t *endtime)
{
/* quick timeout for testing purposes */
- *endtime = cfs_time_current_sec() + 60;
+ *endtime = ktime_get_real_seconds() + 60;
CERROR("Failed to read context expiration time");
return -1;
}
CERROR("Failed to read context expiration time");
return -1;
}
- skc->sc_expire = tmp + cfs_time_current_sec();
+ skc->sc_expire = tmp + ktime_get_real_seconds();
/* 5. host random is used as nonce for encryption */
if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
static
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
- unsigned long *endtime)
+ time64_t *endtime)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
/* currently the expiry time passed down from user-space
* is invalid, here we retrive it from mech.
*/
- if (lgss_inquire_context(rsci.ctx.gsc_mechctx,
- (unsigned long *)&ctx_expiry)) {
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
goto out;
}
CERROR("unable to get expire time, drop it\n");
goto out;
}
struct gss_cli_ctx *gctx)
{
struct rsc rsci, *rscp = NULL;
- unsigned long ctx_expiry;
+ time64_t ctx_expiry;
__u32 major;
int rc;
ENTRY;
int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
{
- const cfs_time_t expire = 20;
- struct rsc *rscp;
+ const time64_t expire = 20;
+ struct rsc *rscp;
rscp = gss_svc_searchbyctx(handle);
if (rscp) {
CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
&rscp->ctx, rscp);
- rscp->h.expiry_time = cfs_time_current_sec() + expire;
+ rscp->h.expiry_time = ktime_get_real_seconds() + expire;
COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
}
return 0;
if (!ctx->cc_early_expire)
clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
+ CWARN("ctx %p(%u->%s) get expired: %lld(%+llds)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
ctx->cc_expire,
ctx->cc_expire == 0 ? 0 :
- cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ ctx->cc_expire - ktime_get_real_seconds());
sptlrpc_cli_ctx_wakeup(ctx);
return 1;
return 0;
/* check real expiration */
- if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
+ if (ctx->cc_expire > ktime_get_real_seconds())
return 0;
cli_ctx_expire(ctx);
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
- struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
- unsigned long ctx_expiry;
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ time64_t ctx_expiry;
if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
CERROR("ctx %p(%u): unable to inquire, expire it now\n",
if (sec_is_reverse(ctx->cc_sec)) {
CWARN("server installed reverse ctx %p idx %#llx, "
- "expiry %lu(%+lds)\n", ctx,
+ "expiry %lld(%+llds)\n", ctx,
gss_handle_to_u64(&gctx->gc_handle),
ctx->cc_expire,
- cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ ctx->cc_expire - ktime_get_real_seconds());
} else {
CWARN("client refreshed ctx %p idx %#llx (%u->%s), "
- "expiry %lu(%+lds)\n", ctx,
+ "expiry %lld(%+llds)\n", ctx,
gss_handle_to_u64(&gctx->gc_handle),
ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
ctx->cc_expire,
- cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+ ctx->cc_expire - ktime_get_real_seconds());
/* install reverse svc ctx for root context */
if (ctx->cc_vcred.vc_uid == 0)
unsigned long epp_st_missings; /* # of cache missing */
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
- cfs_time_t epp_st_max_wait; /* in jeffies */
+ ktime_t epp_st_max_wait; /* in nanoseconds */
unsigned long epp_st_outofmem; /* # of out of mem requests */
/*
* pointers to pools, may be vmalloc'd
"total pages: %lu\n"
"total free: %lu\n"
"idle index: %lu/100\n"
"total pages: %lu\n"
"total free: %lu\n"
"idle index: %lu/100\n"
- "last shrink: %lds\n"
- "last access: %lds\n"
+ "last shrink: %llds\n"
+ "last access: %llds\n"
"max pages reached: %lu\n"
"grows: %u\n"
"grows failure: %u\n"
"max pages reached: %lu\n"
"grows: %u\n"
"grows failure: %u\n"
"cache missing: %lu\n"
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
"cache missing: %lu\n"
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
- "max wait time: %ld/%lu\n"
+ "max wait time ms: %lld\n"
"out of mem: %lu\n",
totalram_pages, PAGES_PER_POOL,
page_pools.epp_max_pages,
"out of mem: %lu\n",
totalram_pages, PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_total_pages,
page_pools.epp_free_pages,
page_pools.epp_idle_idx,
- (long)(ktime_get_seconds() - page_pools.epp_last_shrink),
- (long)(ktime_get_seconds() - page_pools.epp_last_access),
+ ktime_get_seconds() - page_pools.epp_last_shrink,
+ ktime_get_seconds() - page_pools.epp_last_access,
page_pools.epp_st_max_pages,
page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
page_pools.epp_st_missings,
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC),
+ ktime_to_ms(page_pools.epp_st_max_wait),
page_pools.epp_st_outofmem);
spin_unlock(&page_pools.epp_lock);
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
*/
- if (unlikely(ktime_get_real_seconds() - page_pools.epp_last_access >
+ if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
page_pools.epp_idle_idx = IDLE_IDX_MAX;
(long)sc->nr_to_scan, page_pools.epp_free_pages);
page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = ktime_get_real_seconds();
+ page_pools.epp_last_shrink = ktime_get_seconds();
}
spin_unlock(&page_pools.epp_lock);
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
*/
- if (unlikely(ktime_get_real_seconds() - page_pools.epp_last_access >
+ if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
page_pools.epp_idle_idx = IDLE_IDX_MAX;
*/
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
- wait_queue_t waitlink;
- unsigned long this_idle = -1;
- cfs_time_t tick = 0;
- long now;
- int p_idx, g_idx;
- int i;
+ wait_queue_t waitlink;
+ unsigned long this_idle = -1;
+ u64 tick_ns = 0;
+ time64_t now;
+ int p_idx, g_idx;
+ int i;
LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(desc->bd_iov_count > 0);
page_pools.epp_st_access++;
again:
if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
- if (tick == 0)
- tick = cfs_time_current();
+ if (tick_ns == 0)
+ tick_ns = ktime_get_ns();
now = ktime_get_real_seconds();
- /* record max wait time */
- if (unlikely(tick != 0)) {
- tick = cfs_time_current() - tick;
- if (tick > page_pools.epp_st_max_wait)
- page_pools.epp_st_max_wait = tick;
- }
+ /* record max wait time */
+ if (unlikely(tick_ns)) {
+ ktime_t tick = ktime_sub_ns(ktime_get(), tick_ns);
+
+ if (ktime_after(tick, page_pools.epp_st_max_wait))
+ page_pools.epp_st_max_wait = tick;
+ }
/* proceed with rest of allocation */
page_pools.epp_free_pages -= desc->bd_iov_count;
this_idle) /
(IDLE_IDX_WEIGHT + 1);
- page_pools.epp_last_access = ktime_get_real_seconds();
+ page_pools.epp_last_access = ktime_get_seconds();
spin_unlock(&page_pools.epp_lock);
return 0;
page_pools.epp_growing = 0;
page_pools.epp_idle_idx = 0;
- page_pools.epp_last_shrink = ktime_get_real_seconds();
- page_pools.epp_last_access = ktime_get_real_seconds();
+ page_pools.epp_last_shrink = ktime_get_seconds();
+ page_pools.epp_last_access = ktime_get_seconds();
spin_lock_init(&page_pools.epp_lock);
page_pools.epp_total_pages = 0;
page_pools.epp_st_missings = 0;
page_pools.epp_st_lowfree = 0;
page_pools.epp_st_max_wqlen = 0;
- page_pools.epp_st_max_wait = 0;
+ page_pools.epp_st_max_wait = ktime_set(0, 0);
page_pools.epp_st_outofmem = 0;
enc_pools_alloc();
if (page_pools.epp_st_access > 0) {
CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%lu, out of mem %lu\n",
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait ms %lld, out of mem %lu\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC),
+ ktime_to_ms(page_pools.epp_st_max_wait),
page_pools.epp_st_outofmem);
}
}