The gc timeout is currently stored in an unsigned long, which is
probably safe until 2106, but this patch converts it to use
ktime_get_real_seconds() and time64_t for consistency.
Linux-commit:
8cc980713ec9e6847896891c54562ad815c33424
Change-Id: I9c66ac818239debe676b78fbee5764cd5b69028c
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/24710
Tested-by: Jenkins
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
* garbage collection
*/
struct list_head ps_gc_list;
- cfs_time_t ps_gc_interval; /* in seconds */
- cfs_time_t ps_gc_next; /* in seconds */
+ time64_t ps_gc_interval; /* in seconds */
+ time64_t ps_gc_next; /* in seconds */
};
static inline int flvr_is_rootonly(__u32 flavor)
ctx_check_death_locked_pf(ctx, freelist);
}
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
- EXIT;
+ sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
+ EXIT;
}
static
retry:
spin_lock(&sec->ps_lock);
- /* gc_next == 0 means never do gc */
- if (remove_dead && sec->ps_gc_next &&
- cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
- gss_ctx_cache_gc_pf(gsec_pf, &freelist);
- gc = 1;
- }
+ /* gc_next == 0 means never do gc */
+ if (remove_dead && sec->ps_gc_next &&
+ (ktime_get_real_seconds() > sec->ps_gc_next)) {
+ gss_ctx_cache_gc_pf(gsec_pf, &freelist);
+ gc = 1;
+ }
cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
{
struct gss_upcall_msg *gmsg;
struct gss_upcall_msg_data *gumd;
- static cfs_time_t ratelimit = 0;
+ static time64_t ratelimit;
ENTRY;
LASSERT(list_empty(&msg->list));
atomic_inc(&gmsg->gum_refcount);
gss_unhash_msg(gmsg);
if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
- cfs_time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
- if (cfs_time_after(now, ratelimit)) {
+ if (now > ratelimit) {
CWARN("upcall timed out, is lgssd running?\n");
ratelimit = now + 15;
}
LASSERT(sec->ps_gc_interval > 0);
LASSERT(list_empty(&sec->ps_gc_list));
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+ sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
spin_lock(&sec_gc_list_lock);
list_add_tail(&sec->ps_gc_list, &sec_gc_list);
CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
- if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
+ if (sec->ps_gc_next > ktime_get_real_seconds())
return;
sec->ps_policy->sp_cops->gc_ctx(sec);
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+ sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
}
static int sec_gc_main(void *arg)
seq_printf(seq, "refcount: %d\n",
atomic_read(&sec->ps_refcount));
seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
- seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval);
- seq_printf(seq, "gc next %ld\n",
+ seq_printf(seq, "gc internal %lld\n", sec->ps_gc_interval);
+ seq_printf(seq, "gc next %lld\n",
sec->ps_gc_interval ?
- sec->ps_gc_next - cfs_time_current_sec() : 0);
+ (s64)(sec->ps_gc_next - ktime_get_real_seconds()) : 0ll);
sptlrpc_sec_put(sec);
out: