Whamcloud - gitweb
LU-4423 ptlrpc: use 64-bit times for request times 77/24977/14
author Arnd Bergmann <arnd@arndb.de>
Mon, 1 May 2017 18:07:59 +0000 (14:07 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Tue, 9 May 2017 03:44:33 +0000 (03:44 +0000)
All request timestamps and deadlines in lustre are recorded in time_t
and timeval units, which overflow in 2038 on 32-bit systems.

In this patch, I'm converting them to time64_t and timespec64,
respectively. Unfortunately, this makes a relatively large patch,
but I could not find an obvious way to split it up further without
breaking the atomicity of the change.

Also unfortunately, this introduces two instances of div_u64_rem()
in the request path, which can be slow on 32-bit architectures. This
can probably be avoided by a larger restructuring of the code, but
it is unlikely that Lustre is used in performance-critical setups
on 32-bit architectures, so it seems better to optimize for correctness
rather than speed here.

Linux-commit: 219e6de627243c8dbc701eaafe1c30c481d1f82c

Change-Id: Iff3c2bdb50bbb34d27edd4402838f915c16530f4
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/24977
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Tested-by: Jenkins
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
15 files changed:
libcfs/autoconf/lustre-libcfs.m4
libcfs/include/libcfs/linux/linux-time.h
lustre/include/lustre_import.h
lustre/include/lustre_net.h
lustre/mdc/mdc_locks.c
lustre/mdc/mdc_reint.c
lustre/osc/osc_request.c
lustre/ptlrpc/client.c
lustre/ptlrpc/events.c
lustre/ptlrpc/import.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/service.c
lustre/target/tgt_handler.c

index db240c2..fb93f0e 100644 (file)
@@ -435,6 +435,41 @@ ktime_to_timespec64, [
 ]) # LIBCFS_KTIME_TO_TIMESPEC64
 
 #
+# Kernel version 3.17 introduced timespec64_sub
+#
+AC_DEFUN([LIBCFS_TIMESPEC64_SUB],[
+LB_CHECK_COMPILE([does function 'timespec64_sub' exist],
+timespec64_sub, [
+       #include <linux/time.h>
+],[
+       struct timespec64 later,earlier,diff;
+
+       diff = timespec64_sub(later, earlier);
+],[
+       AC_DEFINE(HAVE_TIMESPEC64_SUB, 1,
+               ['timespec64_sub' is available])
+])
+]) # LIBCFS_TIMESPEC64_SUB
+
+#
+# Kernel version 3.17 introduced timespec64_to_ktime
+#
+AC_DEFUN([LIBCFS_TIMESPEC64_TO_KTIME],[
+LB_CHECK_COMPILE([does function 'timespec64_to_ktime' exist],
+timespec64_to_ktime, [
+       #include <linux/ktime.h>
+],[
+       struct timespec64 ts;
+       ktime_t now;
+
+       now = timespec64_to_ktime(ts);
+],[
+       AC_DEFINE(HAVE_TIMESPEC64_TO_KTIME, 1,
+               ['timespec64_to_ktime' is available])
+])
+]) # LIBCFS_TIMESPEC64_TO_KTIME
+
+#
 # Kernel version 3.19 introduced ktime_get_seconds
 #
 AC_DEFUN([LIBCFS_KTIME_GET_SECONDS],[
@@ -617,6 +652,8 @@ LIBCFS_KTIME_GET_REAL_TS64
 LIBCFS_KTIME_GET_REAL_SECONDS
 LIBCFS_KTIME_GET_REAL_NS
 LIBCFS_KTIME_TO_TIMESPEC64
+LIBCFS_TIMESPEC64_SUB
+LIBCFS_TIMESPEC64_TO_KTIME
 # 3.19
 LIBCFS_KTIME_GET_SECONDS
 # 4.2
index 6d1b085..2dd750f 100644 (file)
@@ -111,6 +111,11 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
        return ts;
 }
 
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts)
+{
+       return ts;
+}
+
 #else
 struct timespec64 {
        time64_t        tv_sec;         /* seconds */
@@ -125,6 +130,15 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
        ret.tv_nsec = ts.tv_nsec;
        return ret;
 }
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+       struct timespec ret;
+
+       ret.tv_sec = (time_t)ts64.tv_sec;
+       ret.tv_nsec = ts64.tv_nsec;
+       return ret;
+}
 #endif /* __BITS_PER_LONG != 64 */
 
 #endif /* HAVE_TIMESPEC64 */
@@ -172,6 +186,25 @@ static inline struct timespec64 ktime_to_timespec64(ktime_t kt)
 }
 #endif /* HAVE_KTIME_TO_TIMESPEC64 */
 
+#ifndef HAVE_TIMESPEC64_SUB
+static inline struct timespec64
+timespec64_sub(struct timespec64 later, struct timespec64 earlier)
+{
+       struct timespec diff;
+
+       diff = timespec_sub(timespec64_to_timespec(later),
+                           timespec64_to_timespec(earlier));
+       return timespec_to_timespec64(diff);
+}
+#endif
+
+#ifndef HAVE_TIMESPEC64_TO_KTIME
+static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
+{
+       return ktime_set(ts.tv_sec, ts.tv_nsec);
+}
+#endif
+
 static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
 {
         return time_before(t1, t2);
index cccbed5..1b44d32 100644 (file)
@@ -75,7 +75,7 @@ struct ptlrpc_at_array {
        struct list_head *paa_reqs_array; /** array to hold requests */
         __u32             paa_size;       /** the size of array */
         __u32             paa_count;      /** the total count of reqs */
-        time_t            paa_deadline;   /** the earliest deadline of reqs */
+       time64_t          paa_deadline;   /** the earliest deadline of reqs */
         __u32            *paa_reqs_count; /** the count of reqs in each entry */
 };
 
index 6cc0c91..926253f 100644 (file)
@@ -786,16 +786,16 @@ struct ptlrpc_cli_req {
        cfs_duration_t                   cr_delay_limit;
        /** time request was first queued */
        cfs_time_t                       cr_queued_time;
-       /** request sent timeval */
-       struct timeval                   cr_sent_tv;
+       /** request sent in nanoseconds */
+       ktime_t                          cr_sent_ns;
        /** time for request really sent out */
-       time_t                           cr_sent_out;
+       time64_t                         cr_sent_out;
        /** when req reply unlink must finish. */
-       time_t                           cr_reply_deadline;
+       time64_t                         cr_reply_deadline;
        /** when req bulk unlink must finish. */
-       time_t                           cr_bulk_deadline;
+       time64_t                         cr_bulk_deadline;
        /** when req unlink must finish. */
-       time_t                           cr_req_deadline;
+       time64_t                         cr_req_deadline;
        /** Portal to which this request would be sent */
        short                            cr_req_ptl;
        /** Portal where to wait for reply and where reply would be sent */
@@ -850,7 +850,7 @@ struct ptlrpc_cli_req {
 #define rq_bulk                        rq_cli.cr_bulk
 #define rq_delay_limit         rq_cli.cr_delay_limit
 #define rq_queued_time         rq_cli.cr_queued_time
-#define rq_sent_tv             rq_cli.cr_sent_tv
+#define rq_sent_ns             rq_cli.cr_sent_ns
 #define rq_real_sent           rq_cli.cr_sent_out
 #define rq_reply_deadline      rq_cli.cr_reply_deadline
 #define rq_bulk_deadline       rq_cli.cr_bulk_deadline
@@ -912,7 +912,7 @@ struct ptlrpc_srv_req {
        struct ptlrpc_nrs_request        sr_nrq;
        /** @} nrs */
        /** request arrival time */
-       struct timeval                   sr_arrival_time;
+       struct timespec64                sr_arrival_time;
        /** server's half ctx */
        struct ptlrpc_svc_ctx           *sr_svc_ctx;
        /** (server side), pointed directly into req buffer */
@@ -1117,9 +1117,9 @@ struct ptlrpc_request {
        /**
         * when request/reply sent (secs), or time when request should be sent
         */
-       time_t                           rq_sent;
+       time64_t                         rq_sent;
        /** when request must finish. */
-       time_t                           rq_deadline;
+       time64_t                         rq_deadline;
        /** request format description */
        struct req_capsule               rq_pill;
 };
@@ -2051,7 +2051,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
        LASSERT(req != NULL);
        desc = req->rq_bulk;
 
-       if (req->rq_bulk_deadline > cfs_time_current_sec())
+       if (req->rq_bulk_deadline > ktime_get_real_seconds())
                return 1;
 
        if (!desc)
@@ -2471,7 +2471,7 @@ ptlrpc_client_early(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_replied(struct ptlrpc_request *req)
 {
-       if (req->rq_reply_deadline > cfs_time_current_sec())
+       if (req->rq_reply_deadline > ktime_get_real_seconds())
                return 0;
        return req->rq_replied;
 }
@@ -2480,7 +2480,7 @@ ptlrpc_client_replied(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_recv(struct ptlrpc_request *req)
 {
-       if (req->rq_reply_deadline > cfs_time_current_sec())
+       if (req->rq_reply_deadline > ktime_get_real_seconds())
                return 1;
        return req->rq_receiving_reply;
 }
@@ -2491,11 +2491,11 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
        int rc;
 
        spin_lock(&req->rq_lock);
-       if (req->rq_reply_deadline > cfs_time_current_sec()) {
+       if (req->rq_reply_deadline > ktime_get_real_seconds()) {
                spin_unlock(&req->rq_lock);
                return 1;
        }
-       if (req->rq_req_deadline > cfs_time_current_sec()) {
+       if (req->rq_req_deadline > ktime_get_real_seconds()) {
                spin_unlock(&req->rq_lock);
                return 1;
        }
index 35b96ce..43c1152 100644 (file)
@@ -792,7 +792,7 @@ resend:
         if (resends) {
                 req->rq_generation_set = 1;
                 req->rq_import_generation = generation;
-                req->rq_sent = cfs_time_current_sec() + resends;
+               req->rq_sent = ktime_get_real_seconds() + resends;
         }
 
        /* It is important to obtain modify RPC slot first (if applicable), so
index 898d349..1ef03bd 100644 (file)
@@ -222,7 +222,7 @@ rebuild:
         if (resends) {
                 req->rq_generation_set = 1;
                 req->rq_import_generation = generation;
-                req->rq_sent = cfs_time_current_sec() + resends;
+               req->rq_sent = ktime_get_real_seconds() + resends;
         }
         level = LUSTRE_IMP_FULL;
  resend:
index 6c3b354..b9c2153 100644 (file)
@@ -1637,9 +1637,9 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
        /* cap resend delay to the current request timeout, this is similar to
         * what ptlrpc does (see after_reply()) */
        if (aa->aa_resends > new_req->rq_timeout)
-               new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
+               new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
        else
-               new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
+               new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
         new_req->rq_generation_set = 1;
         new_req->rq_import_generation = request->rq_import_generation;
 
index 13a3ece..e31010c 100644 (file)
@@ -357,7 +357,7 @@ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
 {
         unsigned int nl, oldnl;
         struct imp_at *at;
-        time_t now = cfs_time_current_sec();
+       time64_t now = ktime_get_real_seconds();
 
         LASSERT(req->rq_import);
 
@@ -371,9 +371,8 @@ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
                 */
                CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
                       D_ADAPTTO : D_WARNING,
-                      "Reported service time %u > total measured time "
-                      CFS_DURATION_T"\n", service_time,
-                      cfs_time_sub(now, req->rq_sent));
+                      "Reported service time %u > total measured time %lld\n",
+                      service_time, now - req->rq_sent);
                return;
        }
 
@@ -419,11 +418,11 @@ static int unpack_reply(struct ptlrpc_request *req)
 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
 __must_hold(&req->rq_lock)
 {
-        struct ptlrpc_request *early_req;
-        time_t                 olddl;
-        int                    rc;
-        ENTRY;
+       struct ptlrpc_request *early_req;
+       time64_t olddl;
+       int rc;
 
+       ENTRY;
         req->rq_early = 0;
        spin_unlock(&req->rq_lock);
 
@@ -464,10 +463,10 @@ __must_hold(&req->rq_lock)
                           ptlrpc_at_get_net_latency(req);
 
        DEBUG_REQ(D_ADAPTTO, req,
-                 "Early reply #%d, new deadline in "CFS_DURATION_T"s "
-                 "("CFS_DURATION_T"s)", req->rq_early_count,
-                 cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
-                 cfs_time_sub(req->rq_deadline, olddl));
+                 "Early reply #%d, new deadline in %llds (%llds)",
+                 req->rq_early_count,
+                 req->rq_deadline - ktime_get_real_seconds(),
+                 req->rq_deadline - olddl);
 
        RETURN(rc);
 }
@@ -744,7 +743,7 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 
        /* Let's setup deadline for req/reply/bulk unlink for opcode. */
        if (cfs_fail_val == opcode) {
-               time_t *fail_t = NULL, *fail2_t = NULL;
+               time64_t *fail_t = NULL, *fail2_t = NULL;
 
                if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
                        fail_t = &request->rq_bulk_deadline;
@@ -758,10 +757,11 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                }
 
                if (fail_t) {
-                       *fail_t = cfs_time_current_sec() + LONG_UNLINK;
+                       *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
 
                        if (fail2_t)
-                               *fail2_t = cfs_time_current_sec() + LONG_UNLINK;
+                               *fail2_t = ktime_get_real_seconds() +
+                                          LONG_UNLINK;
 
                        /* The RPC is infected, let the test to change the
                         * fail_loc */
@@ -1327,14 +1327,14 @@ __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
  */
 static int after_reply(struct ptlrpc_request *req)
 {
-        struct obd_import *imp = req->rq_import;
-        struct obd_device *obd = req->rq_import->imp_obd;
-        int rc;
-        struct timeval work_start;
-       __u64 committed;
-        long timediff;
-        ENTRY;
+       struct obd_import *imp = req->rq_import;
+       struct obd_device *obd = req->rq_import->imp_obd;
+       ktime_t work_start;
+       u64 committed;
+       s64 timediff;
+       int rc;
 
+       ENTRY;
         LASSERT(obd != NULL);
         /* repbuf must be unlinked */
        LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
@@ -1360,8 +1360,8 @@ static int after_reply(struct ptlrpc_request *req)
                 RETURN(0);
         }
 
-       do_gettimeofday(&work_start);
-       timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+       work_start = ktime_get_real();
+       timediff = ktime_us_delta(req->rq_sent_ns, work_start);
 
         /*
          * NB Until this point, the whole of the incoming message,
@@ -1386,7 +1386,7 @@ static int after_reply(struct ptlrpc_request *req)
        /* retry indefinitely on EINPROGRESS */
        if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
            ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
-               time_t  now = cfs_time_current_sec();
+               time64_t now = ktime_get_real_seconds();
 
                DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
                spin_lock(&req->rq_lock);
@@ -1537,7 +1537,7 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
                    pool_is_at_full_capacity())
                        RETURN(-ENOMEM);
 
-        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+       if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
             (!req->rq_generation_set ||
              req->rq_import_generation == imp->imp_generation))
                 RETURN (0);
@@ -1729,7 +1729,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 
                /* delayed resend - skip */
                if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
-                   req->rq_sent > cfs_time_current_sec())
+                   req->rq_sent > ktime_get_real_seconds())
                        continue;
 
                if (!(req->rq_phase == RQ_PHASE_RPC ||
@@ -2105,14 +2105,13 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
        req->rq_timedout = 1;
        spin_unlock(&req->rq_lock);
 
-       DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
-                 "/real "CFS_DURATION_T"]",
+       DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
                   req->rq_net_err ? "failed due to network error" :
                      ((req->rq_real_sent == 0 ||
-                       cfs_time_before(req->rq_real_sent, req->rq_sent) ||
-                       cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+                      req->rq_real_sent < req->rq_sent ||
+                      req->rq_real_sent >= req->rq_deadline) ?
                       "timed out for sent delay" : "timed out for slow reply"),
-                  req->rq_sent, req->rq_real_sent);
+                 (s64)req->rq_sent, (s64)req->rq_real_sent);
 
        if (imp != NULL && obd_debug_peer_on_timeout)
                LNetDebugPeer(imp->imp_connection->c_peer);
@@ -2168,11 +2167,11 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
  */
 int ptlrpc_expired_set(void *data)
 {
-       struct ptlrpc_request_set       *set = data;
-       struct list_head                *tmp;
-       time_t                          now = cfs_time_current_sec();
-       ENTRY;
+       struct ptlrpc_request_set *set = data;
+       struct list_head *tmp;
+       time64_t now = ktime_get_real_seconds();
 
+       ENTRY;
        LASSERT(set != NULL);
 
        /*
@@ -2254,13 +2253,13 @@ static void ptlrpc_interrupted_set(void *data)
  */
 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
 {
-       struct list_head        *tmp;
-       time_t                   now = cfs_time_current_sec();
-       int                      timeout = 0;
-       struct ptlrpc_request   *req;
-       int                      deadline;
-       ENTRY;
+       struct list_head *tmp;
+       time64_t now = ktime_get_real_seconds();
+       int timeout = 0;
+       struct ptlrpc_request *req;
+       time64_t deadline;
 
+       ENTRY;
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
@@ -2570,8 +2569,8 @@ static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
            async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
-               request->rq_reply_deadline =
-                       cfs_time_current_sec() + LONG_UNLINK;
+               request->rq_reply_deadline = ktime_get_real_seconds() +
+                                            LONG_UNLINK;
 
         /*
          * Nothing left to do.
@@ -3196,7 +3195,7 @@ static spinlock_t ptlrpc_last_xid_lock;
 #define YEAR_2004 (1ULL << 30)
 void ptlrpc_init_xid(void)
 {
-       time_t now = cfs_time_current_sec();
+       time64_t now = ktime_get_real_seconds();
 
        spin_lock_init(&ptlrpc_last_xid_lock);
        if (now < YEAR_2004) {
@@ -3344,7 +3343,7 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
 {
        /* re-initialize the req */
        req->rq_timeout         = obd_timeout;
-       req->rq_sent            = cfs_time_current_sec();
+       req->rq_sent            = ktime_get_real_seconds();
        req->rq_deadline        = req->rq_sent + req->rq_timeout;
        req->rq_phase           = RQ_PHASE_INTERPRET;
        req->rq_next_phase      = RQ_PHASE_COMPLETE;
index 43c7d9e..28533cc 100644 (file)
@@ -59,7 +59,7 @@ void request_out_callback(struct lnet_event *ev)
        sptlrpc_request_out_callback(req);
 
        spin_lock(&req->rq_lock);
-       req->rq_real_sent = cfs_time_current_sec();
+       req->rq_real_sent = ktime_get_real_seconds();
        req->rq_req_unlinked = 1;
        /* reply_in_callback happened before request_out_callback? */
        if (req->rq_reply_unlinked)
@@ -250,9 +250,9 @@ void client_bulk_callback(struct lnet_event *ev)
 static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
 {
-       __u64   sec = req->rq_arrival_time.tv_sec;
-       __u32   usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
-       __u64   new_seq;
+       u64 sec = req->rq_arrival_time.tv_sec;
+       u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
+       u64 new_seq;
 
        /* set sequence ID for request and add it to history list,
         * it must be called with hold svcpt::scp_lock */
@@ -335,7 +335,7 @@ void request_in_callback(struct lnet_event *ev)
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
-       do_gettimeofday(&req->rq_arrival_time);
+       ktime_get_real_ts64(&req->rq_arrival_time);
        /* Multi-Rail: keep track of both initiator and source NID. */
        req->rq_peer = ev->initiator;
        req->rq_source = ev->source;
index e559c6c..65938cc 100644 (file)
@@ -234,7 +234,7 @@ void ptlrpc_deactivate_import(struct obd_import *imp)
 EXPORT_SYMBOL(ptlrpc_deactivate_import);
 
 static unsigned int
-ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
+ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
 {
         long dl;
 
@@ -259,7 +259,7 @@ ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
 
 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
 {
-       time_t now = cfs_time_current_sec();
+       time64_t now = ktime_get_real_seconds();
        struct list_head *tmp, *n;
        struct ptlrpc_request *req;
        unsigned int timeout = 0;
index f10dc6c..9aa7ed5 100644 (file)
@@ -973,35 +973,34 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
        rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
 
        if (rc == 0) {
-               struct timespec arrival, sent, arrivaldiff;
+               struct timespec64 arrival, sent, arrivaldiff;
                char nidstr[LNET_NIDSTR_SIZE];
 
                req = srhi->srhi_req;
 
                libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr));
                arrival.tv_sec = req->rq_arrival_time.tv_sec;
-               arrival.tv_nsec =
-                       req->rq_arrival_time.tv_usec * NSEC_PER_USEC;
+               arrival.tv_nsec = req->rq_arrival_time.tv_nsec;
                sent.tv_sec = req->rq_sent;
                sent.tv_nsec = 0;
-               arrivaldiff = timespec_sub(sent, arrival);
+               arrivaldiff = timespec64_sub(sent, arrival);
 
                /* Print common req fields.
                 * CAVEAT EMPTOR: we're racing with the service handler
                 * here.  The request could contain any old crap, so you
                 * must be just as careful as the service's request
                 * parser. Currently I only print stuff here I know is OK
-                * to look at coz it was set up in request_in_callback()!!! */
-               seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:"
-                          "%ld.%06ld:%ld.%06lds(%+ld.0s) ",
+                * to look at coz it was set up in request_in_callback()!!!
+                */
+               seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld.%06lld:%lld.%06llds(%+lld.0s) ",
                           req->rq_history_seq, nidstr,
                           libcfs_id2str(req->rq_peer), req->rq_xid,
                           req->rq_reqlen, ptlrpc_rqphase2str(req),
-                          req->rq_arrival_time.tv_sec,
-                          req->rq_arrival_time.tv_usec,
-                          arrivaldiff.tv_sec,
-                          arrivaldiff.tv_nsec / NSEC_PER_USEC,
-                          req->rq_sent - req->rq_deadline);
+                          (s64)req->rq_arrival_time.tv_sec,
+                          (s64)(req->rq_arrival_time.tv_nsec / NSEC_PER_USEC),
+                          (s64)arrivaldiff.tv_sec,
+                          (s64)(arrivaldiff.tv_nsec / NSEC_PER_USEC),
+                          (s64)(req->rq_sent - req->rq_deadline));
                if (svc->srv_ops.so_req_printer == NULL)
                        seq_printf(s, "\n");
                else
index 889216c..6cb29ef 100644 (file)
@@ -440,7 +440,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
-               req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+               req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
 
        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */
@@ -489,7 +489,7 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
 {
        struct ptlrpc_service_part      *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_service           *svc = svcpt->scp_service;
-        int service_time = max_t(int, cfs_time_current_sec() -
+       int service_time = max_t(int, ktime_get_real_seconds() -
                                  req->rq_arrival_time.tv_sec, 1);
 
         if (!(flags & PTLRPC_REPLY_EARLY) &&
@@ -624,7 +624,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
         if (unlikely(rc))
                 goto out;
 
-        req->rq_sent = cfs_time_current_sec();
+       req->rq_sent = ktime_get_real_seconds();
 
        rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                          (rs->rs_difficult && !rs->rs_no_ack) ?
@@ -883,8 +883,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 
        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
 
-       do_gettimeofday(&request->rq_sent_tv);
-       request->rq_sent = cfs_time_current_sec();
+       request->rq_sent_ns = ktime_get_real();
+       request->rq_sent = ktime_get_real_seconds();
        /* We give the server rq_timeout secs to process the req, and
           add the network latency for our local timeout. */
         request->rq_deadline = request->rq_sent + request->rq_timeout +
@@ -925,7 +925,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
        if (rc == -ENOMEM) {
                /* set rq_sent so that this request is treated
                 * as a delayed send in the upper layers */
-               request->rq_sent = cfs_time_current_sec();
+               request->rq_sent = ktime_get_real_seconds();
        }
 
        if (request->rq_memalloc)
index 5367ae4..aa6552e 100644 (file)
@@ -2543,9 +2543,7 @@ void _debug_req(struct ptlrpc_request *req,
 
        va_start(args, fmt);
        libcfs_debug_vmsg2(msgdata, fmt, args,
-                          " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d"
-                          " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
-                          "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
+                          " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
                           req, req->rq_xid, req->rq_transno,
                           req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
                           req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
@@ -2557,8 +2555,8 @@ void _debug_req(struct ptlrpc_request *req,
                           libcfs_nid2str(nid),
                           req->rq_request_portal, req->rq_reply_portal,
                           req->rq_reqlen, req->rq_replen,
-                          req->rq_early_count, req->rq_timedout,
-                          req->rq_deadline,
+                          req->rq_early_count, (s64)req->rq_timedout,
+                          (s64)req->rq_deadline,
                           atomic_read(&req->rq_refcount),
                           DEBUG_REQ_FLAGS(req),
                           req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
index 221974e..7006a57 100644 (file)
@@ -1167,7 +1167,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
        }
 
        /* Set timer for closest deadline */
-       next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
+       next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
                       at_early_margin);
        if (next <= 0) {
                ptlrpc_at_timer((unsigned long)svcpt);
@@ -1198,7 +1198,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
        spin_lock(&svcpt->scp_at_lock);
        LASSERT(list_empty(&req->rq_timed_list));
 
-        index = (unsigned long)req->rq_deadline % array->paa_size;
+       div_u64_rem(req->rq_deadline, array->paa_size, &index);
         if (array->paa_reqs_count[index] > 0) {
                 /* latest rpcs will have the latest deadlines in the list,
                  * so search backward. */
@@ -1261,8 +1261,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_request *reqcopy;
        struct lustre_msg *reqmsg;
-       cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
-       time_t  newdl;
+       time64_t olddl = req->rq_deadline - ktime_get_real_seconds();
+       time64_t newdl;
        int rc;
 
        ENTRY;
@@ -1275,18 +1275,18 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
         /* deadline is when the client expects us to reply, margin is the
            difference between clients' and servers' expectations */
         DEBUG_REQ(D_ADAPTTO, req,
-                  "%ssending early reply (deadline %+lds, margin %+lds) for "
+                 "%ssending early reply (deadline %+llds, margin %+llds) for "
                   "%d+%d", AT_OFF ? "AT off - not " : "",
-                 olddl, olddl - at_get(&svcpt->scp_at_estimate),
+                 (s64)olddl, (s64)(olddl - at_get(&svcpt->scp_at_estimate)),
                  at_get(&svcpt->scp_at_estimate), at_extra);
 
         if (AT_OFF)
                 RETURN(0);
 
         if (olddl < 0) {
-                DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
+               DEBUG_REQ(D_WARNING, req, "Already past deadline (%+llds), "
                           "not sending early reply. Consider increasing "
-                          "at_early_margin (%d)?", olddl, at_early_margin);
+                         "at_early_margin (%d)?", (s64)olddl, at_early_margin);
 
                 /* Return an error so we're not re-added to the timed list. */
                 RETURN(-ETIMEDOUT);
@@ -1326,7 +1326,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
                 * account for network latency). See ptlrpc_at_recv_early_reply
                 */
                at_measured(&svcpt->scp_at_estimate, at_extra +
-                           cfs_time_current_sec() -
+                           ktime_get_real_seconds() -
                            req->rq_arrival_time.tv_sec);
                newdl = req->rq_arrival_time.tv_sec +
                        at_get(&svcpt->scp_at_estimate);
@@ -1335,9 +1335,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
        /* Check to see if we've actually increased the deadline -
         * we may be past adaptive_max */
        if (req->rq_deadline >= newdl) {
-               DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
-                         "(%ld/%ld), not sending early reply\n",
-                         olddl, newdl - cfs_time_current_sec());
+               DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%lld/%lld), not sending early reply\n",
+                         (s64)olddl, (s64)(newdl - ktime_get_real_seconds()));
                RETURN(-ETIMEDOUT);
        }
 
@@ -1424,8 +1423,8 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
         struct ptlrpc_request *rq, *n;
        struct list_head work_list;
         __u32  index, count;
-        time_t deadline;
-        time_t now = cfs_time_current_sec();
+       time64_t deadline;
+       time64_t now = ktime_get_real_seconds();
         cfs_duration_t delay;
         int first, counter = 0;
         ENTRY;
@@ -1456,7 +1455,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
           server will take. Send early replies to everyone expiring soon. */
        INIT_LIST_HEAD(&work_list);
        deadline = -1;
-       index = (unsigned long)array->paa_deadline % array->paa_size;
+       div_u64_rem(array->paa_deadline, array->paa_size, &index);
        count = array->paa_count;
        while (count > 0) {
                count -= array->paa_reqs_count[index];
@@ -1499,8 +1498,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
                 LCONSOLE_WARN("%s: This server is not able to keep up with "
                              "request traffic (cpu-bound).\n",
                              svcpt->scp_service->srv_name);
-               CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
-                     "delay="CFS_DURATION_T"(jiff)\n",
+               CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
                      counter, svcpt->scp_nreqs_incoming,
                      svcpt->scp_nreqs_active,
                      at_get(&svcpt->scp_at_estimate), delay);
@@ -1950,10 +1948,10 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
         }
 
         /* req_in handling should/must be fast */
-        if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
-                DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
-                          cfs_time_sub(cfs_time_current_sec(),
-                                       req->rq_arrival_time.tv_sec));
+       if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
+               DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds",
+                         (s64)(ktime_get_real_seconds() -
+                               req->rq_arrival_time.tv_sec));
 
         /* Set rpc server deadline and add it to the timed list */
         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
@@ -2011,12 +2009,14 @@ static int
 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
                             struct ptlrpc_thread *thread)
 {
-       struct ptlrpc_service   *svc = svcpt->scp_service;
-       struct ptlrpc_request   *request;
-       struct timeval           work_start;
-       struct timeval           work_end;
-       long                     timediff;
-       int                      fail_opc = 0;
+       struct ptlrpc_service *svc = svcpt->scp_service;
+       struct ptlrpc_request *request;
+       ktime_t work_start;
+       ktime_t work_end;
+       ktime_t arrived;
+       s64 timediff_usecs;
+       s64 arrived_usecs;
+       int fail_opc = 0;
 
        ENTRY;
 
@@ -2039,11 +2039,12 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
        if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
                libcfs_debug_dumplog();
 
-       do_gettimeofday(&work_start);
-       timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
+       work_start = ktime_get_real();
+       arrived = timespec64_to_ktime(request->rq_arrival_time);
+       timediff_usecs = ktime_us_delta(arrived, work_start);
        if (likely(svc->srv_stats != NULL)) {
                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
-                                    timediff);
+                                   timediff_usecs);
                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
                                    svcpt->scp_nreqs_incoming);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
@@ -2055,19 +2056,18 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
        if (likely(request->rq_export)) {
                if (unlikely(ptlrpc_check_req(request)))
                        goto put_conn;
-                ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
+               ptlrpc_update_export_timer(request->rq_export,
+                                          timediff_usecs >> 19);
         }
 
         /* Discard requests queued for longer than the deadline.
            The deadline is increased if we send an early reply. */
-        if (cfs_time_current_sec() > request->rq_deadline) {
-                DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
-                          ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
+       if (ktime_get_real_seconds() > request->rq_deadline) {
+               DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n",
                           libcfs_id2str(request->rq_peer),
-                          cfs_time_sub(request->rq_deadline,
-                          request->rq_arrival_time.tv_sec),
-                          cfs_time_sub(cfs_time_current_sec(),
-                          request->rq_deadline));
+                         request->rq_deadline -
+                         request->rq_arrival_time.tv_sec,
+                         ktime_get_real_seconds() - request->rq_deadline);
                 goto put_conn;
         }
 
@@ -2098,21 +2098,17 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
        ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
 
 put_conn:
-       if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
-                    DEBUG_REQ(D_WARNING, request, "Request took longer "
-                              "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
-                              " client may timeout.",
-                              cfs_time_sub(request->rq_deadline,
-                                           request->rq_arrival_time.tv_sec),
-                              cfs_time_sub(cfs_time_current_sec(),
-                                           request->rq_deadline));
-       }
-
-       do_gettimeofday(&work_end);
-       timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
-       CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
-              "%s:%s+%d:%d:x%llu:%s:%d Request procesed in "
-              "%ldus (%ldus total) trans %llu rc %d/%d\n",
+       if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
+               DEBUG_REQ(D_WARNING, request, "Request took longer than estimated (%lld:%llds); client may timeout.",
+                         request->rq_deadline -
+                         request->rq_arrival_time.tv_sec,
+                         ktime_get_real_seconds() - request->rq_deadline);
+       }
+
+       work_end = ktime_get_real();
+       timediff_usecs = ktime_us_delta(work_end, work_start);
+       arrived_usecs = ktime_us_delta(work_end, arrived);
+       CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request procesed in %lldus (%lldus total) trans %llu rc %d/%d\n",
                current_comm(),
                (request->rq_export ?
                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
@@ -2122,8 +2118,8 @@ put_conn:
                 request->rq_xid,
                 libcfs_id2str(request->rq_peer),
                 lustre_msg_get_opc(request->rq_reqmsg),
-                timediff,
-                cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+               timediff_usecs,
+               arrived_usecs,
                 (request->rq_repmsg ?
                  lustre_msg_get_transno(request->rq_repmsg) :
                  request->rq_transno),
@@ -2137,16 +2133,14 @@ put_conn:
                         LASSERT(opc < LUSTRE_MAX_OPCODES);
                         lprocfs_counter_add(svc->srv_stats,
                                             opc + EXTRA_MAX_OPCODES,
-                                            timediff);
+                                           timediff_usecs);
                 }
         }
         if (unlikely(request->rq_early_count)) {
                 DEBUG_REQ(D_ADAPTTO, request,
-                          "sent %d early replies before finishing in "
-                          CFS_DURATION_T"s",
+                         "sent %d early replies before finishing in %llds",
                           request->rq_early_count,
-                          cfs_time_sub(work_end.tv_sec,
-                          request->rq_arrival_time.tv_sec));
+                         arrived_usecs / USEC_PER_SEC);
         }
 
        ptlrpc_server_finish_active_request(svcpt, request);
@@ -3241,11 +3235,11 @@ EXPORT_SYMBOL(ptlrpc_unregister_service);
  * to be shot, so it's intentionally non-aggressive. */
 static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
 {
-       struct ptlrpc_request           *request = NULL;
-       struct timeval                  right_now;
-       long                            timediff;
+       struct ptlrpc_request *request = NULL;
+       struct timespec64 right_now;
+       struct timespec64 timediff;
 
-       do_gettimeofday(&right_now);
+       ktime_get_real_ts64(&right_now);
 
        spin_lock(&svcpt->scp_req_lock);
        /* How long has the next entry been waiting? */
@@ -3259,13 +3253,13 @@ static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
                return 0;
        }
 
-       timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
+       timediff = timespec64_sub(right_now, request->rq_arrival_time);
        spin_unlock(&svcpt->scp_req_lock);
 
-       if ((timediff / ONE_MILLION) >
+       if ((timediff.tv_sec) >
            (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
-               CERROR("%s: unhealthy - request has been waiting %lds\n",
-                      svcpt->scp_service->srv_name, timediff / ONE_MILLION);
+               CERROR("%s: unhealthy - request has been waiting %llds\n",
+                      svcpt->scp_service->srv_name, (s64)timediff.tv_sec);
                return -1;
        }
 
index 02f2902..aab362f 100644 (file)
@@ -1926,13 +1926,12 @@ int tgt_brw_read(struct tgt_session_info *tsi)
         * If getting the lock took more time than
         * client was willing to wait, drop it. b=11330
         */
-       if (cfs_time_current_sec() > req->rq_deadline ||
+       if (ktime_get_real_seconds() > req->rq_deadline ||
            OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
                no_reply = 1;
-               CERROR("Dropping timed-out read from %s because locking"
-                      "object "DOSTID" took %ld seconds (limit was %ld).\n",
+               CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
                       libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
-                      cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+                      ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
                       req->rq_deadline - req->rq_arrival_time.tv_sec);
                GOTO(out_lock, rc = -ETIMEDOUT);
        }
@@ -2202,14 +2201,13 @@ int tgt_brw_write(struct tgt_session_info *tsi)
         * If getting the lock took more time than
         * client was willing to wait, drop it. b=11330
         */
-       if (cfs_time_current_sec() > req->rq_deadline ||
+       if (ktime_get_real_seconds() > req->rq_deadline ||
            OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
                no_reply = true;
-               CERROR("%s: Dropping timed-out write from %s because locking "
-                      "object "DOSTID" took %ld seconds (limit was %ld).\n",
+               CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
                       tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
                       POSTID(&ioo->ioo_oid),
-                      cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+                      ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
                       req->rq_deadline - req->rq_arrival_time.tv_sec);
                GOTO(out_lock, rc = -ETIMEDOUT);
        }