]) # LIBCFS_KTIME_TO_TIMESPEC64
#
+# Kernel version 3.17 introduced timespec64_sub, which subtracts two
+# struct timespec64 values.  Define HAVE_TIMESPEC64_SUB when the target
+# kernel provides it; older kernels fall back to the libcfs wrapper.
+#
+AC_DEFUN([LIBCFS_TIMESPEC64_SUB],[
+LB_CHECK_COMPILE([does function 'timespec64_sub' exist],
+timespec64_sub, [
+	#include <linux/time.h>
+],[
+	struct timespec64 later,earlier,diff;
+
+	diff = timespec64_sub(later, earlier);
+],[
+	AC_DEFINE(HAVE_TIMESPEC64_SUB, 1,
+		['timespec64_sub' is available])
+])
+]) # LIBCFS_TIMESPEC64_SUB
+
+#
+# Kernel version 3.17 introduced timespec64_to_ktime, which converts a
+# struct timespec64 into a ktime_t.  Define HAVE_TIMESPEC64_TO_KTIME
+# when available; older kernels fall back to the libcfs wrapper.
+#
+AC_DEFUN([LIBCFS_TIMESPEC64_TO_KTIME],[
+LB_CHECK_COMPILE([does function 'timespec64_to_ktime' exist],
+timespec64_to_ktime, [
+	#include <linux/ktime.h>
+],[
+	struct timespec64 ts;
+	ktime_t now;
+
+	now = timespec64_to_ktime(ts);
+],[
+	AC_DEFINE(HAVE_TIMESPEC64_TO_KTIME, 1,
+		['timespec64_to_ktime' is available])
+])
+]) # LIBCFS_TIMESPEC64_TO_KTIME
+
+#
# Kernel version 3.19 introduced ktime_get_seconds
#
AC_DEFUN([LIBCFS_KTIME_GET_SECONDS],[
LIBCFS_KTIME_GET_REAL_SECONDS
LIBCFS_KTIME_GET_REAL_NS
LIBCFS_KTIME_TO_TIMESPEC64
+LIBCFS_TIMESPEC64_SUB
+LIBCFS_TIMESPEC64_TO_KTIME
# 3.19
LIBCFS_KTIME_GET_SECONDS
# 4.2
return ts;
}
+/* 64-bit branch: timespec64 aliases timespec here (see the compat
+ * definitions above this #else), so conversion is an identity.
+ * NOTE(review): 'return ts' only compiles because of that alias --
+ * confirm against the surrounding HAVE_TIMESPEC64 compat block. */
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts)
+{
+	return ts;
+}
+
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
ret.tv_nsec = ts.tv_nsec;
return ret;
}
+
+
+/* 32-bit branch: convert the emulated struct timespec64 to a legacy
+ * struct timespec.  tv_sec is narrowed from time64_t to time_t, so
+ * values past 2038 are truncated on 32-bit -- same behavior as the
+ * kernel's own helper of this name. */
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+	struct timespec ret;
+
+	ret.tv_sec = (time_t)ts64.tv_sec;
+	ret.tv_nsec = ts64.tv_nsec;
+	return ret;
+}
#endif /* __BITS_PER_LONG != 64 */
#endif /* HAVE_TIMESPEC64 */
}
#endif /* HAVE_KTIME_TO_TIMESPEC64 */
+#ifndef HAVE_TIMESPEC64_SUB
+/* Fallback for kernels before 3.17: subtract two timespec64 values by
+ * round-tripping through the legacy timespec helpers.  On 32-bit this
+ * narrows tv_sec to time_t, so it inherits the pre-2038 range of the
+ * old API -- acceptable only as a compat shim. */
+static inline struct timespec64
+timespec64_sub(struct timespec64 later, struct timespec64 earlier)
+{
+	struct timespec diff;
+
+	diff = timespec_sub(timespec64_to_timespec(later),
+			    timespec64_to_timespec(earlier));
+	return timespec_to_timespec64(diff);
+}
+#endif
+
+#ifndef HAVE_TIMESPEC64_TO_KTIME
+/* Fallback for kernels before 3.17: build a ktime_t directly from the
+ * seconds/nanoseconds pair via ktime_set(). */
+static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
+{
+	return ktime_set(ts.tv_sec, ts.tv_nsec);
+}
+#endif
+
static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
{
return time_before(t1, t2);
struct list_head *paa_reqs_array; /** array to hold requests */
__u32 paa_size; /** the size of array */
__u32 paa_count; /** the total count of reqs */
- time_t paa_deadline; /** the earliest deadline of reqs */
+ time64_t paa_deadline; /** the earliest deadline of reqs */
__u32 *paa_reqs_count; /** the count of reqs in each entry */
};
cfs_duration_t cr_delay_limit;
/** time request was first queued */
cfs_time_t cr_queued_time;
- /** request sent timeval */
- struct timeval cr_sent_tv;
+ /** request sent in nanoseconds */
+ ktime_t cr_sent_ns;
/** time for request really sent out */
- time_t cr_sent_out;
+ time64_t cr_sent_out;
/** when req reply unlink must finish. */
- time_t cr_reply_deadline;
+ time64_t cr_reply_deadline;
/** when req bulk unlink must finish. */
- time_t cr_bulk_deadline;
+ time64_t cr_bulk_deadline;
/** when req unlink must finish. */
- time_t cr_req_deadline;
+ time64_t cr_req_deadline;
/** Portal to which this request would be sent */
short cr_req_ptl;
/** Portal where to wait for reply and where reply would be sent */
#define rq_bulk rq_cli.cr_bulk
#define rq_delay_limit rq_cli.cr_delay_limit
#define rq_queued_time rq_cli.cr_queued_time
-#define rq_sent_tv rq_cli.cr_sent_tv
+#define rq_sent_ns rq_cli.cr_sent_ns
#define rq_real_sent rq_cli.cr_sent_out
#define rq_reply_deadline rq_cli.cr_reply_deadline
#define rq_bulk_deadline rq_cli.cr_bulk_deadline
struct ptlrpc_nrs_request sr_nrq;
/** @} nrs */
/** request arrival time */
- struct timeval sr_arrival_time;
+ struct timespec64 sr_arrival_time;
/** server's half ctx */
struct ptlrpc_svc_ctx *sr_svc_ctx;
/** (server side), pointed directly into req buffer */
/**
* when request/reply sent (secs), or time when request should be sent
*/
- time_t rq_sent;
+ time64_t rq_sent;
/** when request must finish. */
- time_t rq_deadline;
+ time64_t rq_deadline;
/** request format description */
struct req_capsule rq_pill;
};
LASSERT(req != NULL);
desc = req->rq_bulk;
- if (req->rq_bulk_deadline > cfs_time_current_sec())
+ if (req->rq_bulk_deadline > ktime_get_real_seconds())
return 1;
if (!desc)
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
- if (req->rq_reply_deadline > cfs_time_current_sec())
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_replied;
}
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
- if (req->rq_reply_deadline > cfs_time_current_sec())
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 1;
return req->rq_receiving_reply;
}
int rc;
spin_lock(&req->rq_lock);
- if (req->rq_reply_deadline > cfs_time_current_sec()) {
+ if (req->rq_reply_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
- if (req->rq_req_deadline > cfs_time_current_sec()) {
+ if (req->rq_req_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = cfs_time_current_sec() + resends;
+ req->rq_sent = ktime_get_real_seconds() + resends;
}
/* It is important to obtain modify RPC slot first (if applicable), so
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = cfs_time_current_sec() + resends;
+ req->rq_sent = ktime_get_real_seconds() + resends;
}
level = LUSTRE_IMP_FULL;
resend:
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply()) */
if (aa->aa_resends > new_req->rq_timeout)
- new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
+ new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
else
- new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
+ new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
new_req->rq_generation_set = 1;
new_req->rq_import_generation = request->rq_import_generation;
{
unsigned int nl, oldnl;
struct imp_at *at;
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
LASSERT(req->rq_import);
*/
CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
D_ADAPTTO : D_WARNING,
- "Reported service time %u > total measured time "
- CFS_DURATION_T"\n", service_time,
- cfs_time_sub(now, req->rq_sent));
+ "Reported service time %u > total measured time %lld\n",
+ service_time, now - req->rq_sent);
return;
}
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
__must_hold(&req->rq_lock)
{
- struct ptlrpc_request *early_req;
- time_t olddl;
- int rc;
- ENTRY;
+ struct ptlrpc_request *early_req;
+ time64_t olddl;
+ int rc;
+ ENTRY;
req->rq_early = 0;
spin_unlock(&req->rq_lock);
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in "CFS_DURATION_T"s "
- "("CFS_DURATION_T"s)", req->rq_early_count,
- cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
- cfs_time_sub(req->rq_deadline, olddl));
+ "Early reply #%d, new deadline in %llds (%llds)",
+ req->rq_early_count,
+ req->rq_deadline - ktime_get_real_seconds(),
+ req->rq_deadline - olddl);
RETURN(rc);
}
/* Let's setup deadline for req/reply/bulk unlink for opcode. */
if (cfs_fail_val == opcode) {
- time_t *fail_t = NULL, *fail2_t = NULL;
+ time64_t *fail_t = NULL, *fail2_t = NULL;
if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
fail_t = &request->rq_bulk_deadline;
}
if (fail_t) {
- *fail_t = cfs_time_current_sec() + LONG_UNLINK;
+ *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
if (fail2_t)
- *fail2_t = cfs_time_current_sec() + LONG_UNLINK;
+ *fail2_t = ktime_get_real_seconds() +
+ LONG_UNLINK;
/* The RPC is infected, let the test to change the
* fail_loc */
*/
static int after_reply(struct ptlrpc_request *req)
{
- struct obd_import *imp = req->rq_import;
- struct obd_device *obd = req->rq_import->imp_obd;
- int rc;
- struct timeval work_start;
- __u64 committed;
- long timediff;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ struct obd_device *obd = req->rq_import->imp_obd;
+ ktime_t work_start;
+ u64 committed;
+ s64 timediff;
+ int rc;
+ ENTRY;
LASSERT(obd != NULL);
/* repbuf must be unlinked */
LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
RETURN(0);
}
-	do_gettimeofday(&work_start);
-	timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+	work_start = ktime_get_real();
+	/* Microseconds elapsed since the request was sent.
+	 * ktime_us_delta(later, earlier) computes later - earlier, so
+	 * "now" (work_start) must be the first argument; the replaced
+	 * cfs_timeval_sub(now, sent) was likewise now - sent. */
+	timediff = ktime_us_delta(work_start, req->rq_sent_ns);
/*
* NB Until this point, the whole of the incoming message,
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
spin_lock(&req->rq_lock);
pool_is_at_full_capacity())
RETURN(-ENOMEM);
- if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
RETURN (0);
/* delayed resend - skip */
if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > cfs_time_current_sec())
+ req->rq_sent > ktime_get_real_seconds())
continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_timedout = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
- "/real "CFS_DURATION_T"]",
+ DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
req->rq_net_err ? "failed due to network error" :
((req->rq_real_sent == 0 ||
- cfs_time_before(req->rq_real_sent, req->rq_sent) ||
- cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+ req->rq_real_sent < req->rq_sent ||
+ req->rq_real_sent >= req->rq_deadline) ?
"timed out for sent delay" : "timed out for slow reply"),
- req->rq_sent, req->rq_real_sent);
+ (s64)req->rq_sent, (s64)req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetDebugPeer(imp->imp_connection->c_peer);
*/
int ptlrpc_expired_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+ time64_t now = ktime_get_real_seconds();
+ ENTRY;
LASSERT(set != NULL);
/*
*/
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
- time_t now = cfs_time_current_sec();
- int timeout = 0;
- struct ptlrpc_request *req;
- int deadline;
- ENTRY;
+ struct list_head *tmp;
+ time64_t now = ktime_get_real_seconds();
+ int timeout = 0;
+ struct ptlrpc_request *req;
+ time64_t deadline;
+ ENTRY;
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
- request->rq_reply_deadline =
- cfs_time_current_sec() + LONG_UNLINK;
+ request->rq_reply_deadline = ktime_get_real_seconds() +
+ LONG_UNLINK;
/*
* Nothing left to do.
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
{
/* re-initialize the req */
req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
req->rq_phase = RQ_PHASE_INTERPRET;
req->rq_next_phase = RQ_PHASE_COMPLETE;
sptlrpc_request_out_callback(req);
spin_lock(&req->rq_lock);
- req->rq_real_sent = cfs_time_current_sec();
+ req->rq_real_sent = ktime_get_real_seconds();
req->rq_req_unlinked = 1;
/* reply_in_callback happened before request_out_callback? */
if (req->rq_reply_unlinked)
static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
- __u64 sec = req->rq_arrival_time.tv_sec;
- __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
- __u64 new_seq;
+ u64 sec = req->rq_arrival_time.tv_sec;
+ u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
+ u64 new_seq;
/* set sequence ID for request and add it to history list,
* it must be called with hold svcpt::scp_lock */
req->rq_reqbuf = ev->md.start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
req->rq_reqdata_len = ev->mlength;
- do_gettimeofday(&req->rq_arrival_time);
+ ktime_get_real_ts64(&req->rq_arrival_time);
/* Multi-Rail: keep track of both initiator and source NID. */
req->rq_peer = ev->initiator;
req->rq_source = ev->source;
EXPORT_SYMBOL(ptlrpc_deactivate_import);
static unsigned int
-ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
+ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
{
long dl;
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
struct list_head *tmp, *n;
struct ptlrpc_request *req;
unsigned int timeout = 0;
rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
if (rc == 0) {
- struct timespec arrival, sent, arrivaldiff;
+ struct timespec64 arrival, sent, arrivaldiff;
char nidstr[LNET_NIDSTR_SIZE];
req = srhi->srhi_req;
libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr));
arrival.tv_sec = req->rq_arrival_time.tv_sec;
- arrival.tv_nsec =
- req->rq_arrival_time.tv_usec * NSEC_PER_USEC;
+ arrival.tv_nsec = req->rq_arrival_time.tv_nsec;
sent.tv_sec = req->rq_sent;
sent.tv_nsec = 0;
- arrivaldiff = timespec_sub(sent, arrival);
+ arrivaldiff = timespec64_sub(sent, arrival);
/* Print common req fields.
* CAVEAT EMPTOR: we're racing with the service handler
* here. The request could contain any old crap, so you
* must be just as careful as the service's request
* parser. Currently I only print stuff here I know is OK
- * to look at coz it was set up in request_in_callback()!!! */
- seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:"
- "%ld.%06ld:%ld.%06lds(%+ld.0s) ",
+ * to look at coz it was set up in request_in_callback()!!!
+ */
+ seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld.%06lld:%lld.%06llds(%+lld.0s) ",
req->rq_history_seq, nidstr,
libcfs_id2str(req->rq_peer), req->rq_xid,
req->rq_reqlen, ptlrpc_rqphase2str(req),
- req->rq_arrival_time.tv_sec,
- req->rq_arrival_time.tv_usec,
- arrivaldiff.tv_sec,
- arrivaldiff.tv_nsec / NSEC_PER_USEC,
- req->rq_sent - req->rq_deadline);
+ (s64)req->rq_arrival_time.tv_sec,
+ (s64)(req->rq_arrival_time.tv_nsec / NSEC_PER_USEC),
+ (s64)arrivaldiff.tv_sec,
+ (s64)(arrivaldiff.tv_nsec / NSEC_PER_USEC),
+ (s64)(req->rq_sent - req->rq_deadline));
if (svc->srv_ops.so_req_printer == NULL)
seq_printf(s, "\n");
else
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
- req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+ req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
RETURN(1); /* never registered */
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, cfs_time_current_sec() -
+ int service_time = max_t(int, ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec, 1);
if (!(flags & PTLRPC_REPLY_EARLY) &&
if (unlikely(rc))
goto out;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = ktime_get_real_seconds();
rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
(rs->rs_difficult && !rs->rs_no_ack) ?
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- do_gettimeofday(&request->rq_sent_tv);
- request->rq_sent = cfs_time_current_sec();
+ request->rq_sent_ns = ktime_get_real();
+ request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
if (rc == -ENOMEM) {
/* set rq_sent so that this request is treated
* as a delayed send in the upper layers */
- request->rq_sent = cfs_time_current_sec();
+ request->rq_sent = ktime_get_real_seconds();
}
if (request->rq_memalloc)
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d"
- " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
- "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
+ " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
req, req->rq_xid, req->rq_transno,
req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
libcfs_nid2str(nid),
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
- req->rq_early_count, req->rq_timedout,
- req->rq_deadline,
+ req->rq_early_count, (s64)req->rq_timedout,
+ (s64)req->rq_deadline,
atomic_read(&req->rq_refcount),
DEBUG_REQ_FLAGS(req),
req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
}
/* Set timer for closest deadline */
- next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
+ next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
ptlrpc_at_timer((unsigned long)svcpt);
spin_lock(&svcpt->scp_at_lock);
LASSERT(list_empty(&req->rq_timed_list));
- index = (unsigned long)req->rq_deadline % array->paa_size;
+ div_u64_rem(req->rq_deadline, array->paa_size, &index);
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
* so search backward. */
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_request *reqcopy;
struct lustre_msg *reqmsg;
- cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
- time_t newdl;
+ time64_t olddl = req->rq_deadline - ktime_get_real_seconds();
+ time64_t newdl;
int rc;
ENTRY;
/* deadline is when the client expects us to reply, margin is the
difference between clients' and servers' expectations */
DEBUG_REQ(D_ADAPTTO, req,
- "%ssending early reply (deadline %+lds, margin %+lds) for "
+ "%ssending early reply (deadline %+llds, margin %+llds) for "
"%d+%d", AT_OFF ? "AT off - not " : "",
- olddl, olddl - at_get(&svcpt->scp_at_estimate),
+ (s64)olddl, (s64)(olddl - at_get(&svcpt->scp_at_estimate)),
at_get(&svcpt->scp_at_estimate), at_extra);
if (AT_OFF)
RETURN(0);
if (olddl < 0) {
- DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
+ DEBUG_REQ(D_WARNING, req, "Already past deadline (%+llds), "
"not sending early reply. Consider increasing "
- "at_early_margin (%d)?", olddl, at_early_margin);
+ "at_early_margin (%d)?", (s64)olddl, at_early_margin);
/* Return an error so we're not re-added to the timed list. */
RETURN(-ETIMEDOUT);
* account for network latency). See ptlrpc_at_recv_early_reply
*/
at_measured(&svcpt->scp_at_estimate, at_extra +
- cfs_time_current_sec() -
+ ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec);
newdl = req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate);
/* Check to see if we've actually increased the deadline -
* we may be past adaptive_max */
if (req->rq_deadline >= newdl) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
- "(%ld/%ld), not sending early reply\n",
- olddl, newdl - cfs_time_current_sec());
+ DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%lld/%lld), not sending early reply\n",
+ (s64)olddl, (s64)(newdl - ktime_get_real_seconds()));
RETURN(-ETIMEDOUT);
}
struct ptlrpc_request *rq, *n;
struct list_head work_list;
__u32 index, count;
- time_t deadline;
- time_t now = cfs_time_current_sec();
+ time64_t deadline;
+ time64_t now = ktime_get_real_seconds();
cfs_duration_t delay;
int first, counter = 0;
ENTRY;
server will take. Send early replies to everyone expiring soon. */
INIT_LIST_HEAD(&work_list);
deadline = -1;
- index = (unsigned long)array->paa_deadline % array->paa_size;
+ div_u64_rem(array->paa_deadline, array->paa_size, &index);
count = array->paa_count;
while (count > 0) {
count -= array->paa_reqs_count[index];
LCONSOLE_WARN("%s: This server is not able to keep up with "
"request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
- "delay="CFS_DURATION_T"(jiff)\n",
+ CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
counter, svcpt->scp_nreqs_incoming,
svcpt->scp_nreqs_active,
at_get(&svcpt->scp_at_estimate), delay);
}
/* req_in handling should/must be fast */
- if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
- DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
- cfs_time_sub(cfs_time_current_sec(),
- req->rq_arrival_time.tv_sec));
+ if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
+ DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds",
+ (s64)(ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec));
/* Set rpc server deadline and add it to the timed list */
deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
struct ptlrpc_thread *thread)
{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *request;
- struct timeval work_start;
- struct timeval work_end;
- long timediff;
- int fail_opc = 0;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request *request;
+ ktime_t work_start;
+ ktime_t work_end;
+ ktime_t arrived;
+ s64 timediff_usecs;
+ s64 arrived_usecs;
+ int fail_opc = 0;
ENTRY;
if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
-	do_gettimeofday(&work_start);
-	timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
+	work_start = ktime_get_real();
+	arrived = timespec64_to_ktime(request->rq_arrival_time);
+	/* Time the request spent queued before handling, in microseconds.
+	 * ktime_us_delta(later, earlier) computes later - earlier, so
+	 * "now" (work_start) must come first; a reversed order would feed
+	 * negative values to the REQWAIT counter and the export timer. */
+	timediff_usecs = ktime_us_delta(work_start, arrived);
if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
+ timediff_usecs);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
svcpt->scp_nreqs_incoming);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
- ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
+ ptlrpc_update_export_timer(request->rq_export,
+ timediff_usecs >> 19);
}
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
- if (cfs_time_current_sec() > request->rq_deadline) {
- DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
- ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
+ if (ktime_get_real_seconds() > request->rq_deadline) {
+ DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n",
libcfs_id2str(request->rq_peer),
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
- request->rq_deadline));
+ request->rq_deadline -
+ request->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - request->rq_deadline);
goto put_conn;
}
ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
put_conn:
- if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
- DEBUG_REQ(D_WARNING, request, "Request took longer "
- "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
- " client may timeout.",
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
- request->rq_deadline));
- }
-
- do_gettimeofday(&work_end);
- timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x%llu:%s:%d Request procesed in "
- "%ldus (%ldus total) trans %llu rc %d/%d\n",
+ if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
+ DEBUG_REQ(D_WARNING, request, "Request took longer than estimated (%lld:%llds); client may timeout.",
+ request->rq_deadline -
+ request->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - request->rq_deadline);
+ }
+
+ work_end = ktime_get_real();
+ timediff_usecs = ktime_us_delta(work_end, work_start);
+ arrived_usecs = ktime_us_delta(work_end, arrived);
+ CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request procesed in %lldus (%lldus total) trans %llu rc %d/%d\n",
current_comm(),
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg),
- timediff,
- cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ timediff_usecs,
+ arrived_usecs,
(request->rq_repmsg ?
lustre_msg_get_transno(request->rq_repmsg) :
request->rq_transno),
LASSERT(opc < LUSTRE_MAX_OPCODES);
lprocfs_counter_add(svc->srv_stats,
opc + EXTRA_MAX_OPCODES,
- timediff);
+ timediff_usecs);
}
}
if (unlikely(request->rq_early_count)) {
DEBUG_REQ(D_ADAPTTO, request,
- "sent %d early replies before finishing in "
- CFS_DURATION_T"s",
+ "sent %d early replies before finishing in %llds",
request->rq_early_count,
- cfs_time_sub(work_end.tv_sec,
- request->rq_arrival_time.tv_sec));
+ arrived_usecs / USEC_PER_SEC);
}
ptlrpc_server_finish_active_request(svcpt, request);
* to be shot, so it's intentionally non-aggressive. */
static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
- struct ptlrpc_request *request = NULL;
- struct timeval right_now;
- long timediff;
+ struct ptlrpc_request *request = NULL;
+ struct timespec64 right_now;
+ struct timespec64 timediff;
- do_gettimeofday(&right_now);
+ ktime_get_real_ts64(&right_now);
spin_lock(&svcpt->scp_req_lock);
/* How long has the next entry been waiting? */
return 0;
}
- timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
+ timediff = timespec64_sub(right_now, request->rq_arrival_time);
spin_unlock(&svcpt->scp_req_lock);
- if ((timediff / ONE_MILLION) >
+ if ((timediff.tv_sec) >
(AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
- CERROR("%s: unhealthy - request has been waiting %lds\n",
- svcpt->scp_service->srv_name, timediff / ONE_MILLION);
+ CERROR("%s: unhealthy - request has been waiting %llds\n",
+ svcpt->scp_service->srv_name, (s64)timediff.tv_sec);
return -1;
}
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = 1;
- CERROR("Dropping timed-out read from %s because locking"
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = true;
- CERROR("%s: Dropping timed-out write from %s because locking "
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}