* GPL HEADER END
*/
/*
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2014, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
-#include <lnet/lnetctl.h>
+#include <uapi/linux/lnet/lnetctl.h>
#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \
LNET_GET_BIT | LNET_REPLY_BIT)
/**
* seconds to drop the next message, it's exclusive with dr_drop_at
*/
- cfs_time_t dr_drop_time;
+ time64_t dr_drop_time;
-	/** baseline to caculate dr_drop_time */
+	/** baseline to calculate dr_drop_time */
- cfs_time_t dr_time_base;
+ time64_t dr_time_base;
/** statistic of dropped messages */
struct lnet_fault_stat dr_stat;
};
rule->dr_attr = *attr;
if (attr->u.drop.da_interval != 0) {
- rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
- rule->dr_drop_time = cfs_time_shift(cfs_rand() %
- attr->u.drop.da_interval);
+ rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
+ rule->dr_drop_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.drop.da_interval;
} else {
rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
}
if (attr->u.drop.da_rate != 0) {
rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
} else {
- rule->dr_drop_time = cfs_time_shift(cfs_rand() %
- attr->u.drop.da_interval);
- rule->dr_time_base = cfs_time_shift(attr->u.drop.
- da_interval);
+ rule->dr_drop_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.drop.da_interval;
+ rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
}
spin_unlock(&rule->dr_lock);
}
/* match this rule, check drop rate now */
spin_lock(&rule->dr_lock);
if (rule->dr_drop_time != 0) { /* time based drop */
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
rule->dr_stat.fs_count++;
- drop = cfs_time_aftereq(now, rule->dr_drop_time);
+ drop = now >= rule->dr_drop_time;
if (drop) {
- if (cfs_time_after(now, rule->dr_time_base))
+ if (now > rule->dr_time_base)
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
- cfs_time_seconds(cfs_rand() %
- attr->u.drop.da_interval);
- rule->dr_time_base += cfs_time_seconds(attr->u.drop.
- da_interval);
-
- CDEBUG(D_NET, "Drop Rule %s->%s: next drop : "
- CFS_TIME_T"\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst),
- rule->dr_drop_time);
+ cfs_rand() % attr->u.drop.da_interval;
+ rule->dr_time_base += attr->u.drop.da_interval;
+
+ CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
+ libcfs_nid2str(attr->fa_src),
+ libcfs_nid2str(attr->fa_dst),
+ rule->dr_drop_time);
}
} else { /* rate based drop */
* Check if message from \a src to \a dst can match any existed drop rule
*/
bool
-lnet_drop_rule_match(lnet_hdr_t *hdr)
+lnet_drop_rule_match(struct lnet_hdr *hdr)
{
struct lnet_drop_rule *rule;
lnet_nid_t src = le64_to_cpu(hdr->src_nid);
/**
* seconds to delay the next message, it's exclusive with dl_delay_at
*/
- cfs_time_t dl_delay_time;
+ time64_t dl_delay_time;
-	/** baseline to caculate dl_delay_time */
+	/** baseline to calculate dl_delay_time */
- cfs_time_t dl_time_base;
+ time64_t dl_time_base;
-	/** jiffies to send the next delayed message */
+	/** seconds (ktime_get_seconds) to send the next delayed message */
	unsigned long dl_msg_send;
/** delayed message list */
static struct delay_daemon_data delay_dd;
-static cfs_time_t
-round_timeout(cfs_time_t timeout)
-{
- return cfs_time_seconds((unsigned int)
- cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
-}
-
static void
delay_rule_decref(struct lnet_delay_rule *rule)
{
/* match this rule, check delay rate now */
spin_lock(&rule->dl_lock);
if (rule->dl_delay_time != 0) { /* time based delay */
- cfs_time_t now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
rule->dl_stat.fs_count++;
- delay = cfs_time_aftereq(now, rule->dl_delay_time);
+ delay = now >= rule->dl_delay_time;
if (delay) {
- if (cfs_time_after(now, rule->dl_time_base))
+ if (now > rule->dl_time_base)
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
- cfs_time_seconds(cfs_rand() %
- attr->u.delay.la_interval);
- rule->dl_time_base += cfs_time_seconds(attr->u.delay.
- la_interval);
-
- CDEBUG(D_NET, "Delay Rule %s->%s: next delay : "
- CFS_TIME_T"\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst),
- rule->dl_delay_time);
+ cfs_rand() % attr->u.delay.la_interval;
+ rule->dl_time_base += attr->u.delay.la_interval;
+
+ CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
+ libcfs_nid2str(attr->fa_src),
+ libcfs_nid2str(attr->fa_dst),
+ rule->dl_delay_time);
}
} else { /* rate based delay */
rule->dl_stat.u.delay.ls_delayed++;
list_add_tail(&msg->msg_list, &rule->dl_msg_list);
-	msg->msg_delay_send = round_timeout(
-			cfs_time_shift(attr->u.delay.la_latency));
+	msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
	if (rule->dl_msg_send == -1) {
		rule->dl_msg_send = msg->msg_delay_send;
-		mod_timer(&rule->dl_timer, rule->dl_msg_send);
+		/* msg_delay_send is now in seconds, but mod_timer() takes an
+		 * absolute expiry in jiffies; rearm relative to current time.
+		 */
+		mod_timer(&rule->dl_timer,
+			  jiffies + cfs_time_seconds(attr->u.delay.la_latency));
* will be delayed if there is a match.
*/
bool
-lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg)
+lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
{
struct lnet_delay_rule *rule;
lnet_nid_t src = le64_to_cpu(hdr->src_nid);
{
struct lnet_msg *msg;
struct lnet_msg *tmp;
- unsigned long now = cfs_time_current();
+ time64_t now = ktime_get_seconds();
-	if (!all && rule->dl_msg_send > now)
+	/* dl_msg_send now holds seconds (copied from msg_delay_send) and so
+	 * does 'now'; compare directly -- wrapping dl_msg_send in
+	 * cfs_time_seconds() would convert it to jiffies and mix units.
+	 */
+	if (!all && rule->dl_msg_send > now)
return;
spin_lock(&rule->dl_lock);
msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
LASSERT(msg->msg_rxpeer != NULL);
+ LASSERT(msg->msg_rxni != NULL);
- ni = msg->msg_rxpeer->lp_ni;
+ ni = msg->msg_rxni;
cpt = msg->msg_rx_cpt;
list_del_init(&msg->msg_list);
}
}
- lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len);
- lnet_finalize(ni, msg, rc);
+ lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
+ msg->msg_type);
+ lnet_finalize(msg, rc);
}
}
}
static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(cfs_timer_cb_arg_t data)
{
- struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+ struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);
spin_lock_bh(&delay_dd.dd_lock);
if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- init_timer(&rule->dl_timer);
- rule->dl_timer.function = delay_timer_cb;
- rule->dl_timer.data = (unsigned long)rule;
+ cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
+ (unsigned long)rule, 0);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
rule->dl_attr = *attr;
if (attr->u.delay.la_interval != 0) {
- rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
- rule->dl_delay_time = cfs_time_shift(cfs_rand() %
- attr->u.delay.la_interval);
+ rule->dl_time_base = ktime_get_seconds() +
+ attr->u.delay.la_interval;
+ rule->dl_delay_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.delay.la_interval;
} else {
rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
}
if (attr->u.delay.la_rate != 0) {
rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
} else {
- rule->dl_delay_time = cfs_time_shift(cfs_rand() %
- attr->u.delay.la_interval);
- rule->dl_time_base = cfs_time_shift(attr->u.delay.
- la_interval);
+ rule->dl_delay_time = ktime_get_seconds() +
+ cfs_rand() % attr->u.delay.la_interval;
+ rule->dl_time_base = ktime_get_seconds() +
+ attr->u.delay.la_interval;
}
spin_unlock(&rule->dl_lock);
}