-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2014, 2017, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- *
- * lnet/lnet/net_fault.c
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014, 2017, Intel Corporation. */
+
+/* This file is part of Lustre, http://www.lustre.org/
*
* Lustre network fault simulation
*
struct lnet_drop_rule {
/** link chain on the_lnet.ln_drop_rules */
- struct list_head dr_link;
+ struct list_head dr_link;
/** attributes of this rule */
- struct lnet_fault_attr dr_attr;
+ struct lnet_fault_large_attr dr_attr;
/** lock to protect \a dr_drop_at and \a dr_stat */
- spinlock_t dr_lock;
+ spinlock_t dr_lock;
/**
* the message sequence to drop, which means message is dropped when
* dr_stat.drs_count == dr_drop_at
*/
- unsigned long dr_drop_at;
+ unsigned long dr_drop_at;
/**
* seconds to drop the next message, it's exclusive with dr_drop_at
*/
- time64_t dr_drop_time;
+ time64_t dr_drop_time;
/** baseline to calculate dr_drop_time */
- time64_t dr_time_base;
+ time64_t dr_time_base;
/** statistic of dropped messages */
- struct lnet_fault_stat dr_stat;
+ struct lnet_fault_stat dr_stat;
};
+static void
+lnet_fault_attr_to_attr4(struct lnet_fault_large_attr *attr,
+ struct lnet_fault_attr *attr4)
+{
+ if (!attr)
+ return;
+
+ attr4->fa_src = lnet_nid_to_nid4(&attr->fa_src);
+ attr4->fa_dst = lnet_nid_to_nid4(&attr->fa_dst);
+ attr4->fa_local_nid = lnet_nid_to_nid4(&attr->fa_local_nid);
+ attr4->fa_ptl_mask = attr->fa_ptl_mask;
+ attr4->fa_msg_mask = attr->fa_msg_mask;
+
+ memcpy(&attr4->u, &attr->u, sizeof(attr4->u));
+}
+
+static void
+lnet_fault_attr4_to_attr(struct lnet_fault_attr *attr4,
+ struct lnet_fault_large_attr *attr)
+{
+ if (!attr4)
+ return;
+
+ if (attr4->fa_src)
+ lnet_nid4_to_nid(attr4->fa_src, &attr->fa_src);
+ else
+ attr->fa_src = LNET_ANY_NID;
+
+ if (attr4->fa_dst)
+ lnet_nid4_to_nid(attr4->fa_dst, &attr->fa_dst);
+ else
+ attr->fa_dst = LNET_ANY_NID;
+
+ if (attr4->fa_local_nid)
+ lnet_nid4_to_nid(attr4->fa_local_nid, &attr->fa_local_nid);
+ else
+ attr->fa_local_nid = LNET_ANY_NID;
+
+ attr->fa_ptl_mask = attr4->fa_ptl_mask;
+ attr->fa_msg_mask = attr4->fa_msg_mask;
+
+ memcpy(&attr->u, &attr4->u, sizeof(attr->u));
+}
+
static bool
-lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
+lnet_fault_nid_match(struct lnet_nid *nid, struct lnet_nid *msg_nid)
{
- if (nid == msg_nid || nid == LNET_NID_ANY)
+ if (LNET_NID_IS_ANY(nid))
+ return true;
+ if (!msg_nid)
+ return false;
+ if (nid_same(msg_nid, nid))
return true;
- if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
+ if (LNET_NID_NET(nid) != LNET_NID_NET(msg_nid))
return false;
/* 255.255.255.255@net is wildcard for all addresses in a network */
- return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
+ return __be32_to_cpu(nid->nid_addr[0]) == LNET_NIDADDR(LNET_NID_ANY);
}
static bool
-lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
- lnet_nid_t local_nid, lnet_nid_t dst,
+lnet_fault_attr_match(struct lnet_fault_large_attr *attr,
+ struct lnet_nid *src,
+ struct lnet_nid *local_nid,
+ struct lnet_nid *dst,
unsigned int type, unsigned int portal)
{
- if (!lnet_fault_nid_match(attr->fa_src, src) ||
- !lnet_fault_nid_match(attr->fa_dst, dst) ||
- !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
+ if (!lnet_fault_nid_match(&attr->fa_src, src) ||
+ !lnet_fault_nid_match(&attr->fa_dst, dst) ||
+ !lnet_fault_nid_match(&attr->fa_local_nid, local_nid))
return false;
if (!(attr->fa_msg_mask & BIT(type)))
}
static int
-lnet_fault_attr_validate(struct lnet_fault_attr *attr)
+lnet_fault_attr_validate(struct lnet_fault_large_attr *attr)
{
if (attr->fa_msg_mask == 0)
attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
* incoming message.
*/
static int
-lnet_drop_rule_add(struct lnet_fault_attr *attr)
+lnet_drop_rule_add(struct lnet_fault_large_attr *attr)
{
struct lnet_drop_rule *rule;
ENTRY;
if (attr->u.drop.da_interval != 0) {
rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
rule->dr_drop_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.drop.da_interval);
+ get_random_u32_below(attr->u.drop.da_interval);
} else {
- rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
+ rule->dr_drop_at = get_random_u32_below(attr->u.drop.da_rate);
}
lnet_net_lock(LNET_LOCK_EX);
lnet_net_unlock(LNET_LOCK_EX);
CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
- libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
+ libcfs_nidstr(&attr->fa_src), libcfs_nidstr(&attr->fa_dst),
attr->u.drop.da_rate, attr->u.drop.da_interval);
RETURN(0);
}
* If both of them are zero, all rules will be removed
*/
static int
-lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
+lnet_drop_rule_del(struct lnet_nid *src, struct lnet_nid *dst)
{
struct lnet_drop_rule *rule;
struct lnet_drop_rule *tmp;
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
- if (rule->dr_attr.fa_src != src && src != 0)
+ if (!(LNET_NID_IS_ANY(src) || nid_same(&rule->dr_attr.fa_src, src)))
continue;
- if (rule->dr_attr.fa_dst != dst && dst != 0)
+ if (!(LNET_NID_IS_ANY(dst) || nid_same(&rule->dr_attr.fa_dst, dst)))
continue;
list_move(&rule->dr_link, &zombies);
list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
- libcfs_nid2str(rule->dr_attr.fa_src),
- libcfs_nid2str(rule->dr_attr.fa_dst),
+ libcfs_nidstr(&rule->dr_attr.fa_src),
+ libcfs_nidstr(&rule->dr_attr.fa_dst),
rule->dr_attr.u.drop.da_rate,
rule->dr_attr.u.drop.da_interval);
* List drop rule at position of \a pos
*/
static int
-lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
+lnet_drop_rule_list(int pos, struct lnet_fault_large_attr *attr,
struct lnet_fault_stat *stat)
{
struct lnet_drop_rule *rule;
cpt = lnet_net_lock_current();
list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
- struct lnet_fault_attr *attr = &rule->dr_attr;
+ struct lnet_fault_large_attr *attr = &rule->dr_attr;
spin_lock(&rule->dr_lock);
memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
if (attr->u.drop.da_rate != 0) {
- rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
+ rule->dr_drop_at = get_random_u32_below(attr->u.drop.da_rate);
} else {
rule->dr_drop_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.drop.da_interval);
+ get_random_u32_below(attr->u.drop.da_interval);
rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
}
spin_unlock(&rule->dr_lock);
int i;
/* assign a random failure */
- choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
+ choice = get_random_u32_below(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
if (choice == 0)
choice++;
* decide whether should drop this message or not
*/
static bool
-drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
- lnet_nid_t local_nid, lnet_nid_t dst,
+drop_rule_match(struct lnet_drop_rule *rule,
+ struct lnet_nid *src,
+ struct lnet_nid *local_nid,
+ struct lnet_nid *dst,
unsigned int type, unsigned int portal,
enum lnet_msg_hstatus *hstatus)
{
- struct lnet_fault_attr *attr = &rule->dr_attr;
- bool drop;
+ struct lnet_fault_large_attr *attr = &rule->dr_attr;
+ bool drop;
if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
return false;
/* match this rule, check drop rate now */
spin_lock(&rule->dr_lock);
if (attr->u.drop.da_random) {
- int value = prandom_u32_max(attr->u.drop.da_interval);
+ int value = get_random_u32_below(attr->u.drop.da_interval);
if (value >= (attr->u.drop.da_interval / 2))
drop = true;
else
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
- prandom_u32_max(attr->u.drop.da_interval);
+ get_random_u32_below(attr->u.drop.da_interval);
rule->dr_time_base += attr->u.drop.da_interval;
CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst),
+ libcfs_nidstr(&attr->fa_src),
+ libcfs_nidstr(&attr->fa_dst),
rule->dr_drop_time);
}
count = rule->dr_stat.fs_count;
if (do_div(count, attr->u.drop.da_rate) == 0) {
rule->dr_drop_at = rule->dr_stat.fs_count +
- prandom_u32_max(attr->u.drop.da_rate);
+ get_random_u32_below(attr->u.drop.da_rate);
CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
+ libcfs_nidstr(&attr->fa_src),
+ libcfs_nidstr(&attr->fa_dst), rule->dr_drop_at);
}
}
*/
bool
lnet_drop_rule_match(struct lnet_hdr *hdr,
- lnet_nid_t local_nid,
+ struct lnet_nid *local_nid,
enum lnet_msg_hstatus *hstatus)
{
- lnet_nid_t src = le64_to_cpu(hdr->src_nid);
- lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
- unsigned int typ = le32_to_cpu(hdr->type);
+ unsigned int typ = hdr->type;
struct lnet_drop_rule *rule;
unsigned int ptl = -1;
bool drop = false;
cpt = lnet_net_lock_current();
list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
- drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
+ drop = drop_rule_match(rule, &hdr->src_nid, local_nid,
+ &hdr->dest_nid, typ, ptl,
hstatus);
if (drop)
break;
struct lnet_delay_rule {
/** link chain on the_lnet.ln_delay_rules */
- struct list_head dl_link;
+ struct list_head dl_link;
/** link chain on delay_dd.dd_sched_rules */
- struct list_head dl_sched_link;
+ struct list_head dl_sched_link;
/** attributes of this rule */
- struct lnet_fault_attr dl_attr;
+ struct lnet_fault_large_attr dl_attr;
/** lock to protect \a below members */
- spinlock_t dl_lock;
+ spinlock_t dl_lock;
/** refcount of delay rule */
- atomic_t dl_refcount;
+ atomic_t dl_refcount;
/**
* the message sequence to delay, which means message is delayed when
* dl_stat.fs_count == dl_delay_at
*/
- unsigned long dl_delay_at;
+ unsigned long dl_delay_at;
/**
* seconds to delay the next message, it's exclusive with dl_delay_at
*/
- time64_t dl_delay_time;
+ time64_t dl_delay_time;
/** baseline to calculate dl_delay_time */
- time64_t dl_time_base;
+ time64_t dl_time_base;
/** seconds until we send the next delayed message */
- time64_t dl_msg_send;
+ time64_t dl_msg_send;
/** delayed message list */
- struct list_head dl_msg_list;
+ struct list_head dl_msg_list;
/** statistic of delayed messages */
- struct lnet_fault_stat dl_stat;
+ struct lnet_fault_stat dl_stat;
/** timer to wakeup delay_daemon */
- struct timer_list dl_timer;
+ struct timer_list dl_timer;
};
struct delay_daemon_data {
* decide whether should delay this message or not
*/
static bool
-delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
- lnet_nid_t dst, unsigned int type, unsigned int portal,
- struct lnet_msg *msg)
+delay_rule_match(struct lnet_delay_rule *rule, struct lnet_nid *src,
+ struct lnet_nid *dst, unsigned int type, unsigned int portal,
+ struct lnet_msg *msg)
{
- struct lnet_fault_attr *attr = &rule->dl_attr;
+ struct lnet_fault_large_attr *attr = &rule->dl_attr;
bool delay;
time64_t now = ktime_get_seconds();
- if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
+ if (!lnet_fault_attr_match(attr, src, NULL,
dst, type, portal))
return false;
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
- prandom_u32_max(attr->u.delay.la_interval);
+ get_random_u32_below(attr->u.delay.la_interval);
rule->dl_time_base += attr->u.delay.la_interval;
CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst),
+ libcfs_nidstr(&attr->fa_src),
+ libcfs_nidstr(&attr->fa_dst),
rule->dl_delay_time);
}
count = rule->dl_stat.fs_count;
if (do_div(count, attr->u.delay.la_rate) == 0) {
rule->dl_delay_at = rule->dl_stat.fs_count +
- prandom_u32_max(attr->u.delay.la_rate);
+ get_random_u32_below(attr->u.delay.la_rate);
CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
+ libcfs_nidstr(&attr->fa_src),
+ libcfs_nidstr(&attr->fa_dst), rule->dl_delay_at);
}
}
lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
{
struct lnet_delay_rule *rule;
- lnet_nid_t src = le64_to_cpu(hdr->src_nid);
- lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
- unsigned int typ = le32_to_cpu(hdr->type);
+ unsigned int typ = hdr->type;
unsigned int ptl = -1;
/* NB: called with hold of lnet_net_lock */
ptl = le32_to_cpu(hdr->msg.get.ptl_index);
list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
- if (delay_rule_match(rule, src, dst, typ, ptl, msg))
+ if (delay_rule_match(rule, &hdr->src_nid, &hdr->dest_nid,
+ typ, ptl, msg))
return true;
}
}
if (list_empty(&rule->dl_msg_list)) {
- del_timer(&rule->dl_timer);
+ timer_delete(&rule->dl_timer);
rule->dl_msg_send = -1;
} else if (!list_empty(msg_list)) {
/* dequeued some timedout messages, update timer for the
* next delayed message on rule */
- msg = list_entry(rule->dl_msg_list.next,
- struct lnet_msg, msg_list);
+ msg = list_first_entry(&rule->dl_msg_list,
+ struct lnet_msg, msg_list);
rule->dl_msg_send = msg->msg_delay_send;
mod_timer(&rule->dl_timer,
jiffies +
{
struct lnet_msg *msg;
- while (!list_empty(msg_list)) {
+ while ((msg = list_first_entry_or_null(msg_list, struct lnet_msg,
+ msg_list)) != NULL) {
struct lnet_ni *ni;
int cpt;
int rc;
- msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
-
if (msg->msg_sending) {
/* Delayed send */
list_del_init(&msg->msg_list);
case LNET_CREDIT_OK:
lnet_ni_recv(ni, msg->msg_private, msg, 0,
0, msg->msg_len, msg->msg_len);
- /* fallthrough */
+ fallthrough;
case LNET_CREDIT_WAIT:
continue;
default: /* failures */
break;
}
- rule = list_entry(delay_dd.dd_sched_rules.next,
- struct lnet_delay_rule, dl_sched_link);
+ rule = list_first_entry(&delay_dd.dd_sched_rules,
+ struct lnet_delay_rule, dl_sched_link);
list_del_init(&rule->dl_sched_link);
spin_unlock_bh(&delay_dd.dd_lock);
* incoming message.
*/
int
-lnet_delay_rule_add(struct lnet_fault_attr *attr)
+lnet_delay_rule_add(struct lnet_fault_large_attr *attr)
{
struct lnet_delay_rule *rule;
- int rc = 0;
+ int rc = 0;
ENTRY;
if (!((attr->u.delay.la_rate == 0) ^
rule->dl_time_base = ktime_get_seconds() +
attr->u.delay.la_interval;
rule->dl_delay_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.delay.la_interval);
+ get_random_u32_below(attr->u.delay.la_interval);
} else {
- rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
+ rule->dl_delay_at = get_random_u32_below(attr->u.delay.la_rate);
}
rule->dl_msg_send = -1;
lnet_net_unlock(LNET_LOCK_EX);
CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
- libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
+ libcfs_nidstr(&attr->fa_src), libcfs_nidstr(&attr->fa_dst),
attr->u.delay.la_rate);
mutex_unlock(&delay_dd.dd_mutex);
* processed immediately.
*/
int
-lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
+lnet_delay_rule_del(struct lnet_nid *src, struct lnet_nid *dst, bool shutdown)
{
struct lnet_delay_rule *rule;
struct lnet_delay_rule *tmp;
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
- if (rule->dl_attr.fa_src != src && src != 0)
+ if (!(LNET_NID_IS_ANY(src) || nid_same(&rule->dl_attr.fa_src, src)))
continue;
- if (rule->dl_attr.fa_dst != dst && dst != 0)
+ if (!(LNET_NID_IS_ANY(dst) || nid_same(&rule->dl_attr.fa_dst, dst)))
continue;
CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
- libcfs_nid2str(rule->dl_attr.fa_src),
- libcfs_nid2str(rule->dl_attr.fa_dst),
+ libcfs_nidstr(&rule->dl_attr.fa_src),
+ libcfs_nidstr(&rule->dl_attr.fa_dst),
rule->dl_attr.u.delay.la_rate,
rule->dl_attr.u.delay.la_interval);
/* refcount is taken over by rule_list */
list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
list_del_init(&rule->dl_link);
- del_timer_sync(&rule->dl_timer);
+ timer_delete_sync(&rule->dl_timer);
delayed_msg_check(rule, true, &msg_list);
delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
n++;
* List Delay Rule at position of \a pos
*/
int
-lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
+lnet_delay_rule_list(int pos, struct lnet_fault_large_attr *attr,
struct lnet_fault_stat *stat)
{
struct lnet_delay_rule *rule;
cpt = lnet_net_lock_current();
list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
- struct lnet_fault_attr *attr = &rule->dl_attr;
+ struct lnet_fault_large_attr *attr = &rule->dl_attr;
spin_lock(&rule->dl_lock);
memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
if (attr->u.delay.la_rate != 0) {
- rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
+ rule->dl_delay_at = get_random_u32_below(attr->u.delay.la_rate);
} else {
rule->dl_delay_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.delay.la_interval);
+ get_random_u32_below(attr->u.delay.la_interval);
rule->dl_time_base = ktime_get_seconds() +
attr->u.delay.la_interval;
}
int
lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
{
- struct lnet_fault_attr *attr;
+ struct lnet_fault_attr *attr4;
struct lnet_fault_stat *stat;
+ struct lnet_fault_large_attr attr = { { 0 } };
+ int rc;
+
+ attr4 = (struct lnet_fault_attr *)data->ioc_inlbuf1;
- attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
+ lnet_fault_attr4_to_attr(attr4, &attr);
switch (opc) {
default:
return -EINVAL;
case LNET_CTL_DROP_ADD:
- if (attr == NULL)
+ if (!attr4)
return -EINVAL;
- return lnet_drop_rule_add(attr);
+ return lnet_drop_rule_add(&attr);
case LNET_CTL_DROP_DEL:
- if (attr == NULL)
+ if (!attr4)
return -EINVAL;
- data->ioc_count = lnet_drop_rule_del(attr->fa_src,
- attr->fa_dst);
+ data->ioc_count = lnet_drop_rule_del(&attr.fa_src,
+ &attr.fa_dst);
return 0;
case LNET_CTL_DROP_RESET:
case LNET_CTL_DROP_LIST:
stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
- if (attr == NULL || stat == NULL)
+ if (!attr4 || !stat)
return -EINVAL;
- return lnet_drop_rule_list(data->ioc_count, attr, stat);
+ rc = lnet_drop_rule_list(data->ioc_count, &attr, stat);
+ lnet_fault_attr_to_attr4(&attr, attr4);
+ return rc;
case LNET_CTL_DELAY_ADD:
- if (attr == NULL)
+ if (!attr4)
return -EINVAL;
- return lnet_delay_rule_add(attr);
+ return lnet_delay_rule_add(&attr);
case LNET_CTL_DELAY_DEL:
- if (attr == NULL)
+ if (!attr4)
return -EINVAL;
- data->ioc_count = lnet_delay_rule_del(attr->fa_src,
- attr->fa_dst, false);
+ data->ioc_count = lnet_delay_rule_del(&attr.fa_src,
+ &attr.fa_dst, false);
return 0;
case LNET_CTL_DELAY_RESET:
case LNET_CTL_DELAY_LIST:
stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
- if (attr == NULL || stat == NULL)
+ if (!attr4 || !stat)
return -EINVAL;
- return lnet_delay_rule_list(data->ioc_count, attr, stat);
+ rc = lnet_delay_rule_list(data->ioc_count, &attr, stat);
+ lnet_fault_attr_to_attr4(&attr, attr4);
+ return rc;
}
}
void
lnet_fault_fini(void)
{
- lnet_drop_rule_del(0, 0);
- lnet_delay_rule_del(0, 0, true);
+ lnet_drop_rule_del(NULL, NULL);
+ lnet_delay_rule_del(NULL, NULL, true);
LASSERT(list_empty(&the_lnet.ln_drop_rules));
LASSERT(list_empty(&the_lnet.ln_delay_rules));