4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 021110-1307, USA
24 * Copyright (c) 2014, 2017, Intel Corporation.
27 * This file is part of Lustre, http://www.lustre.org/
28 * Lustre is a trademark of Sun Microsystems, Inc.
30 * lnet/lnet/net_fault.c
32 * Lustre network fault simulation
34 * Author: liang.zhen@intel.com
37 #define DEBUG_SUBSYSTEM S_LNET
39 #include <linux/random.h>
40 #include <lnet/lib-lnet.h>
41 #include <uapi/linux/lnet/lnetctl.h>
/* Bitmask covering every LNet message type; used as the default
 * fa_msg_mask when a rule does not specify one (see
 * lnet_fault_attr_validate()). */
43 #define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \
44 LNET_GET_BIT | LNET_REPLY_BIT)
/*
 * Drop rule: drops matching messages either by message count
 * (dr_drop_at, used with a drop rate) or by time
 * (dr_drop_time/dr_time_base, used with a drop interval).
 * dr_stat accumulates statistics of dropped messages.
 */
46 struct lnet_drop_rule {
47 /** link chain on the_lnet.ln_drop_rules */
48 struct list_head dr_link;
49 /** attributes of this rule */
50 struct lnet_fault_attr dr_attr;
51 /** lock to protect \a dr_drop_at and \a dr_stat */
54 * the message sequence to drop, which means message is dropped when
55 * dr_stat.drs_count == dr_drop_at
57 unsigned long dr_drop_at;
59 * seconds to drop the next message, it's exclusive with dr_drop_at
61 time64_t dr_drop_time;
62 /** baseline to calculate dr_drop_time */
63 time64_t dr_time_base;
64 /** statistic of dropped messages */
65 struct lnet_fault_stat dr_stat;
/*
 * Check whether rule NID \a nid matches message NID \a msg_nid.
 * An exact match or LNET_NID_ANY matches; otherwise both NIDs must be
 * on the same network and the rule address must be the per-network
 * wildcard address.
 */
69 lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
71 if (nid == msg_nid || nid == LNET_NID_ANY)
74 if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
77 /* 255.255.255.255@net is wildcard for all addresses in a network */
78 return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
/*
 * Check whether a message (source/local/destination NID, message type
 * and portal) matches fault rule attributes \a attr.  All three NID
 * filters, the message-type mask and (when set) the portal mask must
 * match.
 */
82 lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
83 lnet_nid_t local_nid, lnet_nid_t dst,
84 unsigned int type, unsigned int portal)
86 if (!lnet_fault_nid_match(attr->fa_src, src) ||
87 !lnet_fault_nid_match(attr->fa_dst, dst) ||
88 !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
91 if (!(attr->fa_msg_mask & (1 << type)))
94 /* NB: ACK and REPLY have no portal, but they should have been
95 * rejected by message mask */
96 if (attr->fa_ptl_mask != 0 && /* has portal filter */
97 !(attr->fa_ptl_mask & (1ULL << portal)))
/*
 * Normalize and validate fault rule attributes: an empty message mask
 * means "all types"; when a portal filter is present only PUT and GET
 * may remain in the message mask.
 */
104 lnet_fault_attr_validate(struct lnet_fault_attr *attr)
106 if (attr->fa_msg_mask == 0)
107 attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
109 if (attr->fa_ptl_mask == 0) /* no portal filter */
112 /* NB: only PUT and GET can be filtered if portal filter has been set */
113 attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
114 if (attr->fa_msg_mask == 0) {
115 CDEBUG(D_NET, "can't find valid message type bits %x\n",
/* Bump the per-message-type counter in \a stat for message type \a type. */
123 lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
125 /* NB: fs_counter is NOT updated by this function */
143 * LNet message drop simulation
147 * Add a new drop rule to LNet
148 * There is no check for duplicated drop rule, all rules will be checked for
/*
 * Add a new drop rule built from \a attr to the_lnet.ln_drop_rules.
 * Exactly one of da_rate / da_interval must be non-zero (XOR check
 * below); attributes are validated by lnet_fault_attr_validate().
 * The first drop point is randomized with prandom_u32_max() so rules
 * do not all fire on the same message/second.
 */
152 lnet_drop_rule_add(struct lnet_fault_attr *attr)
154 struct lnet_drop_rule *rule;
157 if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
159 "please provide either drop rate or drop interval, "
160 "but not both at the same time %d/%d\n",
161 attr->u.drop.da_rate, attr->u.drop.da_interval);
165 if (lnet_fault_attr_validate(attr) != 0)
172 spin_lock_init(&rule->dr_lock);
174 rule->dr_attr = *attr;
175 if (attr->u.drop.da_interval != 0) {
176 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
177 rule->dr_drop_time = ktime_get_seconds() +
178 prandom_u32_max(attr->u.drop.da_interval);
180 rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
183 lnet_net_lock(LNET_LOCK_EX);
184 list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
185 lnet_net_unlock(LNET_LOCK_EX);
/* Fixed copy-paste bug: "dst %s" previously printed fa_src twice. */
187 CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
188 libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
189 attr->u.drop.da_rate, attr->u.drop.da_interval);
194 * Remove matched drop rules from lnet, all rules that can match \a src and
195 * \a dst will be removed.
196 * If \a src is zero, then all rules have \a dst as destination will be remove
197 * If \a dst is zero, then all rules have \a src as source will be removed
198 * If both of them are zero, all rules will be removed
201 lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
203 struct lnet_drop_rule *rule;
204 struct lnet_drop_rule *tmp;
205 struct list_head zombies;
209 INIT_LIST_HEAD(&zombies);
/* Unlink matching rules onto a private zombie list under the LNet
 * exclusive lock, then log and free them after the lock is dropped. */
211 lnet_net_lock(LNET_LOCK_EX);
212 list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
213 if (rule->dr_attr.fa_src != src && src != 0)
216 if (rule->dr_attr.fa_dst != dst && dst != 0)
219 list_move(&rule->dr_link, &zombies);
221 lnet_net_unlock(LNET_LOCK_EX);
223 list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
224 CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
225 libcfs_nid2str(rule->dr_attr.fa_src),
226 libcfs_nid2str(rule->dr_attr.fa_dst),
227 rule->dr_attr.u.drop.da_rate,
228 rule->dr_attr.u.drop.da_interval);
230 list_del(&rule->dr_link);
239 * List drop rule at position of \a pos
/*
 * Copies the matching rule's attributes and statistics into \a attr
 * and \a stat under the rule's spinlock, holding the current-CPT LNet
 * lock while walking the list.
 */
242 lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
243 struct lnet_fault_stat *stat)
245 struct lnet_drop_rule *rule;
251 cpt = lnet_net_lock_current();
252 list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
256 spin_lock(&rule->dr_lock);
257 *attr = rule->dr_attr;
258 *stat = rule->dr_stat;
259 spin_unlock(&rule->dr_lock);
264 lnet_net_unlock(cpt);
269 * reset counters for all drop rules
272 lnet_drop_rule_reset(void)
274 struct lnet_drop_rule *rule;
278 cpt = lnet_net_lock_current();
280 list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
281 struct lnet_fault_attr *attr = &rule->dr_attr;
283 spin_lock(&rule->dr_lock);
/* Zero the statistics and re-seed the next drop point (message
 * count for rate-based rules, timestamp for interval-based ones). */
285 memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
286 if (attr->u.drop.da_rate != 0) {
287 rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
289 rule->dr_drop_time = ktime_get_seconds() +
290 prandom_u32_max(attr->u.drop.da_interval);
291 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
293 spin_unlock(&rule->dr_lock);
296 lnet_net_unlock(cpt);
/*
 * Pick a health status error for a dropped message from \a mask.
 * A random status is chosen first; if the mask is HSTATUS_RANDOM or
 * the chosen bit is set in the mask it is used directly, otherwise the
 * nearest set bit (smallest delta) is selected.
 */
301 lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
308 /* assign a random failure */
309 choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
313 if (mask == HSTATUS_RANDOM) {
318 if (mask & (1 << choice)) {
323 /* round to the closest ON bit */
325 best_delta = HSTATUS_END;
327 if (mask & (1 << i)) {
331 if (delta < best_delta) {
343 * check source/destination NID, portal, message type and drop rate,
344 * decide whether should drop this message or not
347 drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
348 lnet_nid_t local_nid, lnet_nid_t dst,
349 unsigned int type, unsigned int portal,
350 enum lnet_msg_hstatus *hstatus)
352 struct lnet_fault_attr *attr = &rule->dr_attr;
355 if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
358 if (attr->u.drop.da_drop_all) {
359 CDEBUG(D_NET, "set to drop all messages\n");
365 * if we're trying to match a health status error but it hasn't
366 * been set in the rule, then don't match
368 if ((hstatus && !attr->u.drop.da_health_error_mask) ||
369 (!hstatus && attr->u.drop.da_health_error_mask))
372 /* match this rule, check drop rate now */
373 spin_lock(&rule->dr_lock);
374 if (attr->u.drop.da_random) {
/* random mode: drop roughly half of the matched messages */
375 int value = prandom_u32_max(attr->u.drop.da_interval);
376 if (value >= (attr->u.drop.da_interval / 2))
380 } else if (rule->dr_drop_time != 0) { /* time based drop */
381 time64_t now = ktime_get_seconds();
383 rule->dr_stat.fs_count++;
384 drop = now >= rule->dr_drop_time;
/* advance dr_time_base and pick a random drop time within the
 * next interval window */
386 if (now > rule->dr_time_base)
387 rule->dr_time_base = now;
389 rule->dr_drop_time = rule->dr_time_base +
390 prandom_u32_max(attr->u.drop.da_interval);
391 rule->dr_time_base += attr->u.drop.da_interval;
393 CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
394 libcfs_nid2str(attr->fa_src),
395 libcfs_nid2str(attr->fa_dst),
399 } else { /* rate based drop */
/* drop the dr_drop_at-th message in every da_rate window, then
 * randomize the drop point for the next window */
402 drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
403 count = rule->dr_stat.fs_count;
404 if (do_div(count, attr->u.drop.da_rate) == 0) {
405 rule->dr_drop_at = rule->dr_stat.fs_count +
406 prandom_u32_max(attr->u.drop.da_rate);
407 CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
408 libcfs_nid2str(attr->fa_src),
409 libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
415 if (drop) { /* drop this message, update counters */
417 lnet_fault_match_health(hstatus,
418 attr->u.drop.da_health_error_mask);
419 lnet_fault_stat_inc(&rule->dr_stat, type);
420 rule->dr_stat.u.drop.ds_dropped++;
423 spin_unlock(&rule->dr_lock);
428 * Check if message from \a src to \a dst can match any existed drop rule
431 lnet_drop_rule_match(struct lnet_hdr *hdr,
432 lnet_nid_t local_nid,
433 enum lnet_msg_hstatus *hstatus)
435 lnet_nid_t src = le64_to_cpu(hdr->src_nid);
436 lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
437 unsigned int typ = le32_to_cpu(hdr->type);
438 struct lnet_drop_rule *rule;
/* ptl stays -1 for message types that carry no portal index */
439 unsigned int ptl = -1;
443 /* NB: if Portal is specified, then only PUT and GET will be
444 * filtered by drop rule */
445 if (typ == LNET_MSG_PUT)
446 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
447 else if (typ == LNET_MSG_GET)
448 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
450 cpt = lnet_net_lock_current();
451 list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
452 drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
457 lnet_net_unlock(cpt);
463 * LNet Delay Simulation
465 /** timestamp (second) to send delayed message */
/* Reuses the otherwise-unused msg_ev.hdr_data field of a queued
 * message to store its send deadline. */
466 #define msg_delay_send msg_ev.hdr_data
/*
 * Delay rule: delays matching messages either by message count
 * (dl_delay_at, used with a rate) or by time
 * (dl_delay_time/dl_time_base, used with an interval).  Delayed
 * messages queue on dl_msg_list until dl_timer fires.
 */
468 struct lnet_delay_rule {
469 /** link chain on the_lnet.ln_delay_rules */
470 struct list_head dl_link;
471 /** link chain on delay_dd.dd_sched_rules */
472 struct list_head dl_sched_link;
473 /** attributes of this rule */
474 struct lnet_fault_attr dl_attr;
475 /** lock to protect \a below members */
477 /** refcount of delay rule */
478 atomic_t dl_refcount;
480 * the message sequence to delay, which means message is delayed when
481 * dl_stat.fs_count == dl_delay_at
483 unsigned long dl_delay_at;
485 * seconds to delay the next message, it's exclusive with dl_delay_at
487 time64_t dl_delay_time;
488 /** baseline to calculate dl_delay_time */
489 time64_t dl_time_base;
490 /** jiffies to send the next delayed message */
491 unsigned long dl_msg_send;
492 /** delayed message list */
493 struct list_head dl_msg_list;
494 /** statistic of delayed messages */
495 struct lnet_fault_stat dl_stat;
496 /** timer to wakeup delay_daemon */
497 struct timer_list dl_timer;
/*
 * Singleton state for the delayed-message daemon thread ("lnet_dd"):
 * scheduled rules, wait queues and running/stopped flags.
 */
500 struct delay_daemon_data {
501 /** serialise rule add/remove */
502 struct mutex dd_mutex;
503 /** protect rules on \a dd_sched_rules */
505 /** scheduled delay rules (by timer) */
506 struct list_head dd_sched_rules;
507 /** deamon thread sleeps at here */
508 wait_queue_head_t dd_waitq;
509 /** controler (lctl command) wait at here */
510 wait_queue_head_t dd_ctl_waitq;
511 /** deamon is running */
512 unsigned int dd_running;
513 /** deamon stopped */
514 unsigned int dd_stopped;
517 static struct delay_daemon_data delay_dd;
/*
 * Drop one reference on \a rule; when the last reference goes away the
 * rule must already be unlinked from all lists (asserted below).
 */
520 delay_rule_decref(struct lnet_delay_rule *rule)
522 if (atomic_dec_and_test(&rule->dl_refcount)) {
523 LASSERT(list_empty(&rule->dl_sched_link));
524 LASSERT(list_empty(&rule->dl_msg_list));
525 LASSERT(list_empty(&rule->dl_link));
532 * check source/destination NID, portal, message type and delay rate,
533 * decide whether should delay this message or not
536 delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
537 lnet_nid_t dst, unsigned int type, unsigned int portal,
538 struct lnet_msg *msg)
540 struct lnet_fault_attr *attr = &rule->dl_attr;
543 if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
547 /* match this rule, check delay rate now */
548 spin_lock(&rule->dl_lock);
549 if (rule->dl_delay_time != 0) { /* time based delay */
550 time64_t now = ktime_get_seconds();
552 rule->dl_stat.fs_count++;
553 delay = now >= rule->dl_delay_time;
555 if (now > rule->dl_time_base)
556 rule->dl_time_base = now;
558 rule->dl_delay_time = rule->dl_time_base +
559 prandom_u32_max(attr->u.delay.la_interval);
560 rule->dl_time_base += attr->u.delay.la_interval;
562 CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
563 libcfs_nid2str(attr->fa_src),
564 libcfs_nid2str(attr->fa_dst),
565 rule->dl_delay_time);
568 } else { /* rate based delay */
571 delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
572 /* generate the next random rate sequence */
573 count = rule->dl_stat.fs_count;
574 if (do_div(count, attr->u.delay.la_rate) == 0) {
575 rule->dl_delay_at = rule->dl_stat.fs_count +
576 prandom_u32_max(attr->u.delay.la_rate);
577 CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
578 libcfs_nid2str(attr->fa_src),
579 libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
584 spin_unlock(&rule->dl_lock);
588 /* delay this message, update counters */
589 lnet_fault_stat_inc(&rule->dl_stat, type);
590 rule->dl_stat.u.delay.ls_delayed++;
592 list_add_tail(&msg->msg_list, &rule->dl_msg_list);
593 msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
/* dl_msg_send == -1 means the timer is idle (no pending message).
 * NOTE(review): msg_delay_send is in seconds (ktime_get_seconds) but
 * dl_msg_send is documented as jiffies and passed to mod_timer() —
 * confirm the expected units here. */
594 if (rule->dl_msg_send == -1) {
595 rule->dl_msg_send = msg->msg_delay_send;
596 mod_timer(&rule->dl_timer, rule->dl_msg_send);
599 spin_unlock(&rule->dl_lock);
604 * check if \a msg can match any Delay Rule, receiving of this message
605 * will be delayed if there is a match.
608 lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
610 struct lnet_delay_rule *rule;
611 lnet_nid_t src = le64_to_cpu(hdr->src_nid);
612 lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
613 unsigned int typ = le32_to_cpu(hdr->type);
/* ptl stays -1 for message types that carry no portal index */
614 unsigned int ptl = -1;
616 /* NB: called with hold of lnet_net_lock */
618 /* NB: if Portal is specified, then only PUT and GET will be
619 * filtered by delay rule */
620 if (typ == LNET_MSG_PUT)
621 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
622 else if (typ == LNET_MSG_GET)
623 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
625 list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
626 if (delay_rule_match(rule, src, dst, typ, ptl, msg))
633 /** check out delayed messages for send */
/*
 * Move messages whose deadline has passed (or all of them when \a all
 * is true) from \a rule onto \a msg_list, then re-arm or stop the
 * rule's timer depending on what remains queued.
 */
635 delayed_msg_check(struct lnet_delay_rule *rule, bool all,
636 struct list_head *msg_list)
638 struct lnet_msg *msg;
639 struct lnet_msg *tmp;
640 time64_t now = ktime_get_seconds();
/* NOTE(review): cfs_time_seconds() converts seconds to jiffies while
 * `now` is in seconds — confirm this comparison uses consistent units. */
642 if (!all && cfs_time_seconds(rule->dl_msg_send) > now)
645 spin_lock(&rule->dl_lock);
646 list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
647 if (!all && msg->msg_delay_send > now)
650 msg->msg_delay_send = 0;
651 list_move_tail(&msg->msg_list, msg_list);
654 if (list_empty(&rule->dl_msg_list)) {
/* nothing left to send: stop the timer and mark it idle */
655 del_timer(&rule->dl_timer);
656 rule->dl_msg_send = -1;
658 } else if (!list_empty(msg_list)) {
659 /* dequeued some timedout messages, update timer for the
660 * next delayed message on rule */
661 msg = list_entry(rule->dl_msg_list.next,
662 struct lnet_msg, msg_list);
663 rule->dl_msg_send = msg->msg_delay_send;
664 mod_timer(&rule->dl_timer, rule->dl_msg_send);
666 spin_unlock(&rule->dl_lock);
/*
 * Hand each queued delayed message back to the normal receive path
 * (local parse, forward, or lnet_ni_recv), or drop and finalize it
 * when \a drop is true or delivery fails.
 */
670 delayed_msg_process(struct list_head *msg_list, bool drop)
672 struct lnet_msg *msg;
674 while (!list_empty(msg_list)) {
679 msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
680 LASSERT(msg->msg_rxpeer != NULL);
681 LASSERT(msg->msg_rxni != NULL);
684 cpt = msg->msg_rx_cpt;
686 list_del_init(&msg->msg_list);
690 } else if (!msg->msg_routing) {
691 rc = lnet_parse_local(ni, msg);
697 rc = lnet_parse_forward_locked(ni, msg);
698 lnet_net_unlock(cpt);
702 lnet_ni_recv(ni, msg->msg_private, msg, 0,
703 0, msg->msg_len, msg->msg_len);
704 case LNET_CREDIT_WAIT:
706 default: /* failures */
711 lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
713 lnet_finalize(msg, rc);
718 * Process delayed messages for scheduled rules
719 * This function can either be called by delay_rule_daemon, or by lnet_finalise
722 lnet_delay_rule_check(void)
724 struct lnet_delay_rule *rule;
725 struct list_head msgs;
727 INIT_LIST_HEAD(&msgs);
/* cheap unlocked check first, re-checked under dd_lock below */
729 if (list_empty(&delay_dd.dd_sched_rules))
732 spin_lock_bh(&delay_dd.dd_lock);
733 if (list_empty(&delay_dd.dd_sched_rules)) {
734 spin_unlock_bh(&delay_dd.dd_lock);
738 rule = list_entry(delay_dd.dd_sched_rules.next,
739 struct lnet_delay_rule, dl_sched_link);
740 list_del_init(&rule->dl_sched_link);
741 spin_unlock_bh(&delay_dd.dd_lock);
743 delayed_msg_check(rule, false, &msgs);
744 delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
747 if (!list_empty(&msgs))
748 delayed_msg_process(&msgs, false);
751 /** deamon thread to handle delayed messages */
/*
 * Signals dd_ctl_waitq when started and stopped; loops on dd_waitq
 * processing scheduled rules until dd_running is cleared.
 */
753 lnet_delay_rule_daemon(void *arg)
755 delay_dd.dd_running = 1;
756 wake_up(&delay_dd.dd_ctl_waitq);
758 while (delay_dd.dd_running) {
759 wait_event_interruptible(delay_dd.dd_waitq,
760 !delay_dd.dd_running ||
761 !list_empty(&delay_dd.dd_sched_rules));
762 lnet_delay_rule_check();
765 /* in case more rules have been enqueued after my last check */
766 lnet_delay_rule_check();
767 delay_dd.dd_stopped = 1;
768 wake_up(&delay_dd.dd_ctl_waitq);
/*
 * Timer callback: schedule \a rule onto delay_dd.dd_sched_rules
 * (taking a reference for that list) and wake the daemon; skipped if
 * the rule is already scheduled or the daemon is not running.
 */
774 delay_timer_cb(cfs_timer_cb_arg_t data)
776 struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);
778 spin_lock_bh(&delay_dd.dd_lock);
779 if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
780 atomic_inc(&rule->dl_refcount);
781 list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
782 wake_up(&delay_dd.dd_waitq);
784 spin_unlock_bh(&delay_dd.dd_lock);
788 * Add a new delay rule to LNet
789 * There is no check for duplicated delay rule, all rules will be checked for
793 lnet_delay_rule_add(struct lnet_fault_attr *attr)
795 struct lnet_delay_rule *rule;
799 if (!((attr->u.delay.la_rate == 0) ^
800 (attr->u.delay.la_interval == 0))) {
802 "please provide either delay rate or delay interval, "
803 "but not both at the same time %d/%d\n",
804 attr->u.delay.la_rate, attr->u.delay.la_interval);
808 if (attr->u.delay.la_latency == 0) {
809 CDEBUG(D_NET, "delay latency cannot be zero\n");
813 if (lnet_fault_attr_validate(attr) != 0)
820 mutex_lock(&delay_dd.dd_mutex);
821 if (!delay_dd.dd_running) {
822 struct task_struct *task;
824 /* NB: although LND threads will process delayed message
825 * in lnet_finalize, but there is no guarantee that LND
826 * threads will be waken up if no other message needs to
828 * Only one daemon thread, performance is not the concern
829 * of this simualation module.
831 task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
836 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
839 cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
840 (unsigned long)rule, 0);
842 spin_lock_init(&rule->dl_lock);
843 INIT_LIST_HEAD(&rule->dl_msg_list);
844 INIT_LIST_HEAD(&rule->dl_sched_link);
846 rule->dl_attr = *attr;
847 if (attr->u.delay.la_interval != 0) {
848 rule->dl_time_base = ktime_get_seconds() +
849 attr->u.delay.la_interval;
850 rule->dl_delay_time = ktime_get_seconds() +
851 prandom_u32_max(attr->u.delay.la_interval);
853 rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
856 rule->dl_msg_send = -1;
858 lnet_net_lock(LNET_LOCK_EX);
859 atomic_set(&rule->dl_refcount, 1);
860 list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
861 lnet_net_unlock(LNET_LOCK_EX);
863 CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
864 libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
865 attr->u.delay.la_rate);
867 mutex_unlock(&delay_dd.dd_mutex);
870 mutex_unlock(&delay_dd.dd_mutex);
876 * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src
877 * and \a dst are zero, all rules will be removed, otherwise only matched rules
879 * If \a src is zero, then all rules have \a dst as destination will be remove
880 * If \a dst is zero, then all rules have \a src as source will be removed
882 * When a delay rule is removed, all delayed messages of this rule will be
883 * processed immediately.
886 lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
888 struct lnet_delay_rule *rule;
889 struct lnet_delay_rule *tmp;
890 struct list_head rule_list;
891 struct list_head msg_list;
896 INIT_LIST_HEAD(&rule_list);
897 INIT_LIST_HEAD(&msg_list);
902 mutex_lock(&delay_dd.dd_mutex);
903 lnet_net_lock(LNET_LOCK_EX);
905 list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
906 if (rule->dl_attr.fa_src != src && src != 0)
909 if (rule->dl_attr.fa_dst != dst && dst != 0)
912 CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
913 libcfs_nid2str(rule->dl_attr.fa_src),
914 libcfs_nid2str(rule->dl_attr.fa_dst),
915 rule->dl_attr.u.delay.la_rate,
916 rule->dl_attr.u.delay.la_interval);
917 /* refcount is taken over by rule_list */
918 list_move(&rule->dl_link, &rule_list);
921 /* check if we need to shutdown delay_daemon */
922 cleanup = list_empty(&the_lnet.ln_delay_rules) &&
923 !list_empty(&rule_list);
924 lnet_net_unlock(LNET_LOCK_EX);
/* outside the LNet lock: stop each rule's timer, flush all its
 * queued messages and drop the list reference */
926 list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
927 list_del_init(&rule->dl_link);
929 del_timer_sync(&rule->dl_timer);
930 delayed_msg_check(rule, true, &msg_list);
931 delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
935 if (cleanup) { /* no more delay rule, shutdown delay_daemon */
936 LASSERT(delay_dd.dd_running);
937 delay_dd.dd_running = 0;
938 wake_up(&delay_dd.dd_waitq);
940 while (!delay_dd.dd_stopped)
941 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
943 mutex_unlock(&delay_dd.dd_mutex);
945 if (!list_empty(&msg_list))
946 delayed_msg_process(&msg_list, shutdown);
952 * List Delay Rule at position of \a pos
/*
 * Copies the matching rule's attributes and statistics into \a attr
 * and \a stat under the rule's spinlock, holding the current-CPT LNet
 * lock while walking the list.
 */
955 lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
956 struct lnet_fault_stat *stat)
958 struct lnet_delay_rule *rule;
964 cpt = lnet_net_lock_current();
965 list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
969 spin_lock(&rule->dl_lock);
970 *attr = rule->dl_attr;
971 *stat = rule->dl_stat;
972 spin_unlock(&rule->dl_lock);
977 lnet_net_unlock(cpt);
982 * reset counters for all Delay Rules
985 lnet_delay_rule_reset(void)
987 struct lnet_delay_rule *rule;
991 cpt = lnet_net_lock_current();
993 list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
994 struct lnet_fault_attr *attr = &rule->dl_attr;
996 spin_lock(&rule->dl_lock);
/* Zero the statistics and re-seed the next delay point (message
 * count for rate-based rules, timestamp for interval-based ones). */
998 memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
999 if (attr->u.delay.la_rate != 0) {
1000 rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
1002 rule->dl_delay_time = ktime_get_seconds() +
1003 prandom_u32_max(attr->u.delay.la_interval);
1004 rule->dl_time_base = ktime_get_seconds() +
1005 attr->u.delay.la_interval;
1007 spin_unlock(&rule->dl_lock);
1010 lnet_net_unlock(cpt);
/*
 * ioctl dispatcher for fault simulation: routes LNET_CTL_DROP_* and
 * LNET_CTL_DELAY_* opcodes to the corresponding rule add/del/reset/
 * list helpers.  attr comes from ioc_inlbuf1, stat from ioc_inlbuf2.
 */
1015 lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
1017 struct lnet_fault_attr *attr;
1018 struct lnet_fault_stat *stat;
1020 attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
1026 case LNET_CTL_DROP_ADD:
1030 return lnet_drop_rule_add(attr);
1032 case LNET_CTL_DROP_DEL:
1036 data->ioc_count = lnet_drop_rule_del(attr->fa_src,
1040 case LNET_CTL_DROP_RESET:
1041 lnet_drop_rule_reset();
1044 case LNET_CTL_DROP_LIST:
1045 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1046 if (attr == NULL || stat == NULL)
1049 return lnet_drop_rule_list(data->ioc_count, attr, stat);
1051 case LNET_CTL_DELAY_ADD:
1055 return lnet_delay_rule_add(attr);
1057 case LNET_CTL_DELAY_DEL:
1061 data->ioc_count = lnet_delay_rule_del(attr->fa_src,
1062 attr->fa_dst, false);
1065 case LNET_CTL_DELAY_RESET:
1066 lnet_delay_rule_reset();
1069 case LNET_CTL_DELAY_LIST:
1070 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1071 if (attr == NULL || stat == NULL)
1074 return lnet_delay_rule_list(data->ioc_count, attr, stat);
/*
 * Module init: compile-time check that the *_BIT masks line up with
 * the LNET_MSG_* type values, then initialize delay daemon state.
 */
1079 lnet_fault_init(void)
1081 CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
1082 CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
1083 CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
1084 CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);
1086 mutex_init(&delay_dd.dd_mutex);
1087 spin_lock_init(&delay_dd.dd_lock);
1088 init_waitqueue_head(&delay_dd.dd_waitq);
1089 init_waitqueue_head(&delay_dd.dd_ctl_waitq);
1090 INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
/*
 * Module cleanup: remove every drop and delay rule (delay removal with
 * shutdown=true drops queued messages) and assert all lists are empty.
 */
1096 lnet_fault_fini(void)
1098 lnet_drop_rule_del(0, 0);
1099 lnet_delay_rule_del(0, 0, true);
1101 LASSERT(list_empty(&the_lnet.ln_drop_rules));
1102 LASSERT(list_empty(&the_lnet.ln_delay_rules));
1103 LASSERT(list_empty(&delay_dd.dd_sched_rules));