4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2014, 2017, Intel Corporation.
27 * This file is part of Lustre, http://www.lustre.org/
28 * Lustre is a trademark of Sun Microsystems, Inc.
30 * lnet/lnet/net_fault.c
32 * Lustre network fault simulation
34 * Author: liang.zhen@intel.com
37 #define DEBUG_SUBSYSTEM S_LNET
39 #include <lnet/lib-lnet.h>
40 #include <uapi/linux/lnet/lnetctl.h>
42 #define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \
43 LNET_GET_BIT | LNET_REPLY_BIT)
45 struct lnet_drop_rule {
46 /** link chain on the_lnet.ln_drop_rules */
47 struct list_head dr_link;
48 /** attributes of this rule */
49 struct lnet_fault_attr dr_attr;
50 /** lock to protect \a dr_drop_at and \a dr_stat */
53 * the message sequence to drop, which means message is dropped when
54 * dr_stat.drs_count == dr_drop_at
56 unsigned long dr_drop_at;
58 * seconds to drop the next message, it's exclusive with dr_drop_at
60 time64_t dr_drop_time;
61 /** baseline to caculate dr_drop_time */
62 time64_t dr_time_base;
63 /** statistic of dropped messages */
64 struct lnet_fault_stat dr_stat;
68 lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
70 if (nid == msg_nid || nid == LNET_NID_ANY)
73 if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
76 /* 255.255.255.255@net is wildcard for all addresses in a network */
77 return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
81 lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
82 lnet_nid_t local_nid, lnet_nid_t dst,
83 unsigned int type, unsigned int portal)
85 if (!lnet_fault_nid_match(attr->fa_src, src) ||
86 !lnet_fault_nid_match(attr->fa_dst, dst) ||
87 !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
90 if (!(attr->fa_msg_mask & (1 << type)))
93 /* NB: ACK and REPLY have no portal, but they should have been
94 * rejected by message mask */
95 if (attr->fa_ptl_mask != 0 && /* has portal filter */
96 !(attr->fa_ptl_mask & (1ULL << portal)))
103 lnet_fault_attr_validate(struct lnet_fault_attr *attr)
105 if (attr->fa_msg_mask == 0)
106 attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
108 if (attr->fa_ptl_mask == 0) /* no portal filter */
111 /* NB: only PUT and GET can be filtered if portal filter has been set */
112 attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
113 if (attr->fa_msg_mask == 0) {
114 CDEBUG(D_NET, "can't find valid message type bits %x\n",
122 lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
124 /* NB: fs_counter is NOT updated by this function */
142 * LNet message drop simulation
146 * Add a new drop rule to LNet
147 * There is no check for duplicated drop rule, all rules will be checked for
151 lnet_drop_rule_add(struct lnet_fault_attr *attr)
153 struct lnet_drop_rule *rule;
156 if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
158 "please provide either drop rate or drop interval, "
159 "but not both at the same time %d/%d\n",
160 attr->u.drop.da_rate, attr->u.drop.da_interval);
164 if (lnet_fault_attr_validate(attr) != 0)
171 spin_lock_init(&rule->dr_lock);
173 rule->dr_attr = *attr;
174 if (attr->u.drop.da_interval != 0) {
175 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
176 rule->dr_drop_time = ktime_get_seconds() +
177 cfs_rand() % attr->u.drop.da_interval;
179 rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
182 lnet_net_lock(LNET_LOCK_EX);
183 list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
184 lnet_net_unlock(LNET_LOCK_EX);
186 CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
187 libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
188 attr->u.drop.da_rate, attr->u.drop.da_interval);
193 * Remove matched drop rules from lnet, all rules that can match \a src and
194 * \a dst will be removed.
195 * If \a src is zero, then all rules have \a dst as destination will be remove
196 * If \a dst is zero, then all rules have \a src as source will be removed
197 * If both of them are zero, all rules will be removed
200 lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
202 struct lnet_drop_rule *rule;
203 struct lnet_drop_rule *tmp;
204 struct list_head zombies;
208 INIT_LIST_HEAD(&zombies);
210 lnet_net_lock(LNET_LOCK_EX);
211 list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
212 if (rule->dr_attr.fa_src != src && src != 0)
215 if (rule->dr_attr.fa_dst != dst && dst != 0)
218 list_move(&rule->dr_link, &zombies);
220 lnet_net_unlock(LNET_LOCK_EX);
222 list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
223 CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
224 libcfs_nid2str(rule->dr_attr.fa_src),
225 libcfs_nid2str(rule->dr_attr.fa_dst),
226 rule->dr_attr.u.drop.da_rate,
227 rule->dr_attr.u.drop.da_interval);
229 list_del(&rule->dr_link);
238 * List drop rule at position of \a pos
241 lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
242 struct lnet_fault_stat *stat)
244 struct lnet_drop_rule *rule;
250 cpt = lnet_net_lock_current();
251 list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
255 spin_lock(&rule->dr_lock);
256 *attr = rule->dr_attr;
257 *stat = rule->dr_stat;
258 spin_unlock(&rule->dr_lock);
263 lnet_net_unlock(cpt);
268 * reset counters for all drop rules
271 lnet_drop_rule_reset(void)
273 struct lnet_drop_rule *rule;
277 cpt = lnet_net_lock_current();
279 list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
280 struct lnet_fault_attr *attr = &rule->dr_attr;
282 spin_lock(&rule->dr_lock);
284 memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
285 if (attr->u.drop.da_rate != 0) {
286 rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
288 rule->dr_drop_time = ktime_get_seconds() +
289 cfs_rand() % attr->u.drop.da_interval;
290 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
292 spin_unlock(&rule->dr_lock);
295 lnet_net_unlock(cpt);
300 lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
308 /* assign a random failure */
310 choice = random % (LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
314 if (mask == HSTATUS_RANDOM) {
319 if (mask & (1 << choice)) {
324 /* round to the closest ON bit */
326 best_delta = HSTATUS_END;
328 if (mask & (1 << i)) {
332 if (delta < best_delta) {
344 * check source/destination NID, portal, message type and drop rate,
345 * decide whether should drop this message or not
348 drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
349 lnet_nid_t local_nid, lnet_nid_t dst,
350 unsigned int type, unsigned int portal,
351 enum lnet_msg_hstatus *hstatus)
353 struct lnet_fault_attr *attr = &rule->dr_attr;
356 if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
359 if (attr->u.drop.da_drop_all) {
360 CDEBUG(D_NET, "set to drop all messages\n");
366 * if we're trying to match a health status error but it hasn't
367 * been set in the rule, then don't match
369 if ((hstatus && !attr->u.drop.da_health_error_mask) ||
370 (!hstatus && attr->u.drop.da_health_error_mask))
373 /* match this rule, check drop rate now */
374 spin_lock(&rule->dr_lock);
375 if (attr->u.drop.da_random) {
376 int value = cfs_rand() % attr->u.drop.da_interval;
377 if (value >= (attr->u.drop.da_interval / 2))
381 } else if (rule->dr_drop_time != 0) { /* time based drop */
382 time64_t now = ktime_get_seconds();
384 rule->dr_stat.fs_count++;
385 drop = now >= rule->dr_drop_time;
387 if (now > rule->dr_time_base)
388 rule->dr_time_base = now;
390 rule->dr_drop_time = rule->dr_time_base +
391 cfs_rand() % attr->u.drop.da_interval;
392 rule->dr_time_base += attr->u.drop.da_interval;
394 CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
395 libcfs_nid2str(attr->fa_src),
396 libcfs_nid2str(attr->fa_dst),
400 } else { /* rate based drop */
403 drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
404 count = rule->dr_stat.fs_count;
405 if (do_div(count, attr->u.drop.da_rate) == 0) {
406 rule->dr_drop_at = rule->dr_stat.fs_count +
407 cfs_rand() % attr->u.drop.da_rate;
408 CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
409 libcfs_nid2str(attr->fa_src),
410 libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
416 if (drop) { /* drop this message, update counters */
418 lnet_fault_match_health(hstatus,
419 attr->u.drop.da_health_error_mask);
420 lnet_fault_stat_inc(&rule->dr_stat, type);
421 rule->dr_stat.u.drop.ds_dropped++;
424 spin_unlock(&rule->dr_lock);
429 * Check if message from \a src to \a dst can match any existed drop rule
432 lnet_drop_rule_match(struct lnet_hdr *hdr,
433 lnet_nid_t local_nid,
434 enum lnet_msg_hstatus *hstatus)
436 lnet_nid_t src = le64_to_cpu(hdr->src_nid);
437 lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
438 unsigned int typ = le32_to_cpu(hdr->type);
439 struct lnet_drop_rule *rule;
440 unsigned int ptl = -1;
444 /* NB: if Portal is specified, then only PUT and GET will be
445 * filtered by drop rule */
446 if (typ == LNET_MSG_PUT)
447 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
448 else if (typ == LNET_MSG_GET)
449 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
451 cpt = lnet_net_lock_current();
452 list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
453 drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
458 lnet_net_unlock(cpt);
464 * LNet Delay Simulation
466 /** timestamp (second) to send delayed message */
467 #define msg_delay_send msg_ev.hdr_data
469 struct lnet_delay_rule {
470 /** link chain on the_lnet.ln_delay_rules */
471 struct list_head dl_link;
472 /** link chain on delay_dd.dd_sched_rules */
473 struct list_head dl_sched_link;
474 /** attributes of this rule */
475 struct lnet_fault_attr dl_attr;
476 /** lock to protect \a below members */
478 /** refcount of delay rule */
479 atomic_t dl_refcount;
481 * the message sequence to delay, which means message is delayed when
482 * dl_stat.fs_count == dl_delay_at
484 unsigned long dl_delay_at;
486 * seconds to delay the next message, it's exclusive with dl_delay_at
488 time64_t dl_delay_time;
489 /** baseline to caculate dl_delay_time */
490 time64_t dl_time_base;
491 /** jiffies to send the next delayed message */
492 unsigned long dl_msg_send;
493 /** delayed message list */
494 struct list_head dl_msg_list;
495 /** statistic of delayed messages */
496 struct lnet_fault_stat dl_stat;
497 /** timer to wakeup delay_daemon */
498 struct timer_list dl_timer;
501 struct delay_daemon_data {
502 /** serialise rule add/remove */
503 struct mutex dd_mutex;
504 /** protect rules on \a dd_sched_rules */
506 /** scheduled delay rules (by timer) */
507 struct list_head dd_sched_rules;
508 /** deamon thread sleeps at here */
509 wait_queue_head_t dd_waitq;
510 /** controler (lctl command) wait at here */
511 wait_queue_head_t dd_ctl_waitq;
512 /** deamon is running */
513 unsigned int dd_running;
514 /** deamon stopped */
515 unsigned int dd_stopped;
518 static struct delay_daemon_data delay_dd;
521 delay_rule_decref(struct lnet_delay_rule *rule)
523 if (atomic_dec_and_test(&rule->dl_refcount)) {
524 LASSERT(list_empty(&rule->dl_sched_link));
525 LASSERT(list_empty(&rule->dl_msg_list));
526 LASSERT(list_empty(&rule->dl_link));
533 * check source/destination NID, portal, message type and delay rate,
534 * decide whether should delay this message or not
537 delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
538 lnet_nid_t dst, unsigned int type, unsigned int portal,
539 struct lnet_msg *msg)
541 struct lnet_fault_attr *attr = &rule->dl_attr;
544 if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
548 /* match this rule, check delay rate now */
549 spin_lock(&rule->dl_lock);
550 if (rule->dl_delay_time != 0) { /* time based delay */
551 time64_t now = ktime_get_seconds();
553 rule->dl_stat.fs_count++;
554 delay = now >= rule->dl_delay_time;
556 if (now > rule->dl_time_base)
557 rule->dl_time_base = now;
559 rule->dl_delay_time = rule->dl_time_base +
560 cfs_rand() % attr->u.delay.la_interval;
561 rule->dl_time_base += attr->u.delay.la_interval;
563 CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
564 libcfs_nid2str(attr->fa_src),
565 libcfs_nid2str(attr->fa_dst),
566 rule->dl_delay_time);
569 } else { /* rate based delay */
572 delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
573 /* generate the next random rate sequence */
574 count = rule->dl_stat.fs_count;
575 if (do_div(count, attr->u.delay.la_rate) == 0) {
576 rule->dl_delay_at = rule->dl_stat.fs_count +
577 cfs_rand() % attr->u.delay.la_rate;
578 CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
579 libcfs_nid2str(attr->fa_src),
580 libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
585 spin_unlock(&rule->dl_lock);
589 /* delay this message, update counters */
590 lnet_fault_stat_inc(&rule->dl_stat, type);
591 rule->dl_stat.u.delay.ls_delayed++;
593 list_add_tail(&msg->msg_list, &rule->dl_msg_list);
594 msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
595 if (rule->dl_msg_send == -1) {
596 rule->dl_msg_send = msg->msg_delay_send;
597 mod_timer(&rule->dl_timer, rule->dl_msg_send);
600 spin_unlock(&rule->dl_lock);
605 * check if \a msg can match any Delay Rule, receiving of this message
606 * will be delayed if there is a match.
609 lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
611 struct lnet_delay_rule *rule;
612 lnet_nid_t src = le64_to_cpu(hdr->src_nid);
613 lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
614 unsigned int typ = le32_to_cpu(hdr->type);
615 unsigned int ptl = -1;
617 /* NB: called with hold of lnet_net_lock */
619 /* NB: if Portal is specified, then only PUT and GET will be
620 * filtered by delay rule */
621 if (typ == LNET_MSG_PUT)
622 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
623 else if (typ == LNET_MSG_GET)
624 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
626 list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
627 if (delay_rule_match(rule, src, dst, typ, ptl, msg))
634 /** check out delayed messages for send */
636 delayed_msg_check(struct lnet_delay_rule *rule, bool all,
637 struct list_head *msg_list)
639 struct lnet_msg *msg;
640 struct lnet_msg *tmp;
641 time64_t now = ktime_get_seconds();
643 if (!all && cfs_time_seconds(rule->dl_msg_send) > now)
646 spin_lock(&rule->dl_lock);
647 list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
648 if (!all && msg->msg_delay_send > now)
651 msg->msg_delay_send = 0;
652 list_move_tail(&msg->msg_list, msg_list);
655 if (list_empty(&rule->dl_msg_list)) {
656 del_timer(&rule->dl_timer);
657 rule->dl_msg_send = -1;
659 } else if (!list_empty(msg_list)) {
660 /* dequeued some timedout messages, update timer for the
661 * next delayed message on rule */
662 msg = list_entry(rule->dl_msg_list.next,
663 struct lnet_msg, msg_list);
664 rule->dl_msg_send = msg->msg_delay_send;
665 mod_timer(&rule->dl_timer, rule->dl_msg_send);
667 spin_unlock(&rule->dl_lock);
671 delayed_msg_process(struct list_head *msg_list, bool drop)
673 struct lnet_msg *msg;
675 while (!list_empty(msg_list)) {
680 msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
681 LASSERT(msg->msg_rxpeer != NULL);
682 LASSERT(msg->msg_rxni != NULL);
685 cpt = msg->msg_rx_cpt;
687 list_del_init(&msg->msg_list);
691 } else if (!msg->msg_routing) {
692 rc = lnet_parse_local(ni, msg);
698 rc = lnet_parse_forward_locked(ni, msg);
699 lnet_net_unlock(cpt);
703 lnet_ni_recv(ni, msg->msg_private, msg, 0,
704 0, msg->msg_len, msg->msg_len);
705 case LNET_CREDIT_WAIT:
707 default: /* failures */
712 lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
714 lnet_finalize(msg, rc);
719 * Process delayed messages for scheduled rules
720 * This function can either be called by delay_rule_daemon, or by lnet_finalise
723 lnet_delay_rule_check(void)
725 struct lnet_delay_rule *rule;
726 struct list_head msgs;
728 INIT_LIST_HEAD(&msgs);
730 if (list_empty(&delay_dd.dd_sched_rules))
733 spin_lock_bh(&delay_dd.dd_lock);
734 if (list_empty(&delay_dd.dd_sched_rules)) {
735 spin_unlock_bh(&delay_dd.dd_lock);
739 rule = list_entry(delay_dd.dd_sched_rules.next,
740 struct lnet_delay_rule, dl_sched_link);
741 list_del_init(&rule->dl_sched_link);
742 spin_unlock_bh(&delay_dd.dd_lock);
744 delayed_msg_check(rule, false, &msgs);
745 delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
748 if (!list_empty(&msgs))
749 delayed_msg_process(&msgs, false);
752 /** deamon thread to handle delayed messages */
754 lnet_delay_rule_daemon(void *arg)
756 delay_dd.dd_running = 1;
757 wake_up(&delay_dd.dd_ctl_waitq);
759 while (delay_dd.dd_running) {
760 wait_event_interruptible(delay_dd.dd_waitq,
761 !delay_dd.dd_running ||
762 !list_empty(&delay_dd.dd_sched_rules));
763 lnet_delay_rule_check();
766 /* in case more rules have been enqueued after my last check */
767 lnet_delay_rule_check();
768 delay_dd.dd_stopped = 1;
769 wake_up(&delay_dd.dd_ctl_waitq);
775 delay_timer_cb(cfs_timer_cb_arg_t data)
777 struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);
779 spin_lock_bh(&delay_dd.dd_lock);
780 if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
781 atomic_inc(&rule->dl_refcount);
782 list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
783 wake_up(&delay_dd.dd_waitq);
785 spin_unlock_bh(&delay_dd.dd_lock);
789 * Add a new delay rule to LNet
790 * There is no check for duplicated delay rule, all rules will be checked for
794 lnet_delay_rule_add(struct lnet_fault_attr *attr)
796 struct lnet_delay_rule *rule;
800 if (!((attr->u.delay.la_rate == 0) ^
801 (attr->u.delay.la_interval == 0))) {
803 "please provide either delay rate or delay interval, "
804 "but not both at the same time %d/%d\n",
805 attr->u.delay.la_rate, attr->u.delay.la_interval);
809 if (attr->u.delay.la_latency == 0) {
810 CDEBUG(D_NET, "delay latency cannot be zero\n");
814 if (lnet_fault_attr_validate(attr) != 0)
821 mutex_lock(&delay_dd.dd_mutex);
822 if (!delay_dd.dd_running) {
823 struct task_struct *task;
825 /* NB: although LND threads will process delayed message
826 * in lnet_finalize, but there is no guarantee that LND
827 * threads will be waken up if no other message needs to
829 * Only one daemon thread, performance is not the concern
830 * of this simualation module.
832 task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
837 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
840 cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
841 (unsigned long)rule, 0);
843 spin_lock_init(&rule->dl_lock);
844 INIT_LIST_HEAD(&rule->dl_msg_list);
845 INIT_LIST_HEAD(&rule->dl_sched_link);
847 rule->dl_attr = *attr;
848 if (attr->u.delay.la_interval != 0) {
849 rule->dl_time_base = ktime_get_seconds() +
850 attr->u.delay.la_interval;
851 rule->dl_delay_time = ktime_get_seconds() +
852 cfs_rand() % attr->u.delay.la_interval;
854 rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
857 rule->dl_msg_send = -1;
859 lnet_net_lock(LNET_LOCK_EX);
860 atomic_set(&rule->dl_refcount, 1);
861 list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
862 lnet_net_unlock(LNET_LOCK_EX);
864 CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
865 libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
866 attr->u.delay.la_rate);
868 mutex_unlock(&delay_dd.dd_mutex);
871 mutex_unlock(&delay_dd.dd_mutex);
877 * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src
878 * and \a dst are zero, all rules will be removed, otherwise only matched rules
880 * If \a src is zero, then all rules have \a dst as destination will be remove
881 * If \a dst is zero, then all rules have \a src as source will be removed
883 * When a delay rule is removed, all delayed messages of this rule will be
884 * processed immediately.
887 lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
889 struct lnet_delay_rule *rule;
890 struct lnet_delay_rule *tmp;
891 struct list_head rule_list;
892 struct list_head msg_list;
897 INIT_LIST_HEAD(&rule_list);
898 INIT_LIST_HEAD(&msg_list);
903 mutex_lock(&delay_dd.dd_mutex);
904 lnet_net_lock(LNET_LOCK_EX);
906 list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
907 if (rule->dl_attr.fa_src != src && src != 0)
910 if (rule->dl_attr.fa_dst != dst && dst != 0)
913 CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
914 libcfs_nid2str(rule->dl_attr.fa_src),
915 libcfs_nid2str(rule->dl_attr.fa_dst),
916 rule->dl_attr.u.delay.la_rate,
917 rule->dl_attr.u.delay.la_interval);
918 /* refcount is taken over by rule_list */
919 list_move(&rule->dl_link, &rule_list);
922 /* check if we need to shutdown delay_daemon */
923 cleanup = list_empty(&the_lnet.ln_delay_rules) &&
924 !list_empty(&rule_list);
925 lnet_net_unlock(LNET_LOCK_EX);
927 list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
928 list_del_init(&rule->dl_link);
930 del_timer_sync(&rule->dl_timer);
931 delayed_msg_check(rule, true, &msg_list);
932 delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
936 if (cleanup) { /* no more delay rule, shutdown delay_daemon */
937 LASSERT(delay_dd.dd_running);
938 delay_dd.dd_running = 0;
939 wake_up(&delay_dd.dd_waitq);
941 while (!delay_dd.dd_stopped)
942 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
944 mutex_unlock(&delay_dd.dd_mutex);
946 if (!list_empty(&msg_list))
947 delayed_msg_process(&msg_list, shutdown);
953 * List Delay Rule at position of \a pos
956 lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
957 struct lnet_fault_stat *stat)
959 struct lnet_delay_rule *rule;
965 cpt = lnet_net_lock_current();
966 list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
970 spin_lock(&rule->dl_lock);
971 *attr = rule->dl_attr;
972 *stat = rule->dl_stat;
973 spin_unlock(&rule->dl_lock);
978 lnet_net_unlock(cpt);
983 * reset counters for all Delay Rules
986 lnet_delay_rule_reset(void)
988 struct lnet_delay_rule *rule;
992 cpt = lnet_net_lock_current();
994 list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
995 struct lnet_fault_attr *attr = &rule->dl_attr;
997 spin_lock(&rule->dl_lock);
999 memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
1000 if (attr->u.delay.la_rate != 0) {
1001 rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
1003 rule->dl_delay_time = ktime_get_seconds() +
1004 cfs_rand() % attr->u.delay.la_interval;
1005 rule->dl_time_base = ktime_get_seconds() +
1006 attr->u.delay.la_interval;
1008 spin_unlock(&rule->dl_lock);
1011 lnet_net_unlock(cpt);
1016 lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
1018 struct lnet_fault_attr *attr;
1019 struct lnet_fault_stat *stat;
1021 attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
1027 case LNET_CTL_DROP_ADD:
1031 return lnet_drop_rule_add(attr);
1033 case LNET_CTL_DROP_DEL:
1037 data->ioc_count = lnet_drop_rule_del(attr->fa_src,
1041 case LNET_CTL_DROP_RESET:
1042 lnet_drop_rule_reset();
1045 case LNET_CTL_DROP_LIST:
1046 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1047 if (attr == NULL || stat == NULL)
1050 return lnet_drop_rule_list(data->ioc_count, attr, stat);
1052 case LNET_CTL_DELAY_ADD:
1056 return lnet_delay_rule_add(attr);
1058 case LNET_CTL_DELAY_DEL:
1062 data->ioc_count = lnet_delay_rule_del(attr->fa_src,
1063 attr->fa_dst, false);
1066 case LNET_CTL_DELAY_RESET:
1067 lnet_delay_rule_reset();
1070 case LNET_CTL_DELAY_LIST:
1071 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1072 if (attr == NULL || stat == NULL)
1075 return lnet_delay_rule_list(data->ioc_count, attr, stat);
1080 lnet_fault_init(void)
1082 CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
1083 CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
1084 CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
1085 CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);
1087 mutex_init(&delay_dd.dd_mutex);
1088 spin_lock_init(&delay_dd.dd_lock);
1089 init_waitqueue_head(&delay_dd.dd_waitq);
1090 init_waitqueue_head(&delay_dd.dd_ctl_waitq);
1091 INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
1097 lnet_fault_fini(void)
1099 lnet_drop_rule_del(0, 0);
1100 lnet_delay_rule_del(0, 0, true);
1102 LASSERT(list_empty(&the_lnet.ln_drop_rules));
1103 LASSERT(list_empty(&the_lnet.ln_delay_rules));
1104 LASSERT(list_empty(&delay_dd.dd_sched_rules));