/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/net_fault.c
 *
 * Lustre network fault simulation
 *
 * Author: liang.zhen@intel.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
#include <lnet/lnetctl.h>

#define LNET_MSG_MASK		(LNET_PUT_BIT | LNET_ACK_BIT | \
				 LNET_GET_BIT | LNET_REPLY_BIT)
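/*
 * Overview: a fault rule is either a drop rule or a delay rule. Each rule
 * filters messages by source NID, destination NID, portal and message type,
 * and fires either once at a random point in every window of "rate"
 * messages, or at a random time within every "interval" seconds; the two
 * modes are mutually exclusive. Rules are normally driven from userspace
 * through lnet_fault_ctl(), e.g. (illustrative only, option spelling can
 * differ between Lustre releases):
 *
 *	lctl net_drop_add -s *@tcp -d *@tcp -r 10	# drop ~1/10 messages
 *	lctl net_delay_add -s *@tcp -d *@tcp -r 10 -l 1	# delay ~1/10 by 1s
 */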
struct lnet_drop_rule {
	/** link chain on the_lnet.ln_drop_rules */
	struct list_head	dr_link;
	/** attributes of this rule */
	struct lnet_fault_attr	dr_attr;
	/** lock to protect \a dr_drop_at and \a dr_stat */
	spinlock_t		dr_lock;
	/**
	 * the message sequence to drop, which means the message is dropped
	 * when dr_stat.fs_count == dr_drop_at
	 */
	unsigned long		dr_drop_at;
	/**
	 * seconds to drop the next message, mutually exclusive with
	 * \a dr_drop_at
	 */
	cfs_time_t		dr_drop_time;
	/** baseline to calculate dr_drop_time */
	cfs_time_t		dr_time_base;
	/** statistic of dropped messages */
	struct lnet_fault_stat	dr_stat;
};
static bool
lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
{
	if (nid == msg_nid || nid == LNET_NID_ANY)
		return true;

	if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
		return false;

	/* 255.255.255.255@net is a wildcard for all addresses in a network */
	return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
}
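/*
 * For example (standard NID string notation): a rule configured with
 * fa_src == 255.255.255.255@tcp matches messages from any NID on the "tcp"
 * network but nothing on other networks, while fa_src == LNET_NID_ANY
 * matches any NID on any network.
 */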
static bool
lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
		      lnet_nid_t dst, unsigned int type, unsigned int portal)
{
	if (!lnet_fault_nid_match(attr->fa_src, src) ||
	    !lnet_fault_nid_match(attr->fa_dst, dst))
		return false;

	if (!(attr->fa_msg_mask & (1 << type)))
		return false;

	/* NB: ACK and REPLY have no portal, but they should have been
	 * rejected by the message mask */
	if (attr->fa_ptl_mask != 0 && /* has portal filter */
	    !(attr->fa_ptl_mask & (1ULL << portal)))
		return false;

	return true;
}
static int
lnet_fault_attr_validate(struct lnet_fault_attr *attr)
{
	if (attr->fa_msg_mask == 0)
		attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */

	if (attr->fa_ptl_mask == 0) /* no portal filter */
		return 0;

	/* NB: only PUT and GET can be filtered if a portal filter is set */
	attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
	if (attr->fa_msg_mask == 0) {
		CDEBUG(D_NET, "can't find valid message type bits %x\n",
		       attr->fa_msg_mask);
		return -EINVAL;
	}

	return 0;
}
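/*
 * E.g. a rule created with fa_ptl_mask == (1ULL << 9) and fa_msg_mask == 0
 * is narrowed here to PUT and GET on portal 9 only: ACK and REPLY carry no
 * portal number, so they can never satisfy a portal filter; the -EINVAL
 * path fires when the caller asked for a portal filter but masked out both
 * PUT and GET.
 */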
static void
lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
{
	/* NB: fs_count is NOT updated by this function */
	switch (type) {
	case LNET_MSG_PUT:   stat->fs_put++;   return;
	case LNET_MSG_ACK:   stat->fs_ack++;   return;
	case LNET_MSG_GET:   stat->fs_get++;   return;
	case LNET_MSG_REPLY: stat->fs_reply++; return;
	}
}
/**
 * LNet message drop simulation
 */

/**
 * Add a new drop rule to LNet
 * There is no check for duplicated drop rules; all rules will be checked
 * for each incoming message.
 */
int
lnet_drop_rule_add(struct lnet_fault_attr *attr)
{
	struct lnet_drop_rule *rule;

	if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
		CDEBUG(D_NET,
		       "please provide either drop rate or drop interval, "
		       "but not both at the same time %d/%d\n",
		       attr->u.drop.da_rate, attr->u.drop.da_interval);
		return -EINVAL;
	}

	if (lnet_fault_attr_validate(attr) != 0)
		return -EINVAL;

	CFS_ALLOC_PTR(rule);
	if (rule == NULL)
		return -ENOMEM;

	spin_lock_init(&rule->dr_lock);

	rule->dr_attr = *attr;
	if (attr->u.drop.da_interval != 0) {
		rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
		rule->dr_drop_time = cfs_time_shift(cfs_rand() %
						    attr->u.drop.da_interval);
	} else {
		rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
	}

	lnet_net_lock(LNET_LOCK_EX);
	list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
	lnet_net_unlock(LNET_LOCK_EX);

	CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
	       libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
	       attr->u.drop.da_rate, attr->u.drop.da_interval);
	return 0;
}
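/*
 * A minimal kernel-side usage sketch (hypothetical values):
 *
 *	struct lnet_fault_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.fa_src = LNET_NID_ANY;	// any source
 *	attr.fa_dst = LNET_NID_ANY;	// any destination
 *	attr.u.drop.da_rate = 10;	// drop ~1 in 10 messages
 *	rc = lnet_drop_rule_add(&attr);
 *
 * In practice the attr arrives from userspace through lnet_fault_ctl()
 * (see LNET_CTL_DROP_ADD below).
 */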
/**
 * Remove matched drop rules from LNet; all rules that can match \a src and
 * \a dst will be removed.
 * If \a src is zero, then all rules with \a dst as destination will be
 * removed.
 * If \a dst is zero, then all rules with \a src as source will be removed.
 * If both of them are zero, all rules will be removed.
 */
int
lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
{
	struct lnet_drop_rule *rule;
	struct lnet_drop_rule *tmp;
	struct list_head       zombies;
	int		       n = 0;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
		if (rule->dr_attr.fa_src != src && src != 0)
			continue;

		if (rule->dr_attr.fa_dst != dst && dst != 0)
			continue;

		list_move(&rule->dr_link, &zombies);
	}
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
		CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
		       libcfs_nid2str(rule->dr_attr.fa_src),
		       libcfs_nid2str(rule->dr_attr.fa_dst),
		       rule->dr_attr.u.drop.da_rate,
		       rule->dr_attr.u.drop.da_interval);

		list_del(&rule->dr_link);
		CFS_FREE_PTR(rule);
		n++;
	}

	return n;
}
/**
 * List the drop rule at position \a pos
 */
int
lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
		    struct lnet_fault_stat *stat)
{
	struct lnet_drop_rule *rule;
	int		       rc = -ENOENT;
	int		       i = 0;
	int		       cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
		if (i++ < pos)
			continue;

		spin_lock(&rule->dr_lock);
		*attr = rule->dr_attr;
		*stat = rule->dr_stat;
		spin_unlock(&rule->dr_lock);
		rc = 0;
		break;
	}

	lnet_net_unlock(cpt);
	return rc;
}
/**
 * reset counters for all drop rules
 */
void
lnet_drop_rule_reset(void)
{
	struct lnet_drop_rule *rule;
	int		       cpt;

	cpt = lnet_net_lock_current();

	list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
		struct lnet_fault_attr *attr = &rule->dr_attr;

		spin_lock(&rule->dr_lock);

		memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
		if (attr->u.drop.da_rate != 0) {
			rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
		} else {
			rule->dr_drop_time = cfs_time_shift(cfs_rand() %
						attr->u.drop.da_interval);
			rule->dr_time_base = cfs_time_shift(attr->u.drop.
							    da_interval);
		}
		spin_unlock(&rule->dr_lock);
	}

	lnet_net_unlock(cpt);
}
/**
 * check source/destination NID, portal, message type and drop rate,
 * decide whether this message should be dropped or not
 */
static bool
drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
		lnet_nid_t dst, unsigned int type, unsigned int portal)
{
	struct lnet_fault_attr *attr = &rule->dr_attr;
	bool			drop;

	if (!lnet_fault_attr_match(attr, src, dst, type, portal))
		return false;

	/* match this rule, check drop rate now */
	spin_lock(&rule->dr_lock);
	if (rule->dr_drop_time != 0) { /* time based drop */
		cfs_time_t now = cfs_time_current();

		rule->dr_stat.fs_count++;
		drop = cfs_time_aftereq(now, rule->dr_drop_time);
		if (drop) {
			if (cfs_time_after(now, rule->dr_time_base))
				rule->dr_time_base = now;

			rule->dr_drop_time = rule->dr_time_base +
					     cfs_time_seconds(cfs_rand() %
						attr->u.drop.da_interval);
			rule->dr_time_base += cfs_time_seconds(attr->u.drop.
							       da_interval);

			CDEBUG(D_NET, "Drop Rule %s->%s: next drop : "
				      CFS_TIME_T"\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst),
			       rule->dr_drop_time);
		}

	} else { /* rate based drop */
		drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
		/* generate the next random drop sequence */
		if (rule->dr_stat.fs_count % attr->u.drop.da_rate == 0) {
			rule->dr_drop_at = rule->dr_stat.fs_count +
					   cfs_rand() % attr->u.drop.da_rate;
			CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
		}
	}

	if (drop) { /* drop this message, update counters */
		lnet_fault_stat_inc(&rule->dr_stat, type);
		rule->dr_stat.u.drop.ds_dropped++;
	}

	spin_unlock(&rule->dr_lock);
	return drop;
}
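/*
 * Worked example of the rate based mode: with da_rate == 8, dr_drop_at is
 * drawn uniformly from each window of 8 consecutive messages (fs_count
 * 0..7, 8..15, ...), so exactly one message per window is dropped. The
 * long-run drop probability is 1/8, without the drops being strictly
 * periodic.
 */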
/**
 * Check if a message from \a src to \a dst can match any existing drop rule
 */
bool
lnet_drop_rule_match(lnet_hdr_t *hdr)
{
	struct lnet_drop_rule *rule;
	lnet_nid_t	       src = le64_to_cpu(hdr->src_nid);
	lnet_nid_t	       dst = le64_to_cpu(hdr->dest_nid);
	unsigned int	       typ = le32_to_cpu(hdr->type);
	unsigned int	       ptl = -1;
	bool		       drop = false;
	int		       cpt;

	/* NB: if a portal is specified, then only PUT and GET will be
	 * filtered by the drop rule */
	if (typ == LNET_MSG_PUT)
		ptl = le32_to_cpu(hdr->msg.put.ptl_index);
	else if (typ == LNET_MSG_GET)
		ptl = le32_to_cpu(hdr->msg.get.ptl_index);

	cpt = lnet_net_lock_current();
	list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
		drop = drop_rule_match(rule, src, dst, typ, ptl);
		if (drop)
			break;
	}
	lnet_net_unlock(cpt);

	return drop;
}
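/*
 * lnet_drop_rule_match() is meant to be called from the message receive
 * path (lnet_parse() in upstream Lustre); when it returns true the caller
 * silently discards the incoming message, which emulates loss on the wire
 * without notifying either peer.
 */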
/**
 * LNet Delay Simulation
 */
/** timestamp (in seconds) to send the delayed message */
#define msg_delay_send		msg_ev.hdr_data
struct lnet_delay_rule {
	/** link chain on the_lnet.ln_delay_rules */
	struct list_head	dl_link;
	/** link chain on delay_dd.dd_sched_rules */
	struct list_head	dl_sched_link;
	/** attributes of this rule */
	struct lnet_fault_attr	dl_attr;
	/** lock to protect \a below members */
	spinlock_t		dl_lock;
	/** refcount of delay rule */
	atomic_t		dl_refcount;
	/**
	 * the message sequence to delay, which means the message is delayed
	 * when dl_stat.fs_count == dl_delay_at
	 */
	unsigned long		dl_delay_at;
	/**
	 * seconds to delay the next message, mutually exclusive with
	 * \a dl_delay_at
	 */
	cfs_time_t		dl_delay_time;
	/** baseline to calculate dl_delay_time */
	cfs_time_t		dl_time_base;
	/** jiffies to send the next delayed message */
	unsigned long		dl_msg_send;
	/** delayed message list */
	struct list_head	dl_msg_list;
	/** statistic of delayed messages */
	struct lnet_fault_stat	dl_stat;
	/** timer to wake up the delay_daemon */
	struct timer_list	dl_timer;
};
struct delay_daemon_data {
	/** serialise rule add/remove */
	struct mutex		dd_mutex;
	/** protect rules on \a dd_sched_rules */
	spinlock_t		dd_lock;
	/** scheduled delay rules (by timer) */
	struct list_head	dd_sched_rules;
	/** daemon thread sleeps here */
	wait_queue_head_t	dd_waitq;
	/** controller (lctl command) waits here */
	wait_queue_head_t	dd_ctl_waitq;
	/** daemon is running */
	unsigned int		dd_running;
	/** daemon stopped */
	unsigned int		dd_stopped;
};

static struct delay_daemon_data	delay_dd;
static unsigned long
round_timeout(cfs_time_t timeout)
{
	return cfs_time_seconds((unsigned int)
			cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}
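/*
 * round_timeout() converts an absolute jiffies deadline to whole seconds,
 * rounded up. E.g. with HZ == 1000 and timeout == 2500 jiffies,
 * cfs_duration_sec() yields 2, so the result is cfs_time_seconds(3) ==
 * 3000 jiffies; delayed messages are therefore timed with one-second
 * granularity.
 */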
static void
delay_rule_decref(struct lnet_delay_rule *rule)
{
	if (atomic_dec_and_test(&rule->dl_refcount)) {
		LASSERT(list_empty(&rule->dl_sched_link));
		LASSERT(list_empty(&rule->dl_msg_list));
		LASSERT(list_empty(&rule->dl_link));

		CFS_FREE_PTR(rule);
	}
}
/**
 * check source/destination NID, portal, message type and delay rate,
 * decide whether this message should be delayed or not
 */
static bool
delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
		 lnet_nid_t dst, unsigned int type, unsigned int portal,
		 struct lnet_msg *msg)
{
	struct lnet_fault_attr *attr = &rule->dl_attr;
	bool			delay;

	if (!lnet_fault_attr_match(attr, src, dst, type, portal))
		return false;

	/* match this rule, check delay rate now */
	spin_lock(&rule->dl_lock);
	if (rule->dl_delay_time != 0) { /* time based delay */
		cfs_time_t now = cfs_time_current();

		rule->dl_stat.fs_count++;
		delay = cfs_time_aftereq(now, rule->dl_delay_time);
		if (delay) {
			if (cfs_time_after(now, rule->dl_time_base))
				rule->dl_time_base = now;

			rule->dl_delay_time = rule->dl_time_base +
					      cfs_time_seconds(cfs_rand() %
						attr->u.delay.la_interval);
			rule->dl_time_base += cfs_time_seconds(attr->u.delay.
							       la_interval);

			CDEBUG(D_NET, "Delay Rule %s->%s: next delay : "
				      CFS_TIME_T"\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst),
			       rule->dl_delay_time);
		}

	} else { /* rate based delay */
		delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
		/* generate the next random rate sequence */
		if (rule->dl_stat.fs_count % attr->u.delay.la_rate == 0) {
			rule->dl_delay_at = rule->dl_stat.fs_count +
					    cfs_rand() % attr->u.delay.la_rate;
			CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst),
			       rule->dl_delay_at);
		}
	}

	if (!delay) {
		spin_unlock(&rule->dl_lock);
		return false;
	}

	/* delay this message, update counters */
	lnet_fault_stat_inc(&rule->dl_stat, type);
	rule->dl_stat.u.delay.ls_delayed++;

	list_add_tail(&msg->msg_list, &rule->dl_msg_list);
	msg->msg_delay_send = round_timeout(
			cfs_time_shift(attr->u.delay.la_latency));
	if (rule->dl_msg_send == -1) {
		rule->dl_msg_send = msg->msg_delay_send;
		mod_timer(&rule->dl_timer, rule->dl_msg_send);
	}

	spin_unlock(&rule->dl_lock);
	return true;
}
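/*
 * Note the timer discipline here: dl_msg_send == -1 means no delayed
 * message is queued, so the timer is only armed when the queue goes from
 * empty to non-empty. delayed_msg_check() below re-arms it for the next
 * pending message and resets dl_msg_send to -1 once the queue drains.
 */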
/**
 * check if \a msg can match any delay rule; receiving of this message
 * will be delayed if there is a match.
 */
bool
lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg)
{
	struct lnet_delay_rule *rule;
	lnet_nid_t		src = le64_to_cpu(hdr->src_nid);
	lnet_nid_t		dst = le64_to_cpu(hdr->dest_nid);
	unsigned int		typ = le32_to_cpu(hdr->type);
	unsigned int		ptl = -1;

	/* NB: called with lnet_net_lock held */

	/* NB: if a portal is specified, then only PUT and GET will be
	 * filtered by the delay rule */
	if (typ == LNET_MSG_PUT)
		ptl = le32_to_cpu(hdr->msg.put.ptl_index);
	else if (typ == LNET_MSG_GET)
		ptl = le32_to_cpu(hdr->msg.get.ptl_index);

	list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
		if (delay_rule_match(rule, src, dst, typ, ptl, msg))
			return true;
	}

	return false;
}
/** check out delayed messages for send */
static void
delayed_msg_check(struct lnet_delay_rule *rule, bool all,
		  struct list_head *msg_list)
{
	struct lnet_msg *msg;
	struct lnet_msg *tmp;
	unsigned long	 now = cfs_time_current();

	if (!all && rule->dl_msg_send > now)
		return;

	spin_lock(&rule->dl_lock);
	list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
		if (!all && msg->msg_delay_send > now)
			break;

		msg->msg_delay_send = 0;
		list_move_tail(&msg->msg_list, msg_list);
	}

	if (list_empty(&rule->dl_msg_list)) {
		del_timer(&rule->dl_timer);
		rule->dl_msg_send = -1;

	} else if (!list_empty(msg_list)) {
		/* dequeued some timed-out messages, update the timer for the
		 * next delayed message on the rule */
		msg = list_entry(rule->dl_msg_list.next,
				 struct lnet_msg, msg_list);
		rule->dl_msg_send = msg->msg_delay_send;
		mod_timer(&rule->dl_timer, rule->dl_msg_send);
	}
	spin_unlock(&rule->dl_lock);
}
static void
delayed_msg_process(struct list_head *msg_list, bool drop)
{
	struct lnet_msg *msg;

	while (!list_empty(msg_list)) {
		struct lnet_ni *ni;
		int		cpt;
		int		rc;

		msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
		LASSERT(msg->msg_rxpeer != NULL);

		ni  = msg->msg_rxpeer->lp_ni;
		cpt = msg->msg_rx_cpt;

		list_del_init(&msg->msg_list);
		if (drop) { /* shutdown: discard the message */
			rc = -ECANCELED;

		} else if (!msg->msg_routing) {
			rc = lnet_parse_local(ni, msg);
			if (rc == 0)
				continue;

		} else {
			lnet_net_lock(cpt);
			rc = lnet_parse_forward_locked(ni, msg);
			lnet_net_unlock(cpt);

			switch (rc) {
			case LNET_CREDIT_OK:
				lnet_ni_recv(ni, msg->msg_private, msg, 0,
					     0, msg->msg_len, msg->msg_len);
				/* fallthrough */
			case LNET_CREDIT_WAIT:
				continue;
			default: /* failures */
				break;
			}
		}

		lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len);
		lnet_finalize(ni, msg, rc);
	}
}
/**
 * Process delayed messages for scheduled rules
 * This function can either be called by delay_rule_daemon, or by
 * lnet_finalize
 */
void
lnet_delay_rule_check(void)
{
	struct lnet_delay_rule *rule;
	struct list_head	msgs;

	INIT_LIST_HEAD(&msgs);
	while (1) {
		if (list_empty(&delay_dd.dd_sched_rules))
			break;

		spin_lock_bh(&delay_dd.dd_lock);
		if (list_empty(&delay_dd.dd_sched_rules)) {
			spin_unlock_bh(&delay_dd.dd_lock);
			break;
		}

		rule = list_entry(delay_dd.dd_sched_rules.next,
				  struct lnet_delay_rule, dl_sched_link);
		list_del_init(&rule->dl_sched_link);
		spin_unlock_bh(&delay_dd.dd_lock);

		delayed_msg_check(rule, false, &msgs);
		delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
	}

	if (!list_empty(&msgs))
		delayed_msg_process(&msgs, false);
}
/** daemon thread to handle delayed messages */
static int
lnet_delay_rule_daemon(void *arg)
{
	delay_dd.dd_running = 1;
	wake_up(&delay_dd.dd_ctl_waitq);

	while (delay_dd.dd_running) {
		wait_event_interruptible(delay_dd.dd_waitq,
					 !delay_dd.dd_running ||
					 !list_empty(&delay_dd.dd_sched_rules));
		lnet_delay_rule_check();
	}

	/* in case more rules have been enqueued after my last check */
	lnet_delay_rule_check();
	delay_dd.dd_stopped = 1;
	wake_up(&delay_dd.dd_ctl_waitq);

	return 0;
}
static void
delay_timer_cb(unsigned long arg)
{
	struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;

	spin_lock_bh(&delay_dd.dd_lock);
	if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
		atomic_inc(&rule->dl_refcount);
		list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
		wake_up(&delay_dd.dd_waitq);
	}
	spin_unlock_bh(&delay_dd.dd_lock);
}
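/*
 * delay_timer_cb() runs in softirq context, which is why dd_lock is always
 * taken with spin_lock_bh(): process-context holders must prevent the timer
 * callback from preempting them on the same CPU. The callback itself does
 * no message processing; it only queues the rule (taking a reference that
 * lnet_delay_rule_check() releases) and wakes the daemon thread.
 */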
/**
 * Add a new delay rule to LNet
 * There is no check for duplicated delay rules; all rules will be checked
 * for each incoming message.
 */
int
lnet_delay_rule_add(struct lnet_fault_attr *attr)
{
	struct lnet_delay_rule *rule;
	int			rc = 0;

	if (!((attr->u.delay.la_rate == 0) ^
	      (attr->u.delay.la_interval == 0))) {
		CDEBUG(D_NET,
		       "please provide either delay rate or delay interval, "
		       "but not both at the same time %d/%d\n",
		       attr->u.delay.la_rate, attr->u.delay.la_interval);
		return -EINVAL;
	}

	if (attr->u.delay.la_latency == 0) {
		CDEBUG(D_NET, "delay latency cannot be zero\n");
		return -EINVAL;
	}

	if (lnet_fault_attr_validate(attr) != 0)
		return -EINVAL;

	CFS_ALLOC_PTR(rule);
	if (rule == NULL)
		return -ENOMEM;

	mutex_lock(&delay_dd.dd_mutex);
	if (!delay_dd.dd_running) {
		struct task_struct *task;

		/* NB: although LND threads will process the delayed message
		 * in lnet_finalize, there is no guarantee that LND threads
		 * will be woken up if no other message needs to be handled.
		 * Only one daemon thread is used; performance is not the
		 * concern of this simulation module.
		 */
		task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
		if (IS_ERR(task)) {
			rc = PTR_ERR(task);
			goto failed;
		}
		wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
	}

	init_timer(&rule->dl_timer);
	rule->dl_timer.function = delay_timer_cb;
	rule->dl_timer.data = (unsigned long)rule;

	spin_lock_init(&rule->dl_lock);
	INIT_LIST_HEAD(&rule->dl_msg_list);
	INIT_LIST_HEAD(&rule->dl_sched_link);

	rule->dl_attr = *attr;
	if (attr->u.delay.la_interval != 0) {
		rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
		rule->dl_delay_time = cfs_time_shift(cfs_rand() %
						     attr->u.delay.la_interval);
	} else {
		rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
	}

	rule->dl_msg_send = -1;

	lnet_net_lock(LNET_LOCK_EX);
	atomic_set(&rule->dl_refcount, 1);
	list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
	lnet_net_unlock(LNET_LOCK_EX);

	CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
	       libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
	       attr->u.delay.la_rate);

	mutex_unlock(&delay_dd.dd_mutex);
	return 0;

failed:
	mutex_unlock(&delay_dd.dd_mutex);
	CFS_FREE_PTR(rule);
	return rc;
}
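/*
 * Usage mirrors lnet_drop_rule_add(); a sketch with hypothetical values:
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.u.delay.la_rate = 10;	// delay ~1 in 10 messages
 *	attr.u.delay.la_latency = 5;	// hold them for 5 seconds
 *	rc = lnet_delay_rule_add(&attr);
 *
 * la_latency must be non-zero, and exactly one of la_rate/la_interval must
 * be set, as enforced above.
 */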
/**
 * Remove matched delay rules from LNet; if \a shutdown is true or both \a src
 * and \a dst are zero, all rules will be removed, otherwise only matched rules
 * will be removed.
 * If \a src is zero, then all rules with \a dst as destination will be
 * removed.
 * If \a dst is zero, then all rules with \a src as source will be removed.
 *
 * When a delay rule is removed, all delayed messages of this rule will be
 * processed immediately.
 */
int
lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
{
	struct lnet_delay_rule *rule;
	struct lnet_delay_rule *tmp;
	struct list_head	rule_list;
	struct list_head	msg_list;
	int			n = 0;
	bool			cleanup;

	INIT_LIST_HEAD(&rule_list);
	INIT_LIST_HEAD(&msg_list);

	if (shutdown)
		src = dst = 0;

	mutex_lock(&delay_dd.dd_mutex);
	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
		if (rule->dl_attr.fa_src != src && src != 0)
			continue;

		if (rule->dl_attr.fa_dst != dst && dst != 0)
			continue;

		CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
		       libcfs_nid2str(rule->dl_attr.fa_src),
		       libcfs_nid2str(rule->dl_attr.fa_dst),
		       rule->dl_attr.u.delay.la_rate,
		       rule->dl_attr.u.delay.la_interval);
		/* refcount is taken over by rule_list */
		list_move(&rule->dl_link, &rule_list);
	}

	/* check if we need to shutdown delay_daemon */
	cleanup = list_empty(&the_lnet.ln_delay_rules) &&
		  !list_empty(&rule_list);
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
		list_del_init(&rule->dl_link);

		del_timer_sync(&rule->dl_timer);
		delayed_msg_check(rule, true, &msg_list);
		delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
		n++;
	}

	if (cleanup) { /* no more delay rules, shutdown delay_daemon */
		LASSERT(delay_dd.dd_running);
		delay_dd.dd_running = 0;
		wake_up(&delay_dd.dd_waitq);

		while (!delay_dd.dd_stopped)
			wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
	}
	mutex_unlock(&delay_dd.dd_mutex);

	if (!list_empty(&msg_list))
		delayed_msg_process(&msg_list, shutdown);

	return n;
}
/**
 * List the delay rule at position \a pos
 */
int
lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
		     struct lnet_fault_stat *stat)
{
	struct lnet_delay_rule *rule;
	int			rc = -ENOENT;
	int			i = 0;
	int			cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
		if (i++ < pos)
			continue;

		spin_lock(&rule->dl_lock);
		*attr = rule->dl_attr;
		*stat = rule->dl_stat;
		spin_unlock(&rule->dl_lock);
		rc = 0;
		break;
	}

	lnet_net_unlock(cpt);
	return rc;
}
/**
 * reset counters for all delay rules
 */
void
lnet_delay_rule_reset(void)
{
	struct lnet_delay_rule *rule;
	int			cpt;

	cpt = lnet_net_lock_current();

	list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
		struct lnet_fault_attr *attr = &rule->dl_attr;

		spin_lock(&rule->dl_lock);

		memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
		if (attr->u.delay.la_rate != 0) {
			rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
		} else {
			rule->dl_delay_time = cfs_time_shift(cfs_rand() %
						attr->u.delay.la_interval);
			rule->dl_time_base = cfs_time_shift(attr->u.delay.
							    la_interval);
		}
		spin_unlock(&rule->dl_lock);
	}

	lnet_net_unlock(cpt);
}
int
lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
{
	struct lnet_fault_attr *attr;
	struct lnet_fault_stat *stat;

	attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;

	switch (opc) {
	default:
		return -EINVAL;

	case LNET_CTL_DROP_ADD:
		if (attr == NULL)
			return -EINVAL;

		return lnet_drop_rule_add(attr);

	case LNET_CTL_DROP_DEL:
		if (attr == NULL)
			return -EINVAL;

		data->ioc_count = lnet_drop_rule_del(attr->fa_src,
						     attr->fa_dst);
		return 0;

	case LNET_CTL_DROP_RESET:
		lnet_drop_rule_reset();
		return 0;

	case LNET_CTL_DROP_LIST:
		stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
		if (attr == NULL || stat == NULL)
			return -EINVAL;

		return lnet_drop_rule_list(data->ioc_count, attr, stat);

	case LNET_CTL_DELAY_ADD:
		if (attr == NULL)
			return -EINVAL;

		return lnet_delay_rule_add(attr);

	case LNET_CTL_DELAY_DEL:
		if (attr == NULL)
			return -EINVAL;

		data->ioc_count = lnet_delay_rule_del(attr->fa_src,
						      attr->fa_dst, false);
		return 0;

	case LNET_CTL_DELAY_RESET:
		lnet_delay_rule_reset();
		return 0;

	case LNET_CTL_DELAY_LIST:
		stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
		if (attr == NULL || stat == NULL)
			return -EINVAL;

		return lnet_delay_rule_list(data->ioc_count, attr, stat);
	}
}
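/*
 * lnet_fault_ctl() is the single ioctl entry point for this module:
 * userspace (typically lctl) packs a struct lnet_fault_attr (and, for the
 * LIST opcodes, a struct lnet_fault_stat) into the libcfs ioctl inline
 * buffers. Note that ioc_count is overloaded: it is the list position on
 * input for the LIST opcodes, and the removed-rule count on output for the
 * DEL opcodes.
 */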
int
lnet_fault_init(void)
{
	CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
	CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
	CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
	CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);

	mutex_init(&delay_dd.dd_mutex);
	spin_lock_init(&delay_dd.dd_lock);
	init_waitqueue_head(&delay_dd.dd_waitq);
	init_waitqueue_head(&delay_dd.dd_ctl_waitq);
	INIT_LIST_HEAD(&delay_dd.dd_sched_rules);

	return 0;
}
void
lnet_fault_fini(void)
{
	lnet_drop_rule_del(0, 0);
	lnet_delay_rule_del(0, 0, true);

	LASSERT(list_empty(&the_lnet.ln_drop_rules));
	LASSERT(list_empty(&the_lnet.ln_delay_rules));
	LASSERT(list_empty(&delay_dd.dd_sched_rules));
}