/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * Copyright (c) 2014, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/net_fault.c
 *
 * Lustre network fault simulation
 *
 * Author: liang.zhen@intel.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnetctl.h>

#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \
                       LNET_GET_BIT | LNET_REPLY_BIT)
struct lnet_drop_rule {
        /** link chain on the_lnet.ln_drop_rules */
        struct list_head dr_link;
        /** attributes of this rule */
        struct lnet_fault_attr dr_attr;
        /** lock to protect \a dr_drop_at and \a dr_stat */
        spinlock_t dr_lock;
        /**
         * the message sequence to drop, which means the message is dropped
         * when dr_stat.fs_count == dr_drop_at
         */
        unsigned long dr_drop_at;
        /**
         * seconds to drop the next message, exclusive with dr_drop_at
         */
        time64_t dr_drop_time;
        /** baseline to calculate dr_drop_time */
        time64_t dr_time_base;
        /** statistics of dropped messages */
        struct lnet_fault_stat dr_stat;
};
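/*
 * Summary of the two (mutually exclusive) drop modes, as implemented in
 * drop_rule_match() below.  Rate mode, e.g. da_rate = 8: dr_drop_at is seeded
 * with a random value in [0, 8); the message whose sequence number matches it
 * is dropped, and each time a window of 8 messages has been consumed a new
 * random target is picked inside the next window, so roughly 1 in 8 matching
 * messages is dropped.  Interval mode: dr_drop_time holds the next second at
 * which a matching message will be dropped, and dr_time_base is advanced by
 * da_interval each time to choose the following random drop time.
 */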
static bool
lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
{
        if (nid == msg_nid || nid == LNET_NID_ANY)
                return true;
        if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
                return false;
        /* 255.255.255.255@net is a wildcard for all addresses in a network */
        return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
}
static bool
lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
                      lnet_nid_t dst, unsigned int type, unsigned int portal)
{
        if (!lnet_fault_nid_match(attr->fa_src, src) ||
            !lnet_fault_nid_match(attr->fa_dst, dst))
                return false;

        if (!(attr->fa_msg_mask & (1 << type)))
                return false;

        /* NB: ACK and REPLY have no portal, but they should have been
         * rejected by the message mask */
        if (attr->fa_ptl_mask != 0 && /* has portal filter */
            !(attr->fa_ptl_mask & (1ULL << portal)))
                return false;
        return true;
}
static int
lnet_fault_attr_validate(struct lnet_fault_attr *attr)
{
        if (attr->fa_msg_mask == 0)
                attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
        if (attr->fa_ptl_mask == 0) /* no portal filter */
                return 0;

        /* NB: only PUT and GET can be filtered if a portal filter has been set */
        attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
        if (attr->fa_msg_mask == 0) {
                CDEBUG(D_NET, "can't find valid message type bits %x\n",
                       attr->fa_msg_mask);
                return -EINVAL;
        }
        return 0;
}
lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
        /* NB: fs_counter is NOT updated by this function */
/**
 * LNet message drop simulation
 */

/**
 * Add a new drop rule to LNet
 * There is no check for duplicated drop rules; all rules will be checked for
 * each incoming message.
 */
static int
lnet_drop_rule_add(struct lnet_fault_attr *attr)
{
        struct lnet_drop_rule *rule;

        if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
                CDEBUG(D_NET,
                       "please provide either drop rate or drop interval, "
                       "but not both at the same time %d/%d\n",
                       attr->u.drop.da_rate, attr->u.drop.da_interval);
                return -EINVAL;
        }

        if (lnet_fault_attr_validate(attr) != 0)
                return -EINVAL;

        spin_lock_init(&rule->dr_lock);

        rule->dr_attr = *attr;
        if (attr->u.drop.da_interval != 0) {
                rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
                rule->dr_drop_time = ktime_get_seconds() +
                                     cfs_rand() % attr->u.drop.da_interval;
        } else {
                rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
        }

        lnet_net_lock(LNET_LOCK_EX);
        list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
        lnet_net_unlock(LNET_LOCK_EX);

        CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
               libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
               attr->u.drop.da_rate, attr->u.drop.da_interval);
        return 0;
}
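/*
 * Illustrative usage only (a sketch, not part of this file): a kernel-side
 * caller could install a rule that drops roughly 1 of every 8 matching
 * messages between two NIDs like this:
 *
 *      struct lnet_fault_attr attr;
 *      int rc;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.fa_src = libcfs_str2nid("192.168.1.1@tcp");
 *      attr.fa_dst = libcfs_str2nid("192.168.1.2@tcp");
 *      attr.u.drop.da_rate = 8;        (or set da_interval, never both)
 *      rc = lnet_drop_rule_add(&attr);
 *
 * In practice rules are added from user space through the ioctl handled by
 * lnet_fault_ctl() below, e.g. "lctl net_drop_add -s <src nid> -d <dst nid>
 * -r 8"; exact lctl option names may vary between Lustre releases.
 */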
/**
 * Remove matched drop rules from LNet; all rules that match \a src and
 * \a dst will be removed.
 * If \a src is zero, then all rules that have \a dst as destination will be removed.
 * If \a dst is zero, then all rules that have \a src as source will be removed.
 * If both of them are zero, all rules will be removed.
 */
static int
lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
{
        struct lnet_drop_rule *rule;
        struct lnet_drop_rule *tmp;
        struct list_head zombies;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
                if (rule->dr_attr.fa_src != src && src != 0)
                        continue;

                if (rule->dr_attr.fa_dst != dst && dst != 0)
                        continue;

                list_move(&rule->dr_link, &zombies);
        }
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
                CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
                       libcfs_nid2str(rule->dr_attr.fa_src),
                       libcfs_nid2str(rule->dr_attr.fa_dst),
                       rule->dr_attr.u.drop.da_rate,
                       rule->dr_attr.u.drop.da_interval);

                list_del(&rule->dr_link);
/** List drop rule at position of \a pos */
lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
                    struct lnet_fault_stat *stat)
        struct lnet_drop_rule *rule;

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
                spin_lock(&rule->dr_lock);
                *attr = rule->dr_attr;
                *stat = rule->dr_stat;
                spin_unlock(&rule->dr_lock);

        lnet_net_unlock(cpt);
/** reset counters for all drop rules */
lnet_drop_rule_reset(void)
        struct lnet_drop_rule *rule;

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
                struct lnet_fault_attr *attr = &rule->dr_attr;

                spin_lock(&rule->dr_lock);

                memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
                if (attr->u.drop.da_rate != 0) {
                        rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
                } else {
                        rule->dr_drop_time = ktime_get_seconds() +
                                             cfs_rand() % attr->u.drop.da_interval;
                        rule->dr_time_base = ktime_get_seconds() +
                                             attr->u.drop.da_interval;
                }
                spin_unlock(&rule->dr_lock);
        }

        lnet_net_unlock(cpt);
/**
 * check source/destination NID, portal, message type and drop rate,
 * decide whether this message should be dropped or not
 */
static bool
drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
                lnet_nid_t dst, unsigned int type, unsigned int portal)
{
        struct lnet_fault_attr *attr = &rule->dr_attr;
        bool drop;

        if (!lnet_fault_attr_match(attr, src, dst, type, portal))
                return false;

        /* match this rule, check drop rate now */
        spin_lock(&rule->dr_lock);
        if (rule->dr_drop_time != 0) { /* time based drop */
                time64_t now = ktime_get_seconds();

                rule->dr_stat.fs_count++;
                drop = now >= rule->dr_drop_time;
                if (drop) {
                        if (now > rule->dr_time_base)
                                rule->dr_time_base = now;

                        rule->dr_drop_time = rule->dr_time_base +
                                             cfs_rand() % attr->u.drop.da_interval;
                        rule->dr_time_base += attr->u.drop.da_interval;

                        CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lld\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst),
                               rule->dr_drop_time);
                }
        } else { /* rate based drop */
                __u64 count;

                drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
                count = rule->dr_stat.fs_count;
                if (do_div(count, attr->u.drop.da_rate) == 0) {
                        rule->dr_drop_at = rule->dr_stat.fs_count +
                                           cfs_rand() % attr->u.drop.da_rate;
                        CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
                }
        }

        if (drop) { /* drop this message, update counters */
                lnet_fault_stat_inc(&rule->dr_stat, type);
                rule->dr_stat.u.drop.ds_dropped++;
        }

        spin_unlock(&rule->dr_lock);
        return drop;
}
/**
 * Check if message from \a src to \a dst can match any existing drop rule
 */
bool
lnet_drop_rule_match(struct lnet_hdr *hdr)
{
        struct lnet_drop_rule *rule;
        lnet_nid_t src = le64_to_cpu(hdr->src_nid);
        lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
        unsigned int typ = le32_to_cpu(hdr->type);
        unsigned int ptl = -1;
        bool drop = false;
        int cpt;

        /* NB: if a portal is specified, then only PUT and GET will be
         * filtered by drop rules */
        if (typ == LNET_MSG_PUT)
                ptl = le32_to_cpu(hdr->msg.put.ptl_index);
        else if (typ == LNET_MSG_GET)
                ptl = le32_to_cpu(hdr->msg.get.ptl_index);

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
                drop = drop_rule_match(rule, src, dst, typ, ptl);
                if (drop)
                        break;
        }
        lnet_net_unlock(cpt);

        return drop;
}
/**
 * LNet Delay Simulation
 */
/** timestamp (second) to send delayed message */
#define msg_delay_send msg_ev.hdr_data
struct lnet_delay_rule {
        /** link chain on the_lnet.ln_delay_rules */
        struct list_head dl_link;
        /** link chain on delay_dd.dd_sched_rules */
        struct list_head dl_sched_link;
        /** attributes of this rule */
        struct lnet_fault_attr dl_attr;
        /** lock to protect the members below */
        spinlock_t dl_lock;
        /** refcount of delay rule */
        atomic_t dl_refcount;
        /**
         * the message sequence to delay, which means the message is delayed
         * when dl_stat.fs_count == dl_delay_at
         */
        unsigned long dl_delay_at;
        /**
         * seconds to delay the next message, exclusive with dl_delay_at
         */
        time64_t dl_delay_time;
        /** baseline to calculate dl_delay_time */
        time64_t dl_time_base;
        /** jiffies to send the next delayed message */
        unsigned long dl_msg_send;
        /** delayed message list */
        struct list_head dl_msg_list;
        /** statistics of delayed messages */
        struct lnet_fault_stat dl_stat;
        /** timer to wake up delay_daemon */
        struct timer_list dl_timer;
};
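/*
 * Lifecycle of a delayed message (summary of the code below): when
 * delay_rule_match() decides to delay a message, the message is queued on
 * dl_msg_list with msg_delay_send set to "now + la_latency", and dl_timer is
 * armed if it is not already pending.  When the timer fires, delay_timer_cb()
 * moves the rule onto delay_dd.dd_sched_rules and wakes the daemon, which
 * calls delayed_msg_check()/delayed_msg_process() to re-inject (or drop) the
 * messages whose delay has expired.
 */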
struct delay_daemon_data {
        /** serialise rule add/remove */
        struct mutex dd_mutex;
        /** protect rules on \a dd_sched_rules */
        spinlock_t dd_lock;
        /** scheduled delay rules (by timer) */
        struct list_head dd_sched_rules;
        /** daemon thread sleeps here */
        wait_queue_head_t dd_waitq;
        /** controller (lctl command) waits here */
        wait_queue_head_t dd_ctl_waitq;
        /** daemon is running */
        unsigned int dd_running;
        /** daemon stopped */
        unsigned int dd_stopped;
};

static struct delay_daemon_data delay_dd;
static void
delay_rule_decref(struct lnet_delay_rule *rule)
{
        if (atomic_dec_and_test(&rule->dl_refcount)) {
                LASSERT(list_empty(&rule->dl_sched_link));
                LASSERT(list_empty(&rule->dl_msg_list));
                LASSERT(list_empty(&rule->dl_link));
/**
 * check source/destination NID, portal, message type and delay rate,
 * decide whether this message should be delayed or not
 */
static bool
delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
                 lnet_nid_t dst, unsigned int type, unsigned int portal,
                 struct lnet_msg *msg)
{
        struct lnet_fault_attr *attr = &rule->dl_attr;
        bool delay;

        if (!lnet_fault_attr_match(attr, src, dst, type, portal))
                return false;

        /* match this rule, check delay rate now */
        spin_lock(&rule->dl_lock);
        if (rule->dl_delay_time != 0) { /* time based delay */
                time64_t now = ktime_get_seconds();

                rule->dl_stat.fs_count++;
                delay = now >= rule->dl_delay_time;
                if (delay) {
                        if (now > rule->dl_time_base)
                                rule->dl_time_base = now;

                        rule->dl_delay_time = rule->dl_time_base +
                                              cfs_rand() % attr->u.delay.la_interval;
                        rule->dl_time_base += attr->u.delay.la_interval;

                        CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lld\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst),
                               rule->dl_delay_time);
                }
        } else { /* rate based delay */
                __u64 count;

                delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
                /* generate the next random rate sequence */
                count = rule->dl_stat.fs_count;
                if (do_div(count, attr->u.delay.la_rate) == 0) {
                        rule->dl_delay_at = rule->dl_stat.fs_count +
                                            cfs_rand() % attr->u.delay.la_rate;
                        CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
                }
        }

        if (!delay) {
                spin_unlock(&rule->dl_lock);
                return false;
        }

        /* delay this message, update counters */
        lnet_fault_stat_inc(&rule->dl_stat, type);
        rule->dl_stat.u.delay.ls_delayed++;

        list_add_tail(&msg->msg_list, &rule->dl_msg_list);
        msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
        if (rule->dl_msg_send == -1) {
                rule->dl_msg_send = msg->msg_delay_send;
                mod_timer(&rule->dl_timer, rule->dl_msg_send);
        }

        spin_unlock(&rule->dl_lock);
        return true;
}
/**
 * check if \a msg can match any delay rule; receipt of this message
 * will be delayed if there is a match.
 */
bool
lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
{
        struct lnet_delay_rule *rule;
        lnet_nid_t src = le64_to_cpu(hdr->src_nid);
        lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
        unsigned int typ = le32_to_cpu(hdr->type);
        unsigned int ptl = -1;

        /* NB: called while holding lnet_net_lock */

        /* NB: if a portal is specified, then only PUT and GET will be
         * filtered by delay rules */
        if (typ == LNET_MSG_PUT)
                ptl = le32_to_cpu(hdr->msg.put.ptl_index);
        else if (typ == LNET_MSG_GET)
                ptl = le32_to_cpu(hdr->msg.get.ptl_index);

        list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
                if (delay_rule_match(rule, src, dst, typ, ptl, msg))
                        return true;
        }

        return false;
}
/** check out delayed messages for sending */
static void
delayed_msg_check(struct lnet_delay_rule *rule, bool all,
                  struct list_head *msg_list)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;
        time64_t now = ktime_get_seconds();

        if (!all && cfs_time_seconds(rule->dl_msg_send) > now)
                return;

        spin_lock(&rule->dl_lock);
        list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
                if (!all && msg->msg_delay_send > now)
                        break;

                msg->msg_delay_send = 0;
                list_move_tail(&msg->msg_list, msg_list);
        }

        if (list_empty(&rule->dl_msg_list)) {
                del_timer(&rule->dl_timer);
                rule->dl_msg_send = -1;
        } else if (!list_empty(msg_list)) {
                /* dequeued some timed-out messages; update the timer for the
                 * next delayed message on this rule */
                msg = list_entry(rule->dl_msg_list.next,
                                 struct lnet_msg, msg_list);
                rule->dl_msg_send = msg->msg_delay_send;
                mod_timer(&rule->dl_timer, rule->dl_msg_send);
        }
        spin_unlock(&rule->dl_lock);
}
delayed_msg_process(struct list_head *msg_list, bool drop)
        struct lnet_msg *msg;

        while (!list_empty(msg_list)) {
                msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_rxni != NULL);

                cpt = msg->msg_rx_cpt;
                list_del_init(&msg->msg_list);

                } else if (!msg->msg_routing) {
                        rc = lnet_parse_local(ni, msg);
                        rc = lnet_parse_forward_locked(ni, msg);
                        lnet_net_unlock(cpt);
                        lnet_ni_recv(ni, msg->msg_private, msg, 0,
                                     0, msg->msg_len, msg->msg_len);
                case LNET_CREDIT_WAIT:
                default: /* failures */
                lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
                lnet_finalize(msg, rc);
/**
 * Process delayed messages for scheduled rules
 * This function can be called either by delay_rule_daemon or by lnet_finalize
 */
void
lnet_delay_rule_check(void)
{
        struct lnet_delay_rule *rule;
        struct list_head msgs;

        INIT_LIST_HEAD(&msgs);

        if (list_empty(&delay_dd.dd_sched_rules))
                return;

        spin_lock_bh(&delay_dd.dd_lock);
        if (list_empty(&delay_dd.dd_sched_rules)) {
                spin_unlock_bh(&delay_dd.dd_lock);
                return;
        }

        rule = list_entry(delay_dd.dd_sched_rules.next,
                          struct lnet_delay_rule, dl_sched_link);
        list_del_init(&rule->dl_sched_link);
        spin_unlock_bh(&delay_dd.dd_lock);

        delayed_msg_check(rule, false, &msgs);
        delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */

        if (!list_empty(&msgs))
                delayed_msg_process(&msgs, false);
}
/** daemon thread to handle delayed messages */
static int
lnet_delay_rule_daemon(void *arg)
{
        delay_dd.dd_running = 1;
        wake_up(&delay_dd.dd_ctl_waitq);

        while (delay_dd.dd_running) {
                wait_event_interruptible(delay_dd.dd_waitq,
                                         !delay_dd.dd_running ||
                                         !list_empty(&delay_dd.dd_sched_rules));
                lnet_delay_rule_check();
        }

        /* in case more rules have been enqueued after my last check */
        lnet_delay_rule_check();
        delay_dd.dd_stopped = 1;
        wake_up(&delay_dd.dd_ctl_waitq);

        return 0;
}
static void
delay_timer_cb(cfs_timer_cb_arg_t data)
{
        struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);

        spin_lock_bh(&delay_dd.dd_lock);
        if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
                atomic_inc(&rule->dl_refcount);
                list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
                wake_up(&delay_dd.dd_waitq);
        }
        spin_unlock_bh(&delay_dd.dd_lock);
}
/**
 * Add a new delay rule to LNet
 * There is no check for duplicated delay rules; all rules will be checked for
 * each incoming message.
 */
int
lnet_delay_rule_add(struct lnet_fault_attr *attr)
{
        struct lnet_delay_rule *rule;
        int rc = 0;

        if (!((attr->u.delay.la_rate == 0) ^
              (attr->u.delay.la_interval == 0))) {
                CDEBUG(D_NET,
                       "please provide either delay rate or delay interval, "
                       "but not both at the same time %d/%d\n",
                       attr->u.delay.la_rate, attr->u.delay.la_interval);
                return -EINVAL;
        }

        if (attr->u.delay.la_latency == 0) {
                CDEBUG(D_NET, "delay latency cannot be zero\n");
                return -EINVAL;
        }

        if (lnet_fault_attr_validate(attr) != 0)
                return -EINVAL;

        mutex_lock(&delay_dd.dd_mutex);
        if (!delay_dd.dd_running) {
                struct task_struct *task;

                /* NB: although LND threads will process delayed messages
                 * in lnet_finalize, there is no guarantee that an LND
                 * thread will be woken up if no other message needs to
                 * be processed.
                 * Only one daemon thread is used; performance is not the
                 * concern of this simulation module.
                 */
                task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
                if (IS_ERR(task)) {
                        rc = PTR_ERR(task);
                        goto failed;
                }

                wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
        }

        cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
                        (unsigned long)rule, 0);

        spin_lock_init(&rule->dl_lock);
        INIT_LIST_HEAD(&rule->dl_msg_list);
        INIT_LIST_HEAD(&rule->dl_sched_link);

        rule->dl_attr = *attr;
        if (attr->u.delay.la_interval != 0) {
                rule->dl_time_base = ktime_get_seconds() +
                                     attr->u.delay.la_interval;
                rule->dl_delay_time = ktime_get_seconds() +
                                      cfs_rand() % attr->u.delay.la_interval;
        } else {
                rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
        }

        rule->dl_msg_send = -1;

        lnet_net_lock(LNET_LOCK_EX);
        atomic_set(&rule->dl_refcount, 1);
        list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
        lnet_net_unlock(LNET_LOCK_EX);

        CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
               libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
               attr->u.delay.la_rate);

        mutex_unlock(&delay_dd.dd_mutex);
        return 0;
failed:
        mutex_unlock(&delay_dd.dd_mutex);
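/*
 * Illustrative usage only: the usual front end for delay rules is the lctl
 * fault-injection command set, e.g. "lctl net_delay_add -s <src nid>
 * -d <dst nid> -r 8 -l 2" to delay roughly 1 of every 8 matching messages by
 * about 2 seconds (exact option names may vary between Lustre releases).
 * la_latency is the delay applied to each selected message, while
 * la_rate/la_interval only select which messages are delayed, mirroring the
 * drop rule semantics above.
 */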
/**
 * Remove matched delay rules from LNet; if \a shutdown is true or both \a src
 * and \a dst are zero, all rules will be removed, otherwise only matched
 * rules will be removed.
 * If \a src is zero, then all rules that have \a dst as destination will be removed.
 * If \a dst is zero, then all rules that have \a src as source will be removed.
 *
 * When a delay rule is removed, all delayed messages of this rule will be
 * processed immediately.
 */
int
lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
{
        struct lnet_delay_rule *rule;
        struct lnet_delay_rule *tmp;
        struct list_head rule_list;
        struct list_head msg_list;
        bool cleanup;

        INIT_LIST_HEAD(&rule_list);
        INIT_LIST_HEAD(&msg_list);

        if (shutdown)
                src = dst = 0;

        mutex_lock(&delay_dd.dd_mutex);
        lnet_net_lock(LNET_LOCK_EX);

        list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
                if (rule->dl_attr.fa_src != src && src != 0)
                        continue;

                if (rule->dl_attr.fa_dst != dst && dst != 0)
                        continue;

                CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
                       libcfs_nid2str(rule->dl_attr.fa_src),
                       libcfs_nid2str(rule->dl_attr.fa_dst),
                       rule->dl_attr.u.delay.la_rate,
                       rule->dl_attr.u.delay.la_interval);
                /* refcount is taken over by rule_list */
                list_move(&rule->dl_link, &rule_list);
        }

        /* check if we need to shut down delay_daemon */
        cleanup = list_empty(&the_lnet.ln_delay_rules) &&
                  !list_empty(&rule_list);
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
                list_del_init(&rule->dl_link);

                del_timer_sync(&rule->dl_timer);
                delayed_msg_check(rule, true, &msg_list);
                delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
        }

        if (cleanup) { /* no more delay rules, shut down delay_daemon */
                LASSERT(delay_dd.dd_running);
                delay_dd.dd_running = 0;
                wake_up(&delay_dd.dd_waitq);

                while (!delay_dd.dd_stopped)
                        wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
        }
        mutex_unlock(&delay_dd.dd_mutex);

        if (!list_empty(&msg_list))
                delayed_msg_process(&msg_list, shutdown);
/** List Delay Rule at position of \a pos */
lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
                     struct lnet_fault_stat *stat)
        struct lnet_delay_rule *rule;

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
                spin_lock(&rule->dl_lock);
                *attr = rule->dl_attr;
                *stat = rule->dl_stat;
                spin_unlock(&rule->dl_lock);

        lnet_net_unlock(cpt);
/** reset counters for all Delay Rules */
lnet_delay_rule_reset(void)
        struct lnet_delay_rule *rule;

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
                struct lnet_fault_attr *attr = &rule->dl_attr;

                spin_lock(&rule->dl_lock);

                memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
                if (attr->u.delay.la_rate != 0) {
                        rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
                } else {
                        rule->dl_delay_time = ktime_get_seconds() +
                                              cfs_rand() % attr->u.delay.la_interval;
                        rule->dl_time_base = ktime_get_seconds() +
                                              attr->u.delay.la_interval;
                }
                spin_unlock(&rule->dl_lock);
        }

        lnet_net_unlock(cpt);
int
lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
{
        struct lnet_fault_attr *attr;
        struct lnet_fault_stat *stat;

        attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;

        switch (opc) {
        case LNET_CTL_DROP_ADD:
                return lnet_drop_rule_add(attr);

        case LNET_CTL_DROP_DEL:
                data->ioc_count = lnet_drop_rule_del(attr->fa_src,
                                                     attr->fa_dst);
                return 0;

        case LNET_CTL_DROP_RESET:
                lnet_drop_rule_reset();
                return 0;

        case LNET_CTL_DROP_LIST:
                stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
                if (attr == NULL || stat == NULL)
                        return -EINVAL;

                return lnet_drop_rule_list(data->ioc_count, attr, stat);

        case LNET_CTL_DELAY_ADD:
                return lnet_delay_rule_add(attr);

        case LNET_CTL_DELAY_DEL:
                data->ioc_count = lnet_delay_rule_del(attr->fa_src,
                                                      attr->fa_dst, false);
                return 0;

        case LNET_CTL_DELAY_RESET:
                lnet_delay_rule_reset();
                return 0;

        case LNET_CTL_DELAY_LIST:
                stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
                if (attr == NULL || stat == NULL)
                        return -EINVAL;

                return lnet_delay_rule_list(data->ioc_count, attr, stat);

        default:
                return -EINVAL;
        }
}
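/*
 * Note on the control path (as wired up in matching LNet releases): LNetCtl()
 * dispatches the IOC_LIBCFS_LNET_FAULT ioctl to lnet_fault_ctl() with the
 * opcode taken from data->ioc_flags, so each LNET_CTL_* case above corresponds
 * to one lctl fault-injection command.
 */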
int
lnet_fault_init(void)
{
        CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
        CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
        CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
        CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);

        mutex_init(&delay_dd.dd_mutex);
        spin_lock_init(&delay_dd.dd_lock);
        init_waitqueue_head(&delay_dd.dd_waitq);
        init_waitqueue_head(&delay_dd.dd_ctl_waitq);
        INIT_LIST_HEAD(&delay_dd.dd_sched_rules);

        return 0;
}
void
lnet_fault_fini(void)
{
        lnet_drop_rule_del(0, 0);
        lnet_delay_rule_del(0, 0, true);

        LASSERT(list_empty(&the_lnet.ln_drop_rules));
        LASSERT(list_empty(&the_lnet.ln_delay_rules));
        LASSERT(list_empty(&delay_dd.dd_sched_rules));
}