/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2014, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/net_fault.c
 *
 * Lustre network fault simulation
 *
 * Author: liang.zhen@intel.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/random.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnetctl.h>

#define LNET_MSG_MASK           (LNET_PUT_BIT | LNET_ACK_BIT | \
                                 LNET_GET_BIT | LNET_REPLY_BIT)

struct lnet_drop_rule {
        /** link chain on the_lnet.ln_drop_rules */
        struct list_head        dr_link;
        /** attributes of this rule */
        struct lnet_fault_attr  dr_attr;
        /** lock to protect \a dr_drop_at and \a dr_stat */
        spinlock_t              dr_lock;
        /**
         * the message sequence number to drop, i.e. a message is dropped
         * when dr_stat.fs_count == dr_drop_at
         */
        unsigned long           dr_drop_at;
        /**
         * time (seconds) to drop the next message; mutually exclusive
         * with dr_drop_at
         */
        time64_t                dr_drop_time;
        /** baseline to calculate dr_drop_time */
        time64_t                dr_time_base;
        /** statistic of dropped messages */
        struct lnet_fault_stat  dr_stat;
};
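
/*
 * Illustrative example of the two drop modes above (numbers are made up):
 * a rule created with da_rate = 10 drops one of every 10 matching messages;
 * dr_drop_at is seeded with a random value in [0, 9], the message whose
 * fs_count equals it is dropped, and a new dr_drop_at is then picked inside
 * the next window of 10 messages.  A rule created with da_interval = 10
 * instead drops roughly one message every 10 seconds, using dr_time_base
 * plus a random offset to choose dr_drop_time within each interval.
 */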

static bool
lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
{
        if (nid == msg_nid || nid == LNET_NID_ANY)
                return true;

        if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
                return false;

        /* 255.255.255.255@net is wildcard for all addresses in a network */
        return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
}

static bool
lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
                      lnet_nid_t local_nid, lnet_nid_t dst,
                      unsigned int type, unsigned int portal)
{
        if (!lnet_fault_nid_match(attr->fa_src, src) ||
            !lnet_fault_nid_match(attr->fa_dst, dst) ||
            !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
                return false;

        if (!(attr->fa_msg_mask & BIT(type)))
                return false;

        /* NB: ACK and REPLY have no portal, but they should have been
         * rejected by message mask */
        if (attr->fa_ptl_mask != 0 && /* has portal filter */
            !(attr->fa_ptl_mask & (1ULL << portal)))
                return false;

        return true;
}

static int
lnet_fault_attr_validate(struct lnet_fault_attr *attr)
{
        if (attr->fa_msg_mask == 0)
                attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */

        if (attr->fa_ptl_mask == 0) /* no portal filter */
                return 0;

        /* NB: only PUT and GET can be filtered if portal filter has been set */
        attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
        if (attr->fa_msg_mask == 0) {
                CDEBUG(D_NET, "can't find valid message type bits %x\n",
                       attr->fa_msg_mask);
                return -EINVAL;
        }
        return 0;
}
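
/*
 * Illustrative example of the validation above: if a rule is created with
 * fa_ptl_mask = (1ULL << 5) (a hypothetical portal number) and fa_msg_mask
 * left as zero, the mask first defaults to all message types and is then
 * narrowed to LNET_GET_BIT | LNET_PUT_BIT, because only PUT and GET carry a
 * portal index; ACK and REPLY can never match a portal-filtered rule.
 */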

static void
lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
{
        /* NB: fs_count is NOT updated by this function */
        switch (type) {
        case LNET_MSG_PUT:
                stat->fs_put++;
                return;
        case LNET_MSG_ACK:
                stat->fs_ack++;
                return;
        case LNET_MSG_GET:
                stat->fs_get++;
                return;
        case LNET_MSG_REPLY:
                stat->fs_reply++;
                return;
        }
}

/**
 * LNet message drop simulation
 */

/**
 * Add a new drop rule to LNet
 * There is no check for duplicated drop rules; every rule is checked
 * against each incoming message.
 */
static int
lnet_drop_rule_add(struct lnet_fault_attr *attr)
{
        struct lnet_drop_rule *rule;
        ENTRY;

        if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
                CDEBUG(D_NET,
                       "please provide either drop rate or drop interval, "
                       "but not both at the same time %d/%d\n",
                       attr->u.drop.da_rate, attr->u.drop.da_interval);
                RETURN(-EINVAL);
        }

        if (lnet_fault_attr_validate(attr) != 0)
                RETURN(-EINVAL);

        CFS_ALLOC_PTR(rule);
        if (rule == NULL)
                RETURN(-ENOMEM);

        spin_lock_init(&rule->dr_lock);

        rule->dr_attr = *attr;
        if (attr->u.drop.da_interval != 0) {
                rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
                rule->dr_drop_time = ktime_get_seconds() +
                                     prandom_u32_max(attr->u.drop.da_interval);
        } else {
                rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
        }

        lnet_net_lock(LNET_LOCK_EX);
        list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
        lnet_net_unlock(LNET_LOCK_EX);

        CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
               libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
               attr->u.drop.da_rate, attr->u.drop.da_interval);
        RETURN(0);
}
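
/*
 * For reference (assuming the standard Lustre user-space tooling): a drop
 * rule is normally installed from user space with something like
 * "lctl net_drop_add -s <src NID> -d <dst NID> -r <rate>", which builds a
 * struct lnet_fault_attr and reaches this function through lnet_fault_ctl()
 * with the LNET_CTL_DROP_ADD opcode; see lnet_fault_ctl() below.
 */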

/**
 * Remove matched drop rules from LNet; all rules that match \a src and
 * \a dst will be removed.
 * If \a src is zero, all rules that have \a dst as destination will be
 * removed.
 * If \a dst is zero, all rules that have \a src as source will be removed.
 * If both are zero, all rules will be removed.
 */
static int
lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
{
        struct lnet_drop_rule *rule;
        struct lnet_drop_rule *tmp;
        LIST_HEAD(zombies);
        int n = 0;
        ENTRY;

        lnet_net_lock(LNET_LOCK_EX);
        list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
                if (rule->dr_attr.fa_src != src && src != 0)
                        continue;

                if (rule->dr_attr.fa_dst != dst && dst != 0)
                        continue;

                list_move(&rule->dr_link, &zombies);
        }
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
                CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
                       libcfs_nid2str(rule->dr_attr.fa_src),
                       libcfs_nid2str(rule->dr_attr.fa_dst),
                       rule->dr_attr.u.drop.da_rate,
                       rule->dr_attr.u.drop.da_interval);

                list_del(&rule->dr_link);
                CFS_FREE_PTR(rule);
                n++;
        }

        RETURN(n);
}

/**
 * List drop rule at position of \a pos
 */
static int
lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
                    struct lnet_fault_stat *stat)
{
        struct lnet_drop_rule *rule;
        int                    cpt;
        int                    i = 0;
        int                    rc = -ENOENT;
        ENTRY;

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
                if (i++ < pos)
                        continue;

                spin_lock(&rule->dr_lock);
                *attr = rule->dr_attr;
                *stat = rule->dr_stat;
                spin_unlock(&rule->dr_lock);
                rc = 0;
                break;
        }

        lnet_net_unlock(cpt);
        RETURN(rc);
}

/**
 * reset counters for all drop rules
 */
static void
lnet_drop_rule_reset(void)
{
        struct lnet_drop_rule *rule;
        int                    cpt;
        ENTRY;

        cpt = lnet_net_lock_current();

        list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
                struct lnet_fault_attr *attr = &rule->dr_attr;

                spin_lock(&rule->dr_lock);

                memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
                if (attr->u.drop.da_rate != 0) {
                        rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
                } else {
                        rule->dr_drop_time = ktime_get_seconds() +
                                             prandom_u32_max(attr->u.drop.da_interval);
                        rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
                }
                spin_unlock(&rule->dr_lock);
        }

        lnet_net_unlock(cpt);
        EXIT;
}

static void
lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
{
        int choice;
        int delta;
        int best_delta;
        int i;

        /* assign a random failure */
        choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
        if (choice == 0)
                choice++;

        if (mask == HSTATUS_RANDOM) {
                *hstatus = choice;
                return;
        }

        if (mask & BIT(choice)) {
                *hstatus = choice;
                return;
        }

        /* round to the closest ON bit */
        i = HSTATUS_END;
        best_delta = HSTATUS_END;
        while (i > 0) {
                if (mask & BIT(i)) {
                        delta = choice - i;
                        if (delta < 0)
                                delta *= -1;
                        if (delta < best_delta) {
                                best_delta = delta;
                                choice = i;
                        }
                }
                i--;
        }

        *hstatus = choice;
}
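
/*
 * Illustrative example of the rounding above: if the random choice is 5 but
 * the user mask only has bits 1 and 6 set, the loop first finds bit 6 with
 * |5 - 6| = 1; bit 1 is farther away, so the reported health status becomes
 * 6, the enabled error nearest the random pick.
 */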

/**
 * check source/destination NID, portal, message type and drop rate,
 * decide whether this message should be dropped or not
 */
static bool
drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
                lnet_nid_t local_nid, lnet_nid_t dst,
                unsigned int type, unsigned int portal,
                enum lnet_msg_hstatus *hstatus)
{
        struct lnet_fault_attr  *attr = &rule->dr_attr;
        bool                     drop;

        if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
                return false;

        if (attr->u.drop.da_drop_all) {
                CDEBUG(D_NET, "set to drop all messages\n");
                drop = true;
                /* take dr_lock so the stat update and unlock after
                 * drop_matched are balanced */
                spin_lock(&rule->dr_lock);
                goto drop_matched;
        }

        /*
         * if we're trying to match a health status error but it hasn't
         * been set in the rule, then don't match
         */
        if ((hstatus && !attr->u.drop.da_health_error_mask) ||
            (!hstatus && attr->u.drop.da_health_error_mask))
                return false;

        /* match this rule, check drop rate now */
        spin_lock(&rule->dr_lock);
        if (attr->u.drop.da_random) {
                int value = prandom_u32_max(attr->u.drop.da_interval);
                if (value >= (attr->u.drop.da_interval / 2))
                        drop = true;
                else
                        drop = false;
        } else if (rule->dr_drop_time != 0) { /* time based drop */
                time64_t now = ktime_get_seconds();

                rule->dr_stat.fs_count++;
                drop = now >= rule->dr_drop_time;
                if (drop) {
                        if (now > rule->dr_time_base)
                                rule->dr_time_base = now;

                        rule->dr_drop_time = rule->dr_time_base +
                                             prandom_u32_max(attr->u.drop.da_interval);
                        rule->dr_time_base += attr->u.drop.da_interval;

                        CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst),
                               rule->dr_drop_time);
                }

        } else { /* rate based drop */
                __u64 count;

                drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
                count = rule->dr_stat.fs_count;
                if (do_div(count, attr->u.drop.da_rate) == 0) {
                        rule->dr_drop_at = rule->dr_stat.fs_count +
                                           prandom_u32_max(attr->u.drop.da_rate);
                        CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
                }
        }

drop_matched:

        if (drop) { /* drop this message, update counters */
                if (hstatus)
                        lnet_fault_match_health(hstatus,
                                attr->u.drop.da_health_error_mask);
                lnet_fault_stat_inc(&rule->dr_stat, type);
                rule->dr_stat.u.drop.ds_dropped++;
        }

        spin_unlock(&rule->dr_lock);
        return drop;
}
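
/*
 * Worked example of the rate based branch above (illustrative numbers):
 * with da_rate = 10 and dr_drop_at seeded to 7, messages with fs_count
 * 0..6 pass, the message with fs_count 7 is dropped, and once fs_count
 * reaches 10 (a multiple of da_rate) a new dr_drop_at is picked in the
 * window [10, 19], so one message is dropped per window of 10 messages.
 */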

/**
 * Check if a message from \a src to \a dst can match any existing drop rule
 */
bool
lnet_drop_rule_match(struct lnet_hdr *hdr,
                     lnet_nid_t local_nid,
                     enum lnet_msg_hstatus *hstatus)
{
        lnet_nid_t src = le64_to_cpu(hdr->src_nid);
        lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
        unsigned int typ = le32_to_cpu(hdr->type);
        struct lnet_drop_rule *rule;
        unsigned int ptl = -1;
        bool drop = false;
        int cpt;

        /* NB: if a portal is specified, then only PUT and GET will be
         * filtered by the drop rule */
        if (typ == LNET_MSG_PUT)
                ptl = le32_to_cpu(hdr->msg.put.ptl_index);
        else if (typ == LNET_MSG_GET)
                ptl = le32_to_cpu(hdr->msg.get.ptl_index);

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
                drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
                                       hstatus);
                if (drop)
                        break;
        }
        lnet_net_unlock(cpt);

        return drop;
}

/**
 * LNet Delay Simulation
 */
/** timestamp (seconds) to send the delayed message */
#define msg_delay_send           msg_ev.hdr_data

struct lnet_delay_rule {
        /** link chain on the_lnet.ln_delay_rules */
        struct list_head        dl_link;
        /** link chain on delay_dd.dd_sched_rules */
        struct list_head        dl_sched_link;
        /** attributes of this rule */
        struct lnet_fault_attr  dl_attr;
        /** lock to protect \a below members */
        spinlock_t              dl_lock;
        /** refcount of delay rule */
        atomic_t                dl_refcount;
        /**
         * the message sequence number to delay, i.e. a message is delayed
         * when dl_stat.fs_count == dl_delay_at
         */
        unsigned long           dl_delay_at;
        /**
         * time (seconds) to delay the next message; mutually exclusive
         * with dl_delay_at
         */
        time64_t                dl_delay_time;
        /** baseline to calculate dl_delay_time */
        time64_t                dl_time_base;
        /** timestamp (seconds) to send the next delayed message */
        time64_t                dl_msg_send;
        /** delayed message list */
        struct list_head        dl_msg_list;
        /** statistic of delayed messages */
        struct lnet_fault_stat  dl_stat;
        /** timer to wake up delay_daemon */
        struct timer_list       dl_timer;
};

struct delay_daemon_data {
        /** serialise rule add/remove */
        struct mutex            dd_mutex;
        /** protect rules on \a dd_sched_rules */
        spinlock_t              dd_lock;
        /** scheduled delay rules (by timer) */
        struct list_head        dd_sched_rules;
        /** daemon thread sleeps here */
        wait_queue_head_t       dd_waitq;
        /** controller (lctl command) waits here */
        wait_queue_head_t       dd_ctl_waitq;
        /** daemon is running */
        unsigned int            dd_running;
        /** daemon stopped */
        unsigned int            dd_stopped;
};

static struct delay_daemon_data delay_dd;

static void
delay_rule_decref(struct lnet_delay_rule *rule)
{
        if (atomic_dec_and_test(&rule->dl_refcount)) {
                LASSERT(list_empty(&rule->dl_sched_link));
                LASSERT(list_empty(&rule->dl_msg_list));
                LASSERT(list_empty(&rule->dl_link));

                CFS_FREE_PTR(rule);
        }
}

/**
 * check source/destination NID, portal, message type and delay rate,
 * decide whether this message should be delayed or not
 */
static bool
delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
                lnet_nid_t dst, unsigned int type, unsigned int portal,
                struct lnet_msg *msg)
{
        struct lnet_fault_attr  *attr = &rule->dl_attr;
        bool                     delay;

        if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
                                   dst, type, portal))
                return false;

        /* match this rule, check delay rate now */
        spin_lock(&rule->dl_lock);
        if (rule->dl_delay_time != 0) { /* time based delay */
                time64_t now = ktime_get_seconds();

                rule->dl_stat.fs_count++;
                delay = now >= rule->dl_delay_time;
                if (delay) {
                        if (now > rule->dl_time_base)
                                rule->dl_time_base = now;

                        rule->dl_delay_time = rule->dl_time_base +
                                              prandom_u32_max(attr->u.delay.la_interval);
                        rule->dl_time_base += attr->u.delay.la_interval;

                        CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst),
                               rule->dl_delay_time);
                }

        } else { /* rate based delay */
                __u64 count;

                delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
                /* generate the next random rate sequence */
                count = rule->dl_stat.fs_count;
                if (do_div(count, attr->u.delay.la_rate) == 0) {
                        rule->dl_delay_at = rule->dl_stat.fs_count +
                                            prandom_u32_max(attr->u.delay.la_rate);
                        CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
                               libcfs_nid2str(attr->fa_src),
                               libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
                }
        }

        if (!delay) {
                spin_unlock(&rule->dl_lock);
                return false;
        }

        /* delay this message, update counters */
        lnet_fault_stat_inc(&rule->dl_stat, type);
        rule->dl_stat.u.delay.ls_delayed++;

        list_add_tail(&msg->msg_list, &rule->dl_msg_list);
        msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
        if (rule->dl_msg_send == -1) {
                rule->dl_msg_send = msg->msg_delay_send;
                mod_timer(&rule->dl_timer,
                          jiffies + cfs_time_seconds(rule->dl_msg_send));
        }

        spin_unlock(&rule->dl_lock);
        return true;
}
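
/*
 * Note on the queueing above: each delayed message records its own send
 * time in msg_delay_send, while dl_msg_send caches the send time of the
 * message at the head of dl_msg_list (-1 means the list is empty); the
 * rule's timer is only re-armed when the list goes from empty to non-empty,
 * and delayed_msg_check() below keeps it up to date as messages are
 * dequeued.
 */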

/**
 * check if \a msg can match any delay rule; receiving of this message
 * will be delayed if there is a match.
 */
bool
lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
{
        struct lnet_delay_rule  *rule;
        lnet_nid_t               src = le64_to_cpu(hdr->src_nid);
        lnet_nid_t               dst = le64_to_cpu(hdr->dest_nid);
        unsigned int             typ = le32_to_cpu(hdr->type);
        unsigned int             ptl = -1;

        /* NB: called with lnet_net_lock held */

        /* NB: if a portal is specified, then only PUT and GET will be
         * filtered by the delay rule */
        if (typ == LNET_MSG_PUT)
                ptl = le32_to_cpu(hdr->msg.put.ptl_index);
        else if (typ == LNET_MSG_GET)
                ptl = le32_to_cpu(hdr->msg.get.ptl_index);

        list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
                if (delay_rule_match(rule, src, dst, typ, ptl, msg))
                        return true;
        }

        return false;
}

/** dequeue delayed messages that are ready to send */
static void
delayed_msg_check(struct lnet_delay_rule *rule, bool all,
                  struct list_head *msg_list)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;
        time64_t now = ktime_get_seconds();

        if (!all && cfs_time_seconds(rule->dl_msg_send) > now)
                return;

        spin_lock(&rule->dl_lock);
        list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
                if (!all && msg->msg_delay_send > now)
                        break;

                msg->msg_delay_send = 0;
                list_move_tail(&msg->msg_list, msg_list);
        }

        if (list_empty(&rule->dl_msg_list)) {
                del_timer(&rule->dl_timer);
                rule->dl_msg_send = -1;

        } else if (!list_empty(msg_list)) {
                /* dequeued some timed-out messages, update the timer for the
                 * next delayed message on the rule */
                msg = list_entry(rule->dl_msg_list.next,
                                 struct lnet_msg, msg_list);
                rule->dl_msg_send = msg->msg_delay_send;
                mod_timer(&rule->dl_timer,
                          jiffies + cfs_time_seconds(rule->dl_msg_send));
        }
        spin_unlock(&rule->dl_lock);
}

static void
delayed_msg_process(struct list_head *msg_list, bool drop)
{
        struct lnet_msg *msg;

        while (!list_empty(msg_list)) {
                struct lnet_ni *ni;
                int             cpt;
                int             rc;

                msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_rxni != NULL);

                ni = msg->msg_rxni;
                cpt = msg->msg_rx_cpt;

                list_del_init(&msg->msg_list);
                if (drop) {
                        rc = -ECANCELED;

                } else if (!msg->msg_routing) {
                        rc = lnet_parse_local(ni, msg);
                        if (rc == 0)
                                continue;

                } else {
                        lnet_net_lock(cpt);
                        rc = lnet_parse_forward_locked(ni, msg);
                        lnet_net_unlock(cpt);

                        switch (rc) {
                        case LNET_CREDIT_OK:
                                lnet_ni_recv(ni, msg->msg_private, msg, 0,
                                             0, msg->msg_len, msg->msg_len);
                                /* fallthrough */
                        case LNET_CREDIT_WAIT:
                                continue;
                        default: /* failures */
                                break;
                        }
                }

                lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
                                  msg->msg_type);
                lnet_finalize(msg, rc);
        }
}

/**
 * Process delayed messages for scheduled rules
 * This function can be called either by the delay rule daemon or by
 * lnet_finalize()
 */
void
lnet_delay_rule_check(void)
{
        struct lnet_delay_rule *rule;
        LIST_HEAD(msgs);

        while (1) {
                if (list_empty(&delay_dd.dd_sched_rules))
                        break;

                spin_lock_bh(&delay_dd.dd_lock);
                if (list_empty(&delay_dd.dd_sched_rules)) {
                        spin_unlock_bh(&delay_dd.dd_lock);
                        break;
                }

                rule = list_entry(delay_dd.dd_sched_rules.next,
                                  struct lnet_delay_rule, dl_sched_link);
                list_del_init(&rule->dl_sched_link);
                spin_unlock_bh(&delay_dd.dd_lock);

                delayed_msg_check(rule, false, &msgs);
                delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
        }

        if (!list_empty(&msgs))
                delayed_msg_process(&msgs, false);
}

/** daemon thread to handle delayed messages */
static int
lnet_delay_rule_daemon(void *arg)
{
        delay_dd.dd_running = 1;
        wake_up(&delay_dd.dd_ctl_waitq);

        while (delay_dd.dd_running) {
                wait_event_interruptible(delay_dd.dd_waitq,
                                         !delay_dd.dd_running ||
                                         !list_empty(&delay_dd.dd_sched_rules));
                lnet_delay_rule_check();
        }

        /* in case more rules have been enqueued after my last check */
        lnet_delay_rule_check();
        delay_dd.dd_stopped = 1;
        wake_up(&delay_dd.dd_ctl_waitq);

        return 0;
}

static void
delay_timer_cb(cfs_timer_cb_arg_t data)
{
        struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);

        spin_lock_bh(&delay_dd.dd_lock);
        if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
                atomic_inc(&rule->dl_refcount);
                list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
                wake_up(&delay_dd.dd_waitq);
        }
        spin_unlock_bh(&delay_dd.dd_lock);
}
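
/*
 * Overall flow of delayed delivery: delay_rule_match() queues a message on
 * the rule and arms dl_timer; when the timer fires, delay_timer_cb() moves
 * the rule onto delay_dd.dd_sched_rules (taking a reference) and wakes the
 * daemon; the daemon calls lnet_delay_rule_check(), which collects the
 * timed-out messages via delayed_msg_check() and finally hands them to
 * delayed_msg_process() for normal receive or forwarding.
 */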

/**
 * Add a new delay rule to LNet
 * There is no check for duplicated delay rules; every rule is checked
 * against each incoming message.
 */
int
lnet_delay_rule_add(struct lnet_fault_attr *attr)
{
        struct lnet_delay_rule *rule;
        int                     rc = 0;
        ENTRY;

        if (!((attr->u.delay.la_rate == 0) ^
              (attr->u.delay.la_interval == 0))) {
                CDEBUG(D_NET,
                       "please provide either delay rate or delay interval, "
                       "but not both at the same time %d/%d\n",
                       attr->u.delay.la_rate, attr->u.delay.la_interval);
                RETURN(-EINVAL);
        }

        if (attr->u.delay.la_latency == 0) {
                CDEBUG(D_NET, "delay latency cannot be zero\n");
                RETURN(-EINVAL);
        }

        if (lnet_fault_attr_validate(attr) != 0)
                RETURN(-EINVAL);

        CFS_ALLOC_PTR(rule);
        if (rule == NULL)
                RETURN(-ENOMEM);

        mutex_lock(&delay_dd.dd_mutex);
        if (!delay_dd.dd_running) {
                struct task_struct *task;

                /* NB: although LND threads will process delayed messages
                 * in lnet_finalize, there is no guarantee that LND threads
                 * will be woken up if no other message needs to be handled.
                 * Only one daemon thread; performance is not the concern
                 * of this simulation module.
                 */
                task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
                if (IS_ERR(task)) {
                        rc = PTR_ERR(task);
                        GOTO(failed, rc);
                }
                wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
        }

        cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
                        (unsigned long)rule, 0);

        spin_lock_init(&rule->dl_lock);
        INIT_LIST_HEAD(&rule->dl_msg_list);
        INIT_LIST_HEAD(&rule->dl_sched_link);

        rule->dl_attr = *attr;
        if (attr->u.delay.la_interval != 0) {
                rule->dl_time_base = ktime_get_seconds() +
                                     attr->u.delay.la_interval;
                rule->dl_delay_time = ktime_get_seconds() +
                                      prandom_u32_max(attr->u.delay.la_interval);
        } else {
                rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
        }

        rule->dl_msg_send = -1;

        lnet_net_lock(LNET_LOCK_EX);
        atomic_set(&rule->dl_refcount, 1);
        list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
        lnet_net_unlock(LNET_LOCK_EX);

        CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
               libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
               attr->u.delay.la_rate);

        mutex_unlock(&delay_dd.dd_mutex);
        RETURN(0);
 failed:
        mutex_unlock(&delay_dd.dd_mutex);
        CFS_FREE_PTR(rule);
        return rc;
}

/**
 * Remove matched delay rules from LNet; if \a shutdown is true or both
 * \a src and \a dst are zero, all rules will be removed, otherwise only
 * matched rules will be removed.
 * If \a src is zero, all rules that have \a dst as destination will be
 * removed.
 * If \a dst is zero, all rules that have \a src as source will be removed.
 *
 * When a delay rule is removed, all delayed messages of this rule will be
 * processed immediately.
 */
int
lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
{
        struct lnet_delay_rule *rule;
        struct lnet_delay_rule *tmp;
        LIST_HEAD(rule_list);
        LIST_HEAD(msg_list);
        int n = 0;
        bool cleanup;
        ENTRY;

        if (shutdown)
                src = dst = 0;

        mutex_lock(&delay_dd.dd_mutex);
        lnet_net_lock(LNET_LOCK_EX);

        list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
                if (rule->dl_attr.fa_src != src && src != 0)
                        continue;

                if (rule->dl_attr.fa_dst != dst && dst != 0)
                        continue;

                CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
                       libcfs_nid2str(rule->dl_attr.fa_src),
                       libcfs_nid2str(rule->dl_attr.fa_dst),
                       rule->dl_attr.u.delay.la_rate,
                       rule->dl_attr.u.delay.la_interval);
                /* refcount is taken over by rule_list */
                list_move(&rule->dl_link, &rule_list);
        }

        /* check if we need to shutdown delay_daemon */
        cleanup = list_empty(&the_lnet.ln_delay_rules) &&
                  !list_empty(&rule_list);
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
                list_del_init(&rule->dl_link);

                del_timer_sync(&rule->dl_timer);
                delayed_msg_check(rule, true, &msg_list);
                delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
                n++;
        }

        if (cleanup) { /* no more delay rules, shutdown delay_daemon */
                LASSERT(delay_dd.dd_running);
                delay_dd.dd_running = 0;
                wake_up(&delay_dd.dd_waitq);

                while (!delay_dd.dd_stopped)
                        wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
        }
        mutex_unlock(&delay_dd.dd_mutex);

        if (!list_empty(&msg_list))
                delayed_msg_process(&msg_list, shutdown);

        RETURN(n);
}

/**
 * List Delay Rule at position of \a pos
 */
int
lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
                    struct lnet_fault_stat *stat)
{
        struct lnet_delay_rule *rule;
        int                     cpt;
        int                     i = 0;
        int                     rc = -ENOENT;
        ENTRY;

        cpt = lnet_net_lock_current();
        list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
                if (i++ < pos)
                        continue;

                spin_lock(&rule->dl_lock);
                *attr = rule->dl_attr;
                *stat = rule->dl_stat;
                spin_unlock(&rule->dl_lock);
                rc = 0;
                break;
        }

        lnet_net_unlock(cpt);
        RETURN(rc);
}

/**
 * reset counters for all Delay Rules
 */
void
lnet_delay_rule_reset(void)
{
        struct lnet_delay_rule *rule;
        int                     cpt;
        ENTRY;

        cpt = lnet_net_lock_current();

        list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
                struct lnet_fault_attr *attr = &rule->dl_attr;

                spin_lock(&rule->dl_lock);

                memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
                if (attr->u.delay.la_rate != 0) {
                        rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
                } else {
                        rule->dl_delay_time = ktime_get_seconds() +
                                              prandom_u32_max(attr->u.delay.la_interval);
                        rule->dl_time_base = ktime_get_seconds() +
                                             attr->u.delay.la_interval;
                }
                spin_unlock(&rule->dl_lock);
        }

        lnet_net_unlock(cpt);
        EXIT;
}

int
lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
{
        struct lnet_fault_attr *attr;
        struct lnet_fault_stat *stat;

        attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;

        switch (opc) {
        default:
                return -EINVAL;

        case LNET_CTL_DROP_ADD:
                if (attr == NULL)
                        return -EINVAL;

                return lnet_drop_rule_add(attr);

        case LNET_CTL_DROP_DEL:
                if (attr == NULL)
                        return -EINVAL;

                data->ioc_count = lnet_drop_rule_del(attr->fa_src,
                                                     attr->fa_dst);
                return 0;

        case LNET_CTL_DROP_RESET:
                lnet_drop_rule_reset();
                return 0;

        case LNET_CTL_DROP_LIST:
                stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
                if (attr == NULL || stat == NULL)
                        return -EINVAL;

                return lnet_drop_rule_list(data->ioc_count, attr, stat);

        case LNET_CTL_DELAY_ADD:
                if (attr == NULL)
                        return -EINVAL;

                return lnet_delay_rule_add(attr);

        case LNET_CTL_DELAY_DEL:
                if (attr == NULL)
                        return -EINVAL;

                data->ioc_count = lnet_delay_rule_del(attr->fa_src,
                                                      attr->fa_dst, false);
                return 0;

        case LNET_CTL_DELAY_RESET:
                lnet_delay_rule_reset();
                return 0;

        case LNET_CTL_DELAY_LIST:
                stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
                if (attr == NULL || stat == NULL)
                        return -EINVAL;

                return lnet_delay_rule_list(data->ioc_count, attr, stat);
        }
}
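
/*
 * A minimal sketch of how a rule reaches lnet_fault_ctl() (illustrative;
 * the user-space and ioctl plumbing lives outside this file): user space
 * fills in a struct lnet_fault_attr, points data->ioc_inlbuf1 at it, and
 * issues the LNet fault ioctl (IOC_LIBCFS_LNET_FAULT, assuming the usual
 * libcfs ioctl naming) with an opcode such as LNET_CTL_DROP_ADD; LNetCtl()
 * then dispatches that opcode to this function.
 */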

int
lnet_fault_init(void)
{
        BUILD_BUG_ON(LNET_PUT_BIT != BIT(LNET_MSG_PUT));
        BUILD_BUG_ON(LNET_ACK_BIT != BIT(LNET_MSG_ACK));
        BUILD_BUG_ON(LNET_GET_BIT != BIT(LNET_MSG_GET));
        BUILD_BUG_ON(LNET_REPLY_BIT != BIT(LNET_MSG_REPLY));

        mutex_init(&delay_dd.dd_mutex);
        spin_lock_init(&delay_dd.dd_lock);
        init_waitqueue_head(&delay_dd.dd_waitq);
        init_waitqueue_head(&delay_dd.dd_ctl_waitq);
        INIT_LIST_HEAD(&delay_dd.dd_sched_rules);

        return 0;
}

void
lnet_fault_fini(void)
{
        lnet_drop_rule_del(0, 0);
        lnet_delay_rule_del(0, 0, true);

        LASSERT(list_empty(&the_lnet.ln_drop_rules));
        LASSERT(list_empty(&the_lnet.ln_delay_rules));
        LASSERT(list_empty(&delay_dd.dd_sched_rules));
}