/* Whamcloud gitweb extraction header:
 * LU-11470 lnet: drop all rule
 * [fs/lustre-release.git] / lnet / lnet / net_fault.c
 */
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; if not, write to the
18  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19  * Boston, MA 021110-1307, USA
20  *
21  * GPL HEADER END
22  */
23 /*
24  * Copyright (c) 2014, 2017, Intel Corporation.
25  */
26 /*
27  * This file is part of Lustre, http://www.lustre.org/
28  * Lustre is a trademark of Sun Microsystems, Inc.
29  *
30  * lnet/lnet/net_fault.c
31  *
32  * Lustre network fault simulation
33  *
34  * Author: liang.zhen@intel.com
35  */
36
37 #define DEBUG_SUBSYSTEM S_LNET
38
39 #include <lnet/lib-lnet.h>
40 #include <uapi/linux/lnet/lnetctl.h>
41
42 #define LNET_MSG_MASK           (LNET_PUT_BIT | LNET_ACK_BIT | \
43                                  LNET_GET_BIT | LNET_REPLY_BIT)
44
struct lnet_drop_rule {
	/** link chain on the_lnet.ln_drop_rules */
	struct list_head	dr_link;
	/** attributes of this rule */
	struct lnet_fault_attr	dr_attr;
	/** lock to protect \a dr_drop_at and \a dr_stat */
	spinlock_t		dr_lock;
	/**
	 * the message sequence to drop, which means message is dropped when
	 * dr_stat.drs_count == dr_drop_at
	 */
	unsigned long		dr_drop_at;
	/**
	 * seconds to drop the next message, it's exclusive with dr_drop_at
	 */
	time64_t		dr_drop_time;
	/** baseline to calculate dr_drop_time */
	time64_t		dr_time_base;
	/** statistic of dropped messages */
	struct lnet_fault_stat	dr_stat;
};
66
67 static bool
68 lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
69 {
70         if (nid == msg_nid || nid == LNET_NID_ANY)
71                 return true;
72
73         if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
74                 return false;
75
76         /* 255.255.255.255@net is wildcard for all addresses in a network */
77         return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
78 }
79
80 static bool
81 lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
82                       lnet_nid_t local_nid, lnet_nid_t dst,
83                       unsigned int type, unsigned int portal)
84 {
85         if (!lnet_fault_nid_match(attr->fa_src, src) ||
86             !lnet_fault_nid_match(attr->fa_dst, dst) ||
87             !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
88                 return false;
89
90         if (!(attr->fa_msg_mask & (1 << type)))
91                 return false;
92
93         /* NB: ACK and REPLY have no portal, but they should have been
94          * rejected by message mask */
95         if (attr->fa_ptl_mask != 0 && /* has portal filter */
96             !(attr->fa_ptl_mask & (1ULL << portal)))
97                 return false;
98
99         return true;
100 }
101
102 static int
103 lnet_fault_attr_validate(struct lnet_fault_attr *attr)
104 {
105         if (attr->fa_msg_mask == 0)
106                 attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
107
108         if (attr->fa_ptl_mask == 0) /* no portal filter */
109                 return 0;
110
111         /* NB: only PUT and GET can be filtered if portal filter has been set */
112         attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
113         if (attr->fa_msg_mask == 0) {
114                 CDEBUG(D_NET, "can't find valid message type bits %x\n",
115                        attr->fa_msg_mask);
116                 return -EINVAL;
117         }
118         return 0;
119 }
120
121 static void
122 lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
123 {
124         /* NB: fs_counter is NOT updated by this function */
125         switch (type) {
126         case LNET_MSG_PUT:
127                 stat->fs_put++;
128                 return;
129         case LNET_MSG_ACK:
130                 stat->fs_ack++;
131                 return;
132         case LNET_MSG_GET:
133                 stat->fs_get++;
134                 return;
135         case LNET_MSG_REPLY:
136                 stat->fs_reply++;
137                 return;
138         }
139 }
140
141 /**
142  * LNet message drop simulation
143  */
144
145 /**
146  * Add a new drop rule to LNet
147  * There is no check for duplicated drop rule, all rules will be checked for
148  * incoming message.
149  */
150 static int
151 lnet_drop_rule_add(struct lnet_fault_attr *attr)
152 {
153         struct lnet_drop_rule *rule;
154         ENTRY;
155
156         if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
157                 CDEBUG(D_NET,
158                        "please provide either drop rate or drop interval, "
159                        "but not both at the same time %d/%d\n",
160                        attr->u.drop.da_rate, attr->u.drop.da_interval);
161                 RETURN(-EINVAL);
162         }
163
164         if (lnet_fault_attr_validate(attr) != 0)
165                 RETURN(-EINVAL);
166
167         CFS_ALLOC_PTR(rule);
168         if (rule == NULL)
169                 RETURN(-ENOMEM);
170
171         spin_lock_init(&rule->dr_lock);
172
173         rule->dr_attr = *attr;
174         if (attr->u.drop.da_interval != 0) {
175                 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
176                 rule->dr_drop_time = ktime_get_seconds() +
177                                      cfs_rand() % attr->u.drop.da_interval;
178         } else {
179                 rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
180         }
181
182         lnet_net_lock(LNET_LOCK_EX);
183         list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
184         lnet_net_unlock(LNET_LOCK_EX);
185
186         CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
187                libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
188                attr->u.drop.da_rate, attr->u.drop.da_interval);
189         RETURN(0);
190 }
191
192 /**
193  * Remove matched drop rules from lnet, all rules that can match \a src and
194  * \a dst will be removed.
195  * If \a src is zero, then all rules have \a dst as destination will be remove
196  * If \a dst is zero, then all rules have \a src as source will be removed
197  * If both of them are zero, all rules will be removed
198  */
199 static int
200 lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
201 {
202         struct lnet_drop_rule *rule;
203         struct lnet_drop_rule *tmp;
204         struct list_head       zombies;
205         int                    n = 0;
206         ENTRY;
207
208         INIT_LIST_HEAD(&zombies);
209
210         lnet_net_lock(LNET_LOCK_EX);
211         list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
212                 if (rule->dr_attr.fa_src != src && src != 0)
213                         continue;
214
215                 if (rule->dr_attr.fa_dst != dst && dst != 0)
216                         continue;
217
218                 list_move(&rule->dr_link, &zombies);
219         }
220         lnet_net_unlock(LNET_LOCK_EX);
221
222         list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
223                 CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
224                        libcfs_nid2str(rule->dr_attr.fa_src),
225                        libcfs_nid2str(rule->dr_attr.fa_dst),
226                        rule->dr_attr.u.drop.da_rate,
227                        rule->dr_attr.u.drop.da_interval);
228
229                 list_del(&rule->dr_link);
230                 CFS_FREE_PTR(rule);
231                 n++;
232         }
233
234         RETURN(n);
235 }
236
237 /**
238  * List drop rule at position of \a pos
239  */
240 static int
241 lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
242                     struct lnet_fault_stat *stat)
243 {
244         struct lnet_drop_rule *rule;
245         int                    cpt;
246         int                    i = 0;
247         int                    rc = -ENOENT;
248         ENTRY;
249
250         cpt = lnet_net_lock_current();
251         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
252                 if (i++ < pos)
253                         continue;
254
255                 spin_lock(&rule->dr_lock);
256                 *attr = rule->dr_attr;
257                 *stat = rule->dr_stat;
258                 spin_unlock(&rule->dr_lock);
259                 rc = 0;
260                 break;
261         }
262
263         lnet_net_unlock(cpt);
264         RETURN(rc);
265 }
266
267 /**
268  * reset counters for all drop rules
269  */
270 static void
271 lnet_drop_rule_reset(void)
272 {
273         struct lnet_drop_rule *rule;
274         int                    cpt;
275         ENTRY;
276
277         cpt = lnet_net_lock_current();
278
279         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
280                 struct lnet_fault_attr *attr = &rule->dr_attr;
281
282                 spin_lock(&rule->dr_lock);
283
284                 memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
285                 if (attr->u.drop.da_rate != 0) {
286                         rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
287                 } else {
288                         rule->dr_drop_time = ktime_get_seconds() +
289                                              cfs_rand() % attr->u.drop.da_interval;
290                         rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
291                 }
292                 spin_unlock(&rule->dr_lock);
293         }
294
295         lnet_net_unlock(cpt);
296         EXIT;
297 }
298
/*
 * Pick a health status error to report for a dropped message.
 * A status is chosen at random; if it is not enabled in \a mask, the
 * nearest enabled status (by numeric distance) is used instead.
 */
static void
lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
{
	unsigned int random;
	int choice;
	int delta;
	int best_delta;
	int i;

	/* assign a random failure */
	random = cfs_rand();
	/* NOTE(review): choice is drawn from the LNET_MSG_STATUS_* range but
	 * is tested against HSTATUS_* bits below — confirm the two
	 * enumerations stay aligned */
	choice = random % (LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
	if (choice == 0)
		choice++;	/* never pick the OK slot; always a failure */

	if (mask == HSTATUS_RANDOM) {
		/* any failure status is acceptable */
		*hstatus = choice;
		return;
	}

	if (mask & (1 << choice)) {
		/* the randomly chosen status is enabled in the mask */
		*hstatus = choice;
		return;
	}

	/* round to the closest ON bit */
	i = HSTATUS_END;
	best_delta = HSTATUS_END;
	while (i > 0) {
		if (mask & (1 << i)) {
			/* track the enabled status nearest to choice */
			delta = choice - i;
			if (delta < 0)
				delta *= -1;
			if (delta < best_delta) {
				best_delta = delta;
				choice = i;
			}
		}
		i--;
	}

	*hstatus = choice;
}
342
343 /**
344  * check source/destination NID, portal, message type and drop rate,
345  * decide whether should drop this message or not
346  */
347 static bool
348 drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
349                 lnet_nid_t local_nid, lnet_nid_t dst,
350                 unsigned int type, unsigned int portal,
351                 enum lnet_msg_hstatus *hstatus)
352 {
353         struct lnet_fault_attr  *attr = &rule->dr_attr;
354         bool                     drop;
355
356         if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
357                 return false;
358
359         if (attr->u.drop.da_drop_all) {
360                 CDEBUG(D_NET, "set to drop all messages\n");
361                 drop = true;
362                 goto drop_matched;
363         }
364
365         /*
366          * if we're trying to match a health status error but it hasn't
367          * been set in the rule, then don't match
368          */
369         if ((hstatus && !attr->u.drop.da_health_error_mask) ||
370             (!hstatus && attr->u.drop.da_health_error_mask))
371                 return false;
372
373         /* match this rule, check drop rate now */
374         spin_lock(&rule->dr_lock);
375         if (attr->u.drop.da_random) {
376                 int value = cfs_rand() % attr->u.drop.da_interval;
377                 if (value >= (attr->u.drop.da_interval / 2))
378                         drop = true;
379                 else
380                         drop = false;
381         } else if (rule->dr_drop_time != 0) { /* time based drop */
382                 time64_t now = ktime_get_seconds();
383
384                 rule->dr_stat.fs_count++;
385                 drop = now >= rule->dr_drop_time;
386                 if (drop) {
387                         if (now > rule->dr_time_base)
388                                 rule->dr_time_base = now;
389
390                         rule->dr_drop_time = rule->dr_time_base +
391                                              cfs_rand() % attr->u.drop.da_interval;
392                         rule->dr_time_base += attr->u.drop.da_interval;
393
394                         CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
395                                libcfs_nid2str(attr->fa_src),
396                                libcfs_nid2str(attr->fa_dst),
397                                rule->dr_drop_time);
398                 }
399
400         } else { /* rate based drop */
401                 __u64 count;
402
403                 drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
404                 count = rule->dr_stat.fs_count;
405                 if (do_div(count, attr->u.drop.da_rate) == 0) {
406                         rule->dr_drop_at = rule->dr_stat.fs_count +
407                                            cfs_rand() % attr->u.drop.da_rate;
408                         CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
409                                libcfs_nid2str(attr->fa_src),
410                                libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
411                 }
412         }
413
414 drop_matched:
415
416         if (drop) { /* drop this message, update counters */
417                 if (hstatus)
418                         lnet_fault_match_health(hstatus,
419                                 attr->u.drop.da_health_error_mask);
420                 lnet_fault_stat_inc(&rule->dr_stat, type);
421                 rule->dr_stat.u.drop.ds_dropped++;
422         }
423
424         spin_unlock(&rule->dr_lock);
425         return drop;
426 }
427
428 /**
429  * Check if message from \a src to \a dst can match any existed drop rule
430  */
431 bool
432 lnet_drop_rule_match(struct lnet_hdr *hdr,
433                      lnet_nid_t local_nid,
434                      enum lnet_msg_hstatus *hstatus)
435 {
436         lnet_nid_t src = le64_to_cpu(hdr->src_nid);
437         lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
438         unsigned int typ = le32_to_cpu(hdr->type);
439         struct lnet_drop_rule *rule;
440         unsigned int ptl = -1;
441         bool drop = false;
442         int cpt;
443
444         /* NB: if Portal is specified, then only PUT and GET will be
445          * filtered by drop rule */
446         if (typ == LNET_MSG_PUT)
447                 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
448         else if (typ == LNET_MSG_GET)
449                 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
450
451         cpt = lnet_net_lock_current();
452         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
453                 drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
454                                        hstatus);
455                 if (drop)
456                         break;
457         }
458         lnet_net_unlock(cpt);
459
460         return drop;
461 }
462
463 /**
464  * LNet Delay Simulation
465  */
466 /** timestamp (second) to send delayed message */
467 #define msg_delay_send           msg_ev.hdr_data
468
struct lnet_delay_rule {
	/** link chain on the_lnet.ln_delay_rules */
	struct list_head	dl_link;
	/** link chain on delay_dd.dd_sched_rules */
	struct list_head	dl_sched_link;
	/** attributes of this rule */
	struct lnet_fault_attr	dl_attr;
	/** lock to protect \a below members */
	spinlock_t		dl_lock;
	/** refcount of delay rule */
	atomic_t		dl_refcount;
	/**
	 * the message sequence to delay, which means message is delayed when
	 * dl_stat.fs_count == dl_delay_at
	 */
	unsigned long		dl_delay_at;
	/**
	 * seconds to delay the next message, it's exclusive with dl_delay_at
	 */
	time64_t		dl_delay_time;
	/** baseline to calculate dl_delay_time */
	time64_t		dl_time_base;
	/** jiffies to send the next delayed message */
	unsigned long		dl_msg_send;
	/** delayed message list */
	struct list_head	dl_msg_list;
	/** statistic of delayed messages */
	struct lnet_fault_stat	dl_stat;
	/** timer to wakeup delay_daemon */
	struct timer_list	dl_timer;
};
500
struct delay_daemon_data {
	/** serialise rule add/remove */
	struct mutex		dd_mutex;
	/** protect rules on \a dd_sched_rules */
	spinlock_t		dd_lock;
	/** scheduled delay rules (by timer) */
	struct list_head	dd_sched_rules;
	/** daemon thread sleeps at here */
	wait_queue_head_t	dd_waitq;
	/** controller (lctl command) wait at here */
	wait_queue_head_t	dd_ctl_waitq;
	/** daemon is running */
	unsigned int		dd_running;
	/** daemon stopped */
	unsigned int		dd_stopped;
};
517
518 static struct delay_daemon_data delay_dd;
519
520 static void
521 delay_rule_decref(struct lnet_delay_rule *rule)
522 {
523         if (atomic_dec_and_test(&rule->dl_refcount)) {
524                 LASSERT(list_empty(&rule->dl_sched_link));
525                 LASSERT(list_empty(&rule->dl_msg_list));
526                 LASSERT(list_empty(&rule->dl_link));
527
528                 CFS_FREE_PTR(rule);
529         }
530 }
531
532 /**
533  * check source/destination NID, portal, message type and delay rate,
534  * decide whether should delay this message or not
535  */
static bool
delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
		lnet_nid_t dst, unsigned int type, unsigned int portal,
		struct lnet_msg *msg)
{
	struct lnet_fault_attr	*attr = &rule->dl_attr;
	bool			 delay;

	/* NIDs/type/portal must match first; delay rules do not filter on
	 * the local NID, hence LNET_NID_ANY */
	if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
				   dst, type, portal))
		return false;

	/* match this rule, check delay rate now */
	spin_lock(&rule->dl_lock);
	if (rule->dl_delay_time != 0) { /* time based delay */
		time64_t now = ktime_get_seconds();

		rule->dl_stat.fs_count++;
		delay = now >= rule->dl_delay_time;
		if (delay) {
			/* catch up if we slept past the current window */
			if (now > rule->dl_time_base)
				rule->dl_time_base = now;

			/* schedule the next delay at a random point within
			 * the following interval */
			rule->dl_delay_time = rule->dl_time_base +
					      cfs_rand() % attr->u.delay.la_interval;
			rule->dl_time_base += attr->u.delay.la_interval;

			CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst),
			       rule->dl_delay_time);
		}

	} else { /* rate based delay */
		__u64 count;

		delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
		/* generate the next random rate sequence */
		count = rule->dl_stat.fs_count;
		if (do_div(count, attr->u.delay.la_rate) == 0) {
			rule->dl_delay_at = rule->dl_stat.fs_count +
					    cfs_rand() % attr->u.delay.la_rate;
			CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
		}
	}

	if (!delay) {
		spin_unlock(&rule->dl_lock);
		return false;
	}

	/* delay this message, update counters */
	lnet_fault_stat_inc(&rule->dl_stat, type);
	rule->dl_stat.u.delay.ls_delayed++;

	/* queue the message; msg_delay_send is a seconds timestamp */
	list_add_tail(&msg->msg_list, &rule->dl_msg_list);
	msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
	if (rule->dl_msg_send == -1) {
		/* first queued message: arm the timer.
		 * NOTE(review): dl_msg_send holds a seconds timestamp here
		 * but mod_timer() expects a jiffies value — confirm the
		 * intended units */
		rule->dl_msg_send = msg->msg_delay_send;
		mod_timer(&rule->dl_timer, rule->dl_msg_send);
	}

	spin_unlock(&rule->dl_lock);
	return true;
}
603
604 /**
605  * check if \a msg can match any Delay Rule, receiving of this message
606  * will be delayed if there is a match.
607  */
608 bool
609 lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
610 {
611         struct lnet_delay_rule  *rule;
612         lnet_nid_t               src = le64_to_cpu(hdr->src_nid);
613         lnet_nid_t               dst = le64_to_cpu(hdr->dest_nid);
614         unsigned int             typ = le32_to_cpu(hdr->type);
615         unsigned int             ptl = -1;
616
617         /* NB: called with hold of lnet_net_lock */
618
619         /* NB: if Portal is specified, then only PUT and GET will be
620          * filtered by delay rule */
621         if (typ == LNET_MSG_PUT)
622                 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
623         else if (typ == LNET_MSG_GET)
624                 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
625
626         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
627                 if (delay_rule_match(rule, src, dst, typ, ptl, msg))
628                         return true;
629         }
630
631         return false;
632 }
633
634 /** check out delayed messages for send */
static void
delayed_msg_check(struct lnet_delay_rule *rule, bool all,
		  struct list_head *msg_list)
{
	struct lnet_msg *msg;
	struct lnet_msg *tmp;
	time64_t now = ktime_get_seconds();

	/* NOTE(review): dl_msg_send is assigned a seconds timestamp in
	 * delay_rule_match() but cfs_time_seconds() converts to jiffies —
	 * confirm this comparison uses consistent units */
	if (!all && cfs_time_seconds(rule->dl_msg_send) > now)
		return;

	spin_lock(&rule->dl_lock);
	/* messages are queued in send order: stop at the first one that is
	 * not yet due, unless flushing everything */
	list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
		if (!all && msg->msg_delay_send > now)
			break;

		msg->msg_delay_send = 0;
		list_move_tail(&msg->msg_list, msg_list);
	}

	if (list_empty(&rule->dl_msg_list)) {
		/* nothing left pending: disarm the timer */
		del_timer(&rule->dl_timer);
		rule->dl_msg_send = -1;

	} else if (!list_empty(msg_list)) {
		/* dequeued some timedout messages, update timer for the
		 * next delayed message on rule */
		msg = list_entry(rule->dl_msg_list.next,
				 struct lnet_msg, msg_list);
		rule->dl_msg_send = msg->msg_delay_send;
		mod_timer(&rule->dl_timer, rule->dl_msg_send);
	}
	spin_unlock(&rule->dl_lock);
}
669
/*
 * Resume (or cancel, if \a drop is true) every message on \a msg_list.
 * Local messages are re-parsed; routed messages are re-forwarded; any
 * failure path drops and finalizes the message.
 */
static void
delayed_msg_process(struct list_head *msg_list, bool drop)
{
	struct lnet_msg *msg;

	while (!list_empty(msg_list)) {
		struct lnet_ni *ni;
		int             cpt;
		int             rc;

		msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
		LASSERT(msg->msg_rxpeer != NULL);
		LASSERT(msg->msg_rxni != NULL);

		ni = msg->msg_rxni;
		cpt = msg->msg_rx_cpt;

		list_del_init(&msg->msg_list);
		if (drop) {
			/* rule being removed with drop: cancel the message */
			rc = -ECANCELED;

		} else if (!msg->msg_routing) {
			rc = lnet_parse_local(ni, msg);
			if (rc == 0)
				continue;

		} else {
			lnet_net_lock(cpt);
			rc = lnet_parse_forward_locked(ni, msg);
			lnet_net_unlock(cpt);

			switch (rc) {
			case LNET_CREDIT_OK:
				lnet_ni_recv(ni, msg->msg_private, msg, 0,
					     0, msg->msg_len, msg->msg_len);
				/* fallthrough */
			case LNET_CREDIT_WAIT:
				continue;
			default: /* failures */
				break;
			}
		}

		/* failure path: drop the message and finalize it */
		lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
				  msg->msg_type);
		lnet_finalize(msg, rc);
	}
}
717
718 /**
719  * Process delayed messages for scheduled rules
720  * This function can either be called by delay_rule_daemon, or by lnet_finalise
721  */
722 void
723 lnet_delay_rule_check(void)
724 {
725         struct lnet_delay_rule  *rule;
726         struct list_head         msgs;
727
728         INIT_LIST_HEAD(&msgs);
729         while (1) {
730                 if (list_empty(&delay_dd.dd_sched_rules))
731                         break;
732
733                 spin_lock_bh(&delay_dd.dd_lock);
734                 if (list_empty(&delay_dd.dd_sched_rules)) {
735                         spin_unlock_bh(&delay_dd.dd_lock);
736                         break;
737                 }
738
739                 rule = list_entry(delay_dd.dd_sched_rules.next,
740                                   struct lnet_delay_rule, dl_sched_link);
741                 list_del_init(&rule->dl_sched_link);
742                 spin_unlock_bh(&delay_dd.dd_lock);
743
744                 delayed_msg_check(rule, false, &msgs);
745                 delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
746         }
747
748         if (!list_empty(&msgs))
749                 delayed_msg_process(&msgs, false);
750 }
751
752 /** deamon thread to handle delayed messages */
753 static int
754 lnet_delay_rule_daemon(void *arg)
755 {
756         delay_dd.dd_running = 1;
757         wake_up(&delay_dd.dd_ctl_waitq);
758
759         while (delay_dd.dd_running) {
760                 wait_event_interruptible(delay_dd.dd_waitq,
761                                          !delay_dd.dd_running ||
762                                          !list_empty(&delay_dd.dd_sched_rules));
763                 lnet_delay_rule_check();
764         }
765
766         /* in case more rules have been enqueued after my last check */
767         lnet_delay_rule_check();
768         delay_dd.dd_stopped = 1;
769         wake_up(&delay_dd.dd_ctl_waitq);
770
771         return 0;
772 }
773
774 static void
775 delay_timer_cb(cfs_timer_cb_arg_t data)
776 {
777         struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);
778
779         spin_lock_bh(&delay_dd.dd_lock);
780         if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
781                 atomic_inc(&rule->dl_refcount);
782                 list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
783                 wake_up(&delay_dd.dd_waitq);
784         }
785         spin_unlock_bh(&delay_dd.dd_lock);
786 }
787
788 /**
789  * Add a new delay rule to LNet
790  * There is no check for duplicated delay rule, all rules will be checked for
791  * incoming message.
792  */
793 int
794 lnet_delay_rule_add(struct lnet_fault_attr *attr)
795 {
796         struct lnet_delay_rule *rule;
797         int                     rc = 0;
798         ENTRY;
799
800         if (!((attr->u.delay.la_rate == 0) ^
801               (attr->u.delay.la_interval == 0))) {
802                 CDEBUG(D_NET,
803                        "please provide either delay rate or delay interval, "
804                        "but not both at the same time %d/%d\n",
805                        attr->u.delay.la_rate, attr->u.delay.la_interval);
806                 RETURN(-EINVAL);
807         }
808
809         if (attr->u.delay.la_latency == 0) {
810                 CDEBUG(D_NET, "delay latency cannot be zero\n");
811                 RETURN(-EINVAL);
812         }
813
814         if (lnet_fault_attr_validate(attr) != 0)
815                 RETURN(-EINVAL);
816
817         CFS_ALLOC_PTR(rule);
818         if (rule == NULL)
819                 RETURN(-ENOMEM);
820
821         mutex_lock(&delay_dd.dd_mutex);
822         if (!delay_dd.dd_running) {
823                 struct task_struct *task;
824
825                 /* NB: although LND threads will process delayed message
826                  * in lnet_finalize, but there is no guarantee that LND
827                  * threads will be waken up if no other message needs to
828                  * be handled.
829                  * Only one daemon thread, performance is not the concern
830                  * of this simualation module.
831                  */
832                 task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
833                 if (IS_ERR(task)) {
834                         rc = PTR_ERR(task);
835                         GOTO(failed, rc);
836                 }
837                 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
838         }
839
840         cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
841                         (unsigned long)rule, 0);
842
843         spin_lock_init(&rule->dl_lock);
844         INIT_LIST_HEAD(&rule->dl_msg_list);
845         INIT_LIST_HEAD(&rule->dl_sched_link);
846
847         rule->dl_attr = *attr;
848         if (attr->u.delay.la_interval != 0) {
849                 rule->dl_time_base = ktime_get_seconds() +
850                                      attr->u.delay.la_interval;
851                 rule->dl_delay_time = ktime_get_seconds() +
852                                       cfs_rand() % attr->u.delay.la_interval;
853         } else {
854                 rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
855         }
856
857         rule->dl_msg_send = -1;
858
859         lnet_net_lock(LNET_LOCK_EX);
860         atomic_set(&rule->dl_refcount, 1);
861         list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
862         lnet_net_unlock(LNET_LOCK_EX);
863
864         CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
865                libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
866                attr->u.delay.la_rate);
867
868         mutex_unlock(&delay_dd.dd_mutex);
869         RETURN(0);
870  failed:
871         mutex_unlock(&delay_dd.dd_mutex);
872         CFS_FREE_PTR(rule);
873         return rc;
874 }
875
876 /**
877  * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src
878  * and \a dst are zero, all rules will be removed, otherwise only matched rules
879  * will be removed.
880  * If \a src is zero, then all rules have \a dst as destination will be remove
881  * If \a dst is zero, then all rules have \a src as source will be removed
882  *
883  * When a delay rule is removed, all delayed messages of this rule will be
884  * processed immediately.
885  */
886 int
887 lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
888 {
889         struct lnet_delay_rule *rule;
890         struct lnet_delay_rule  *tmp;
891         struct list_head        rule_list;
892         struct list_head        msg_list;
893         int                     n = 0;
894         bool                    cleanup;
895         ENTRY;
896
897         INIT_LIST_HEAD(&rule_list);
898         INIT_LIST_HEAD(&msg_list);
899
900         if (shutdown)
901                 src = dst = 0;
902
903         mutex_lock(&delay_dd.dd_mutex);
904         lnet_net_lock(LNET_LOCK_EX);
905
906         list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
907                 if (rule->dl_attr.fa_src != src && src != 0)
908                         continue;
909
910                 if (rule->dl_attr.fa_dst != dst && dst != 0)
911                         continue;
912
913                 CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
914                        libcfs_nid2str(rule->dl_attr.fa_src),
915                        libcfs_nid2str(rule->dl_attr.fa_dst),
916                        rule->dl_attr.u.delay.la_rate,
917                        rule->dl_attr.u.delay.la_interval);
918                 /* refcount is taken over by rule_list */
919                 list_move(&rule->dl_link, &rule_list);
920         }
921
922         /* check if we need to shutdown delay_daemon */
923         cleanup = list_empty(&the_lnet.ln_delay_rules) &&
924                   !list_empty(&rule_list);
925         lnet_net_unlock(LNET_LOCK_EX);
926
927         list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
928                 list_del_init(&rule->dl_link);
929
930                 del_timer_sync(&rule->dl_timer);
931                 delayed_msg_check(rule, true, &msg_list);
932                 delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
933                 n++;
934         }
935
936         if (cleanup) { /* no more delay rule, shutdown delay_daemon */
937                 LASSERT(delay_dd.dd_running);
938                 delay_dd.dd_running = 0;
939                 wake_up(&delay_dd.dd_waitq);
940
941                 while (!delay_dd.dd_stopped)
942                         wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
943         }
944         mutex_unlock(&delay_dd.dd_mutex);
945
946         if (!list_empty(&msg_list))
947                 delayed_msg_process(&msg_list, shutdown);
948
949         RETURN(n);
950 }
951
952 /**
953  * List Delay Rule at position of \a pos
954  */
955 int
956 lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
957                     struct lnet_fault_stat *stat)
958 {
959         struct lnet_delay_rule *rule;
960         int                     cpt;
961         int                     i = 0;
962         int                     rc = -ENOENT;
963         ENTRY;
964
965         cpt = lnet_net_lock_current();
966         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
967                 if (i++ < pos)
968                         continue;
969
970                 spin_lock(&rule->dl_lock);
971                 *attr = rule->dl_attr;
972                 *stat = rule->dl_stat;
973                 spin_unlock(&rule->dl_lock);
974                 rc = 0;
975                 break;
976         }
977
978         lnet_net_unlock(cpt);
979         RETURN(rc);
980 }
981
982 /**
983  * reset counters for all Delay Rules
984  */
985 void
986 lnet_delay_rule_reset(void)
987 {
988         struct lnet_delay_rule *rule;
989         int                     cpt;
990         ENTRY;
991
992         cpt = lnet_net_lock_current();
993
994         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
995                 struct lnet_fault_attr *attr = &rule->dl_attr;
996
997                 spin_lock(&rule->dl_lock);
998
999                 memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
1000                 if (attr->u.delay.la_rate != 0) {
1001                         rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
1002                 } else {
1003                         rule->dl_delay_time = ktime_get_seconds() +
1004                                               cfs_rand() % attr->u.delay.la_interval;
1005                         rule->dl_time_base = ktime_get_seconds() +
1006                                              attr->u.delay.la_interval;
1007                 }
1008                 spin_unlock(&rule->dl_lock);
1009         }
1010
1011         lnet_net_unlock(cpt);
1012         EXIT;
1013 }
1014
1015 int
1016 lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
1017 {
1018         struct lnet_fault_attr *attr;
1019         struct lnet_fault_stat *stat;
1020
1021         attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
1022
1023         switch (opc) {
1024         default:
1025                 return -EINVAL;
1026
1027         case LNET_CTL_DROP_ADD:
1028                 if (attr == NULL)
1029                         return -EINVAL;
1030
1031                 return lnet_drop_rule_add(attr);
1032
1033         case LNET_CTL_DROP_DEL:
1034                 if (attr == NULL)
1035                         return -EINVAL;
1036
1037                 data->ioc_count = lnet_drop_rule_del(attr->fa_src,
1038                                                      attr->fa_dst);
1039                 return 0;
1040
1041         case LNET_CTL_DROP_RESET:
1042                 lnet_drop_rule_reset();
1043                 return 0;
1044
1045         case LNET_CTL_DROP_LIST:
1046                 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1047                 if (attr == NULL || stat == NULL)
1048                         return -EINVAL;
1049
1050                 return lnet_drop_rule_list(data->ioc_count, attr, stat);
1051
1052         case LNET_CTL_DELAY_ADD:
1053                 if (attr == NULL)
1054                         return -EINVAL;
1055
1056                 return lnet_delay_rule_add(attr);
1057
1058         case LNET_CTL_DELAY_DEL:
1059                 if (attr == NULL)
1060                         return -EINVAL;
1061
1062                 data->ioc_count = lnet_delay_rule_del(attr->fa_src,
1063                                                       attr->fa_dst, false);
1064                 return 0;
1065
1066         case LNET_CTL_DELAY_RESET:
1067                 lnet_delay_rule_reset();
1068                 return 0;
1069
1070         case LNET_CTL_DELAY_LIST:
1071                 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1072                 if (attr == NULL || stat == NULL)
1073                         return -EINVAL;
1074
1075                 return lnet_delay_rule_list(data->ioc_count, attr, stat);
1076         }
1077 }
1078
1079 int
1080 lnet_fault_init(void)
1081 {
1082         CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
1083         CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
1084         CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
1085         CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);
1086
1087         mutex_init(&delay_dd.dd_mutex);
1088         spin_lock_init(&delay_dd.dd_lock);
1089         init_waitqueue_head(&delay_dd.dd_waitq);
1090         init_waitqueue_head(&delay_dd.dd_ctl_waitq);
1091         INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
1092
1093         return 0;
1094 }
1095
/*
 * Tear down the fault simulation module: remove every drop and delay
 * rule, then assert that nothing was left behind.
 */
void
lnet_fault_fini(void)
{
	/* (0, 0) is the wildcard match that removes all rules; passing
	 * shutdown=true to the delay delete also stops the delay daemon
	 * and flushes any still-delayed messages */
	lnet_drop_rule_del(0, 0);
	lnet_delay_rule_del(0, 0, true);

	LASSERT(list_empty(&the_lnet.ln_drop_rules));
	LASSERT(list_empty(&the_lnet.ln_delay_rules));
	LASSERT(list_empty(&delay_dd.dd_sched_rules));
}