LU-14627 lnet: Allow delayed sends
lnet/lnet/net_fault.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; if not, write to the
18  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19  * Boston, MA 021110-1307, USA
20  *
21  * GPL HEADER END
22  */
23 /*
24  * Copyright (c) 2014, 2017, Intel Corporation.
25  */
26 /*
27  * This file is part of Lustre, http://www.lustre.org/
28  *
29  * lnet/lnet/net_fault.c
30  *
31  * Lustre network fault simulation
32  *
33  * Author: liang.zhen@intel.com
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/random.h>
39 #include <lnet/lib-lnet.h>
40 #include <uapi/linux/lnet/lnetctl.h>
41
42 #define LNET_MSG_MASK           (LNET_PUT_BIT | LNET_ACK_BIT | \
43                                  LNET_GET_BIT | LNET_REPLY_BIT)
44
45 struct lnet_drop_rule {
46         /** link chain on the_lnet.ln_drop_rules */
47         struct list_head        dr_link;
48         /** attributes of this rule */
49         struct lnet_fault_attr  dr_attr;
50         /** lock to protect \a dr_drop_at and \a dr_stat */
51         spinlock_t              dr_lock;
52         /**
53          * the message sequence to drop, which means the message is dropped when
54          * dr_stat.fs_count == dr_drop_at
55          */
56         unsigned long           dr_drop_at;
57         /**
58          * seconds to drop the next message; mutually exclusive with dr_drop_at
59          */
60         time64_t                dr_drop_time;
61         /** baseline to calculate dr_drop_time */
62         time64_t                dr_time_base;
63         /** statistics of dropped messages */
64         struct lnet_fault_stat  dr_stat;
65 };
66
67 static bool
68 lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
69 {
70         if (nid == msg_nid || nid == LNET_NID_ANY)
71                 return true;
72
73         if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
74                 return false;
75
76         /* 255.255.255.255@net is wildcard for all addresses in a network */
77         return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
78 }
79
80 static bool
81 lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
82                       lnet_nid_t local_nid, lnet_nid_t dst,
83                       unsigned int type, unsigned int portal)
84 {
85         if (!lnet_fault_nid_match(attr->fa_src, src) ||
86             !lnet_fault_nid_match(attr->fa_dst, dst) ||
87             !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
88                 return false;
89
90         if (!(attr->fa_msg_mask & BIT(type)))
91                 return false;
92
93         /* NB: ACK and REPLY have no portal, but they should have been
94          * rejected by the message mask */
95         if (attr->fa_ptl_mask != 0 && /* has portal filter */
96             !(attr->fa_ptl_mask & (1ULL << portal)))
97                 return false;
98
99         return true;
100 }
101
102 static int
103 lnet_fault_attr_validate(struct lnet_fault_attr *attr)
104 {
105         if (attr->fa_msg_mask == 0)
106                 attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
107
108         if (attr->fa_ptl_mask == 0) /* no portal filter */
109                 return 0;
110
111         /* NB: only PUT and GET can be filtered if portal filter has been set */
112         attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
113         if (attr->fa_msg_mask == 0) {
114                 CDEBUG(D_NET, "can't find valid message type bits %x\n",
115                        attr->fa_msg_mask);
116                 return -EINVAL;
117         }
118         return 0;
119 }
120
121 static void
122 lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
123 {
124         /* NB: fs_count is NOT updated by this function */
125         switch (type) {
126         case LNET_MSG_PUT:
127                 stat->fs_put++;
128                 return;
129         case LNET_MSG_ACK:
130                 stat->fs_ack++;
131                 return;
132         case LNET_MSG_GET:
133                 stat->fs_get++;
134                 return;
135         case LNET_MSG_REPLY:
136                 stat->fs_reply++;
137                 return;
138         }
139 }
140
141 /**
142  * LNet message drop simulation
143  */
144
145 /**
146  * Add a new drop rule to LNet
147  * There is no check for duplicate drop rules; all rules will be checked for
148  * each incoming message.
149  */
150 static int
151 lnet_drop_rule_add(struct lnet_fault_attr *attr)
152 {
153         struct lnet_drop_rule *rule;
154         ENTRY;
155
156         if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
157                 CDEBUG(D_NET,
158                        "please provide either drop rate or drop interval, "
159                        "but not both at the same time %d/%d\n",
160                        attr->u.drop.da_rate, attr->u.drop.da_interval);
161                 RETURN(-EINVAL);
162         }
163
164         if (lnet_fault_attr_validate(attr) != 0)
165                 RETURN(-EINVAL);
166
167         CFS_ALLOC_PTR(rule);
168         if (rule == NULL)
169                 RETURN(-ENOMEM);
170
171         spin_lock_init(&rule->dr_lock);
172
173         rule->dr_attr = *attr;
174         if (attr->u.drop.da_interval != 0) {
175                 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
176                 rule->dr_drop_time = ktime_get_seconds() +
177                                      prandom_u32_max(attr->u.drop.da_interval);
178         } else {
179                 rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
180         }
181
182         lnet_net_lock(LNET_LOCK_EX);
183         list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
184         lnet_net_unlock(LNET_LOCK_EX);
185
186         CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
187                libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
188                attr->u.drop.da_rate, attr->u.drop.da_interval);
189         RETURN(0);
190 }
191
192 /**
193  * Remove matched drop rules from LNet; all rules that can match \a src and
194  * \a dst will be removed.
195  * If \a src is zero, then all rules with \a dst as destination will be removed.
196  * If \a dst is zero, then all rules with \a src as source will be removed.
197  * If both of them are zero, all rules will be removed.
198  */
199 static int
200 lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
201 {
202         struct lnet_drop_rule *rule;
203         struct lnet_drop_rule *tmp;
204         LIST_HEAD(zombies);
205         int n = 0;
206         ENTRY;
207
208         lnet_net_lock(LNET_LOCK_EX);
209         list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
210                 if (rule->dr_attr.fa_src != src && src != 0)
211                         continue;
212
213                 if (rule->dr_attr.fa_dst != dst && dst != 0)
214                         continue;
215
216                 list_move(&rule->dr_link, &zombies);
217         }
218         lnet_net_unlock(LNET_LOCK_EX);
219
220         list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
221                 CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
222                        libcfs_nid2str(rule->dr_attr.fa_src),
223                        libcfs_nid2str(rule->dr_attr.fa_dst),
224                        rule->dr_attr.u.drop.da_rate,
225                        rule->dr_attr.u.drop.da_interval);
226
227                 list_del(&rule->dr_link);
228                 CFS_FREE_PTR(rule);
229                 n++;
230         }
231
232         RETURN(n);
233 }
234
235 /**
236  * List the drop rule at position \a pos
237  */
238 static int
239 lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
240                     struct lnet_fault_stat *stat)
241 {
242         struct lnet_drop_rule *rule;
243         int                    cpt;
244         int                    i = 0;
245         int                    rc = -ENOENT;
246         ENTRY;
247
248         cpt = lnet_net_lock_current();
249         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
250                 if (i++ < pos)
251                         continue;
252
253                 spin_lock(&rule->dr_lock);
254                 *attr = rule->dr_attr;
255                 *stat = rule->dr_stat;
256                 spin_unlock(&rule->dr_lock);
257                 rc = 0;
258                 break;
259         }
260
261         lnet_net_unlock(cpt);
262         RETURN(rc);
263 }
264
265 /**
266  * reset counters for all drop rules
267  */
268 static void
269 lnet_drop_rule_reset(void)
270 {
271         struct lnet_drop_rule *rule;
272         int                    cpt;
273         ENTRY;
274
275         cpt = lnet_net_lock_current();
276
277         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
278                 struct lnet_fault_attr *attr = &rule->dr_attr;
279
280                 spin_lock(&rule->dr_lock);
281
282                 memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
283                 if (attr->u.drop.da_rate != 0) {
284                         rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
285                 } else {
286                         rule->dr_drop_time = ktime_get_seconds() +
287                                              prandom_u32_max(attr->u.drop.da_interval);
288                         rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
289                 }
290                 spin_unlock(&rule->dr_lock);
291         }
292
293         lnet_net_unlock(cpt);
294         EXIT;
295 }
296
297 static void
298 lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
299 {
300         int choice;
301         int delta;
302         int best_delta;
303         int i;
304
305         /* assign a random failure */
306         choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
307         if (choice == 0)
308                 choice++;
309
310         if (mask == HSTATUS_RANDOM) {
311                 *hstatus = choice;
312                 return;
313         }
314
315         if (mask & BIT(choice)) {
316                 *hstatus = choice;
317                 return;
318         }
319
320         /* round to the closest ON bit */
321         i = HSTATUS_END;
322         best_delta = HSTATUS_END;
323         while (i > 0) {
324                 if (mask & BIT(i)) {
325                         delta = choice - i;
326                         if (delta < 0)
327                                 delta *= -1;
328                         if (delta < best_delta) {
329                                 best_delta = delta;
330                                 choice = i;
331                         }
332                 }
333                 i--;
334         }
335
336         *hstatus = choice;
337 }
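
/*
 * Illustrative sketch (not part of this module): lnet_fault_match_health()
 * above picks a random health status and, when that status bit is not set
 * in the user-supplied mask, falls back to the nearest set bit.  A minimal
 * user-space model of the same "round to the closest ON bit" step, assuming
 * plain rand()/abs() in place of the kernel helpers:
 */
#if 0	/* example only, never built as part of LNet */
#include <stdlib.h>

/* return the set bit in @mask closest to @choice; ties favour the higher
 * bit, matching the downward scan in lnet_fault_match_health() */
static int round_to_nearest_set_bit(unsigned int mask, int choice, int nbits)
{
	int best = choice;
	int best_delta = nbits;
	int i;

	for (i = nbits; i > 0; i--) {
		if (mask & (1U << i)) {
			int delta = abs(choice - i);

			if (delta < best_delta) {
				best_delta = delta;
				best = i;
			}
		}
	}
	return best;
}
#endif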
338
339 /**
340  * check source/destination NID, portal, message type and drop rate, and
341  * decide whether this message should be dropped or not
342  */
343 static bool
344 drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
345                 lnet_nid_t local_nid, lnet_nid_t dst,
346                 unsigned int type, unsigned int portal,
347                 enum lnet_msg_hstatus *hstatus)
348 {
349         struct lnet_fault_attr  *attr = &rule->dr_attr;
350         bool                     drop;
351
352         if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
353                 return false;
354
355         if (attr->u.drop.da_drop_all) {
356                 CDEBUG(D_NET, "set to drop all messages\n");
357                 drop = true;
358                 spin_lock(&rule->dr_lock); /* for the stat update below */
359                 goto drop_matched;
360         }
360
361         /*
362          * if we're trying to match a health status error but it hasn't
363          * been set in the rule, then don't match
364          */
365         if ((hstatus && !attr->u.drop.da_health_error_mask) ||
366             (!hstatus && attr->u.drop.da_health_error_mask))
367                 return false;
368
369         /* match this rule, check drop rate now */
370         spin_lock(&rule->dr_lock);
371         if (attr->u.drop.da_random) {
372                 int value = prandom_u32_max(attr->u.drop.da_interval);
373                 if (value >= (attr->u.drop.da_interval / 2))
374                         drop = true;
375                 else
376                         drop = false;
377         } else if (rule->dr_drop_time != 0) { /* time based drop */
378                 time64_t now = ktime_get_seconds();
379
380                 rule->dr_stat.fs_count++;
381                 drop = now >= rule->dr_drop_time;
382                 if (drop) {
383                         if (now > rule->dr_time_base)
384                                 rule->dr_time_base = now;
385
386                         rule->dr_drop_time = rule->dr_time_base +
387                                              prandom_u32_max(attr->u.drop.da_interval);
388                         rule->dr_time_base += attr->u.drop.da_interval;
389
390                         CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
391                                libcfs_nid2str(attr->fa_src),
392                                libcfs_nid2str(attr->fa_dst),
393                                rule->dr_drop_time);
394                 }
395
396         } else { /* rate based drop */
397                 __u64 count;
398
399                 drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
400                 count = rule->dr_stat.fs_count;
401                 if (do_div(count, attr->u.drop.da_rate) == 0) {
402                         rule->dr_drop_at = rule->dr_stat.fs_count +
403                                            prandom_u32_max(attr->u.drop.da_rate);
404                         CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
405                                libcfs_nid2str(attr->fa_src),
406                                libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
407                 }
408         }
409
410 drop_matched:
411
412         if (drop) { /* drop this message, update counters */
413                 if (hstatus)
414                         lnet_fault_match_health(hstatus,
415                                 attr->u.drop.da_health_error_mask);
416                 lnet_fault_stat_inc(&rule->dr_stat, type);
417                 rule->dr_stat.u.drop.ds_dropped++;
418         }
419
420         spin_unlock(&rule->dr_lock);
421         return drop;
422 }
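
/*
 * Illustrative sketch (not part of this module): the rate-based branch of
 * drop_rule_match() above drops exactly one message per window of da_rate
 * matching messages, at a uniformly random position within each window, by
 * recomputing dr_drop_at whenever fs_count crosses a window boundary.  A
 * stand-alone user-space model of that bookkeeping, assuming rand() in
 * place of prandom_u32_max():
 */
#if 0	/* example only, never built as part of LNet */
#include <stdbool.h>
#include <stdlib.h>

struct rate_drop_state {
	unsigned long count;	/* messages seen so far (fs_count) */
	unsigned long drop_at;	/* sequence number of next drop (dr_drop_at) */
};

/* initialise with drop_at picked uniformly in [0, rate) */
static void rate_drop_init(struct rate_drop_state *s, unsigned int rate)
{
	s->count = 0;
	s->drop_at = (unsigned int)rand() % rate;
}

/* returns true for exactly one message in every window of @rate messages */
static bool rate_drop_check(struct rate_drop_state *s, unsigned int rate)
{
	bool drop = (s->count++ == s->drop_at);

	if (s->count % rate == 0)	/* window boundary: pick the next drop */
		s->drop_at = s->count + (unsigned int)rand() % rate;
	return drop;
}
#endif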
423
424 /**
425  * Check if a message from \a src to \a dst can match any existing drop rule
426  */
427 bool
428 lnet_drop_rule_match(struct lnet_hdr *hdr,
429                      lnet_nid_t local_nid,
430                      enum lnet_msg_hstatus *hstatus)
431 {
432         lnet_nid_t src = le64_to_cpu(hdr->src_nid);
433         lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
434         unsigned int typ = le32_to_cpu(hdr->type);
435         struct lnet_drop_rule *rule;
436         unsigned int ptl = -1;
437         bool drop = false;
438         int cpt;
439
440         /* NB: if a portal is specified, then only PUT and GET will be
441          * filtered by the drop rule */
442         if (typ == LNET_MSG_PUT)
443                 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
444         else if (typ == LNET_MSG_GET)
445                 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
446
447         cpt = lnet_net_lock_current();
448         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
449                 drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
450                                        hstatus);
451                 if (drop)
452                         break;
453         }
454         lnet_net_unlock(cpt);
455
456         return drop;
457 }
458
459 /**
460  * LNet Delay Simulation
461  */
462 /** timestamp (in seconds) at which to send the delayed message */
463 #define msg_delay_send           msg_ev.hdr_data
464
465 struct lnet_delay_rule {
466         /** link chain on the_lnet.ln_delay_rules */
467         struct list_head        dl_link;
468         /** link chain on delay_dd.dd_sched_rules */
469         struct list_head        dl_sched_link;
470         /** attributes of this rule */
471         struct lnet_fault_attr  dl_attr;
472         /** lock to protect the members below */
473         spinlock_t              dl_lock;
474         /** refcount of delay rule */
475         atomic_t                dl_refcount;
476         /**
477          * the message sequence to delay, which means the message is delayed when
478          * dl_stat.fs_count == dl_delay_at
479          */
480         unsigned long           dl_delay_at;
481         /**
482          * seconds to delay the next message; mutually exclusive with dl_delay_at
483          */
484         time64_t                dl_delay_time;
485         /** baseline to calculate dl_delay_time */
486         time64_t                dl_time_base;
487         /** timestamp (in seconds) to send the next delayed message */
488         time64_t                dl_msg_send;
489         /** delayed message list */
490         struct list_head        dl_msg_list;
491         /** statistics of delayed messages */
492         struct lnet_fault_stat  dl_stat;
493         /** timer to wakeup delay_daemon */
494         struct timer_list       dl_timer;
495 };
496
497 struct delay_daemon_data {
498         /** serialise rule add/remove */
499         struct mutex            dd_mutex;
500         /** protect rules on \a dd_sched_rules */
501         spinlock_t              dd_lock;
502         /** scheduled delay rules (by timer) */
503         struct list_head        dd_sched_rules;
504         /** daemon thread sleeps here */
505         wait_queue_head_t       dd_waitq;
506         /** controller (lctl command) waits here */
507         wait_queue_head_t       dd_ctl_waitq;
508         /** daemon is running */
509         unsigned int            dd_running;
510         /** daemon has stopped */
511         unsigned int            dd_stopped;
512 };
513
514 static struct delay_daemon_data delay_dd;
515
516 static void
517 delay_rule_decref(struct lnet_delay_rule *rule)
518 {
519         if (atomic_dec_and_test(&rule->dl_refcount)) {
520                 LASSERT(list_empty(&rule->dl_sched_link));
521                 LASSERT(list_empty(&rule->dl_msg_list));
522                 LASSERT(list_empty(&rule->dl_link));
523
524                 CFS_FREE_PTR(rule);
525         }
526 }
527
528 /**
529  * check source/destination NID, portal, message type and delay rate, and
530  * decide whether this message should be delayed or not
531  */
532 static bool
533 delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
534                 lnet_nid_t dst, unsigned int type, unsigned int portal,
535                 struct lnet_msg *msg)
536 {
537         struct lnet_fault_attr *attr = &rule->dl_attr;
538         bool delay;
539         time64_t now = ktime_get_seconds();
540
541         if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
542                                    dst, type, portal))
543                 return false;
544
545         /* match this rule, check delay rate now */
546         spin_lock(&rule->dl_lock);
547         if (rule->dl_delay_time != 0) { /* time based delay */
548                 rule->dl_stat.fs_count++;
549                 delay = now >= rule->dl_delay_time;
550                 if (delay) {
551                         if (now > rule->dl_time_base)
552                                 rule->dl_time_base = now;
553
554                         rule->dl_delay_time = rule->dl_time_base +
555                                               prandom_u32_max(attr->u.delay.la_interval);
556                         rule->dl_time_base += attr->u.delay.la_interval;
557
558                         CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
559                                libcfs_nid2str(attr->fa_src),
560                                libcfs_nid2str(attr->fa_dst),
561                                rule->dl_delay_time);
562                 }
563
564         } else { /* rate based delay */
565                 __u64 count;
566
567                 delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
568                 /* generate the next random rate sequence */
569                 count = rule->dl_stat.fs_count;
570                 if (do_div(count, attr->u.delay.la_rate) == 0) {
571                         rule->dl_delay_at = rule->dl_stat.fs_count +
572                                             prandom_u32_max(attr->u.delay.la_rate);
573                         CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
574                                libcfs_nid2str(attr->fa_src),
575                                libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
576                 }
577         }
578
579         if (!delay) {
580                 spin_unlock(&rule->dl_lock);
581                 return false;
582         }
583
584         /* delay this message, update counters */
585         lnet_fault_stat_inc(&rule->dl_stat, type);
586         rule->dl_stat.u.delay.ls_delayed++;
587
588         list_add_tail(&msg->msg_list, &rule->dl_msg_list);
589         msg->msg_delay_send = now + attr->u.delay.la_latency;
590         if (rule->dl_msg_send == -1) {
591                 rule->dl_msg_send = msg->msg_delay_send;
592                 mod_timer(&rule->dl_timer,
593                           jiffies + cfs_time_seconds(attr->u.delay.la_latency));
594         }
595
596         spin_unlock(&rule->dl_lock);
597         return true;
598 }
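
/*
 * Illustrative sketch (not part of this module): the time-based branch of
 * delay_rule_match() above (and the matching branch of drop_rule_match())
 * aims for roughly one event per la_interval seconds while matching
 * messages keep arriving: dl_time_base advances by exactly one interval per
 * event, and the next trigger time is a uniformly random offset within that
 * interval.  A minimal user-space model, assuming time() and rand() in
 * place of ktime_get_seconds() and prandom_u32_max():
 */
#if 0	/* example only, never built as part of LNet */
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

struct interval_state {
	time_t time_base;	/* start of the current window (dl_time_base) */
	time_t next_event;	/* absolute trigger time (dl_delay_time) */
};

static void interval_init(struct interval_state *s, unsigned int interval)
{
	time_t now = time(NULL);

	s->time_base = now + interval;
	s->next_event = now + (unsigned int)rand() % interval;
}

/* returns true when the current message should trigger the fault */
static bool interval_check(struct interval_state *s, unsigned int interval)
{
	time_t now = time(NULL);

	if (now < s->next_event)
		return false;

	if (now > s->time_base)		/* fell behind: catch up */
		s->time_base = now;
	s->next_event = s->time_base + (unsigned int)rand() % interval;
	s->time_base += interval;	/* move on to the next window */
	return true;
}
#endif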
599
600 /**
601  * check if \a msg can match any Delay Rule; if there is a match, receiving
602  * of this message will be delayed.
603  */
604 bool
605 lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
606 {
607         struct lnet_delay_rule  *rule;
608         lnet_nid_t               src = le64_to_cpu(hdr->src_nid);
609         lnet_nid_t               dst = le64_to_cpu(hdr->dest_nid);
610         unsigned int             typ = le32_to_cpu(hdr->type);
611         unsigned int             ptl = -1;
612
613         /* NB: called with hold of lnet_net_lock */
614
615         /* NB: if a portal is specified, then only PUT and GET will be
616          * filtered by the delay rule */
617         if (typ == LNET_MSG_PUT)
618                 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
619         else if (typ == LNET_MSG_GET)
620                 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
621
622         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
623                 if (delay_rule_match(rule, src, dst, typ, ptl, msg))
624                         return true;
625         }
626
627         return false;
628 }
629
630 /** check for delayed messages that are ready to send */
631 static void
632 delayed_msg_check(struct lnet_delay_rule *rule, bool all,
633                   struct list_head *msg_list)
634 {
635         struct lnet_msg *msg;
636         struct lnet_msg *tmp;
637         time64_t now = ktime_get_seconds();
638
639         if (!all && rule->dl_msg_send > now)
640                 return;
641
642         spin_lock(&rule->dl_lock);
643         list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
644                 if (!all && msg->msg_delay_send > now)
645                         break;
646
647                 msg->msg_delay_send = 0;
648                 list_move_tail(&msg->msg_list, msg_list);
649         }
650
651         if (list_empty(&rule->dl_msg_list)) {
652                 del_timer(&rule->dl_timer);
653                 rule->dl_msg_send = -1;
654
655         } else if (!list_empty(msg_list)) {
656                 /* dequeued some timed-out messages; update the timer for the
657                  * next delayed message on this rule */
658                 msg = list_entry(rule->dl_msg_list.next,
659                                  struct lnet_msg, msg_list);
660                 rule->dl_msg_send = msg->msg_delay_send;
661                 mod_timer(&rule->dl_timer,
662                           jiffies +
663                           cfs_time_seconds(msg->msg_delay_send - now));
664         }
665         spin_unlock(&rule->dl_lock);
666 }
667
668 static void
669 delayed_msg_process(struct list_head *msg_list, bool drop)
670 {
671         struct lnet_msg *msg;
672
673         while (!list_empty(msg_list)) {
674                 struct lnet_ni *ni;
675                 int             cpt;
676                 int             rc;
677
678                 msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
679
680                 if (msg->msg_sending) {
681                         /* Delayed send */
682                         list_del_init(&msg->msg_list);
683                         ni = msg->msg_txni;
684                         CDEBUG(D_NET, "TRACE: msg %p %s -> %s : %s\n", msg,
685                                libcfs_nid2str(ni->ni_nid),
686                                libcfs_nid2str(msg->msg_txpeer->lpni_nid),
687                                lnet_msgtyp2str(msg->msg_type));
688                         lnet_ni_send(ni, msg);
689                         continue;
690                 }
691
692                 /* Delayed receive */
693                 LASSERT(msg->msg_rxpeer != NULL);
694                 LASSERT(msg->msg_rxni != NULL);
695
696                 ni = msg->msg_rxni;
697                 cpt = msg->msg_rx_cpt;
698
699                 list_del_init(&msg->msg_list);
700                 if (drop) {
701                         rc = -ECANCELED;
702
703                 } else if (!msg->msg_routing) {
704                         rc = lnet_parse_local(ni, msg);
705                         if (rc == 0)
706                                 continue;
707
708                 } else {
709                         lnet_net_lock(cpt);
710                         rc = lnet_parse_forward_locked(ni, msg);
711                         lnet_net_unlock(cpt);
712
713                         switch (rc) {
714                         case LNET_CREDIT_OK:
715                                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
716                                              0, msg->msg_len, msg->msg_len);
717                                 /* fallthrough */
718                         case LNET_CREDIT_WAIT:
719                                 continue;
720                         default: /* failures */
721                                 break;
722                         }
723                 }
724
725                 lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
726                                   msg->msg_type);
727                 lnet_finalize(msg, rc);
728         }
729 }
730
731 /**
732  * Process delayed messages for scheduled rules.
733  * This function can be called either by the delay rule daemon or by lnet_finalize().
734  */
735 void
736 lnet_delay_rule_check(void)
737 {
738         struct lnet_delay_rule *rule;
739         LIST_HEAD(msgs);
740
741         while (1) {
742                 if (list_empty(&delay_dd.dd_sched_rules))
743                         break;
744
745                 spin_lock_bh(&delay_dd.dd_lock);
746                 if (list_empty(&delay_dd.dd_sched_rules)) {
747                         spin_unlock_bh(&delay_dd.dd_lock);
748                         break;
749                 }
750
751                 rule = list_entry(delay_dd.dd_sched_rules.next,
752                                   struct lnet_delay_rule, dl_sched_link);
753                 list_del_init(&rule->dl_sched_link);
754                 spin_unlock_bh(&delay_dd.dd_lock);
755
756                 delayed_msg_check(rule, false, &msgs);
757                 delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
758         }
759
760         if (!list_empty(&msgs))
761                 delayed_msg_process(&msgs, false);
762 }
763
764 /** daemon thread to handle delayed messages */
765 static int
766 lnet_delay_rule_daemon(void *arg)
767 {
768         delay_dd.dd_running = 1;
769         wake_up(&delay_dd.dd_ctl_waitq);
770
771         while (delay_dd.dd_running) {
772                 wait_event_interruptible(delay_dd.dd_waitq,
773                                          !delay_dd.dd_running ||
774                                          !list_empty(&delay_dd.dd_sched_rules));
775                 lnet_delay_rule_check();
776         }
777
778         /* in case more rules have been enqueued after my last check */
779         lnet_delay_rule_check();
780         delay_dd.dd_stopped = 1;
781         wake_up(&delay_dd.dd_ctl_waitq);
782
783         return 0;
784 }
785
786 static void
787 delay_timer_cb(cfs_timer_cb_arg_t data)
788 {
789         struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);
790
791         spin_lock_bh(&delay_dd.dd_lock);
792         if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
793                 atomic_inc(&rule->dl_refcount);
794                 list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
795                 wake_up(&delay_dd.dd_waitq);
796         }
797         spin_unlock_bh(&delay_dd.dd_lock);
798 }
799
800 /**
801  * Add a new delay rule to LNet
802  * There is no check for duplicate delay rules; all rules will be checked for
803  * each incoming message.
804  */
805 int
806 lnet_delay_rule_add(struct lnet_fault_attr *attr)
807 {
808         struct lnet_delay_rule *rule;
809         int                     rc = 0;
810         ENTRY;
811
812         if (!((attr->u.delay.la_rate == 0) ^
813               (attr->u.delay.la_interval == 0))) {
814                 CDEBUG(D_NET,
815                        "please provide either delay rate or delay interval, "
816                        "but not both at the same time %d/%d\n",
817                        attr->u.delay.la_rate, attr->u.delay.la_interval);
818                 RETURN(-EINVAL);
819         }
820
821         if (attr->u.delay.la_latency == 0) {
822                 CDEBUG(D_NET, "delay latency cannot be zero\n");
823                 RETURN(-EINVAL);
824         }
825
826         if (lnet_fault_attr_validate(attr) != 0)
827                 RETURN(-EINVAL);
828
829         CFS_ALLOC_PTR(rule);
830         if (rule == NULL)
831                 RETURN(-ENOMEM);
832
833         mutex_lock(&delay_dd.dd_mutex);
834         if (!delay_dd.dd_running) {
835                 struct task_struct *task;
836
837                 /* NB: although LND threads will process delayed messages
838                  * in lnet_finalize(), there is no guarantee that LND
839                  * threads will be woken up if no other message needs to
840                  * be handled.
841                  * There is only one daemon thread; performance is not a
842                  * concern for this simulation module.
843                  */
844                 task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
845                 if (IS_ERR(task)) {
846                         rc = PTR_ERR(task);
847                         GOTO(failed, rc);
848                 }
849                 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
850         }
851
852         cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
853                         (unsigned long)rule, 0);
854
855         spin_lock_init(&rule->dl_lock);
856         INIT_LIST_HEAD(&rule->dl_msg_list);
857         INIT_LIST_HEAD(&rule->dl_sched_link);
858
859         rule->dl_attr = *attr;
860         if (attr->u.delay.la_interval != 0) {
861                 rule->dl_time_base = ktime_get_seconds() +
862                                      attr->u.delay.la_interval;
863                 rule->dl_delay_time = ktime_get_seconds() +
864                                       prandom_u32_max(attr->u.delay.la_interval);
865         } else {
866                 rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
867         }
868
869         rule->dl_msg_send = -1;
870
871         lnet_net_lock(LNET_LOCK_EX);
872         atomic_set(&rule->dl_refcount, 1);
873         list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
874         lnet_net_unlock(LNET_LOCK_EX);
875
876         CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
877                libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
878                attr->u.delay.la_rate);
879
880         mutex_unlock(&delay_dd.dd_mutex);
881         RETURN(0);
882  failed:
883         mutex_unlock(&delay_dd.dd_mutex);
884         CFS_FREE_PTR(rule);
885         return rc;
886 }
887
888 /**
889  * Remove matched delay rules from LNet. If \a shutdown is true, or both \a src
890  * and \a dst are zero, all rules will be removed; otherwise only matched rules
891  * will be removed.
892  * If \a src is zero, then all rules with \a dst as destination will be removed.
893  * If \a dst is zero, then all rules with \a src as source will be removed.
894  *
895  * When a delay rule is removed, all delayed messages of this rule will be
896  * processed immediately.
897  */
898 int
899 lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
900 {
901         struct lnet_delay_rule *rule;
902         struct lnet_delay_rule *tmp;
903         LIST_HEAD(rule_list);
904         LIST_HEAD(msg_list);
905         int n = 0;
906         bool cleanup;
907         ENTRY;
908
909         if (shutdown)
910                 src = dst = 0;
911
912         mutex_lock(&delay_dd.dd_mutex);
913         lnet_net_lock(LNET_LOCK_EX);
914
915         list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
916                 if (rule->dl_attr.fa_src != src && src != 0)
917                         continue;
918
919                 if (rule->dl_attr.fa_dst != dst && dst != 0)
920                         continue;
921
922                 CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
923                        libcfs_nid2str(rule->dl_attr.fa_src),
924                        libcfs_nid2str(rule->dl_attr.fa_dst),
925                        rule->dl_attr.u.delay.la_rate,
926                        rule->dl_attr.u.delay.la_interval);
927                 /* refcount is taken over by rule_list */
928                 list_move(&rule->dl_link, &rule_list);
929         }
930
931         /* check if we need to shutdown delay_daemon */
932         cleanup = list_empty(&the_lnet.ln_delay_rules) &&
933                   !list_empty(&rule_list);
934         lnet_net_unlock(LNET_LOCK_EX);
935
936         list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
937                 list_del_init(&rule->dl_link);
938
939                 del_timer_sync(&rule->dl_timer);
940                 delayed_msg_check(rule, true, &msg_list);
941                 delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
942                 n++;
943         }
944
945         if (cleanup) { /* no more delay rule, shutdown delay_daemon */
946                 LASSERT(delay_dd.dd_running);
947                 delay_dd.dd_running = 0;
948                 wake_up(&delay_dd.dd_waitq);
949
950                 while (!delay_dd.dd_stopped)
951                         wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
952         }
953         mutex_unlock(&delay_dd.dd_mutex);
954
955         if (!list_empty(&msg_list))
956                 delayed_msg_process(&msg_list, shutdown);
957
958         RETURN(n);
959 }
960
961 /**
962  * List the delay rule at position \a pos
963  */
964 int
965 lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
966                     struct lnet_fault_stat *stat)
967 {
968         struct lnet_delay_rule *rule;
969         int                     cpt;
970         int                     i = 0;
971         int                     rc = -ENOENT;
972         ENTRY;
973
974         cpt = lnet_net_lock_current();
975         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
976                 if (i++ < pos)
977                         continue;
978
979                 spin_lock(&rule->dl_lock);
980                 *attr = rule->dl_attr;
981                 *stat = rule->dl_stat;
982                 spin_unlock(&rule->dl_lock);
983                 rc = 0;
984                 break;
985         }
986
987         lnet_net_unlock(cpt);
988         RETURN(rc);
989 }
990
991 /**
992  * reset counters for all Delay Rules
993  */
994 void
995 lnet_delay_rule_reset(void)
996 {
997         struct lnet_delay_rule *rule;
998         int                     cpt;
999         ENTRY;
1000
1001         cpt = lnet_net_lock_current();
1002
1003         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
1004                 struct lnet_fault_attr *attr = &rule->dl_attr;
1005
1006                 spin_lock(&rule->dl_lock);
1007
1008                 memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
1009                 if (attr->u.delay.la_rate != 0) {
1010                         rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
1011                 } else {
1012                         rule->dl_delay_time = ktime_get_seconds() +
1013                                               prandom_u32_max(attr->u.delay.la_interval);
1014                         rule->dl_time_base = ktime_get_seconds() +
1015                                              attr->u.delay.la_interval;
1016                 }
1017                 spin_unlock(&rule->dl_lock);
1018         }
1019
1020         lnet_net_unlock(cpt);
1021         EXIT;
1022 }
1023
1024 int
1025 lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
1026 {
1027         struct lnet_fault_attr *attr;
1028         struct lnet_fault_stat *stat;
1029
1030         attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
1031
1032         switch (opc) {
1033         default:
1034                 return -EINVAL;
1035
1036         case LNET_CTL_DROP_ADD:
1037                 if (attr == NULL)
1038                         return -EINVAL;
1039
1040                 return lnet_drop_rule_add(attr);
1041
1042         case LNET_CTL_DROP_DEL:
1043                 if (attr == NULL)
1044                         return -EINVAL;
1045
1046                 data->ioc_count = lnet_drop_rule_del(attr->fa_src,
1047                                                      attr->fa_dst);
1048                 return 0;
1049
1050         case LNET_CTL_DROP_RESET:
1051                 lnet_drop_rule_reset();
1052                 return 0;
1053
1054         case LNET_CTL_DROP_LIST:
1055                 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1056                 if (attr == NULL || stat == NULL)
1057                         return -EINVAL;
1058
1059                 return lnet_drop_rule_list(data->ioc_count, attr, stat);
1060
1061         case LNET_CTL_DELAY_ADD:
1062                 if (attr == NULL)
1063                         return -EINVAL;
1064
1065                 return lnet_delay_rule_add(attr);
1066
1067         case LNET_CTL_DELAY_DEL:
1068                 if (attr == NULL)
1069                         return -EINVAL;
1070
1071                 data->ioc_count = lnet_delay_rule_del(attr->fa_src,
1072                                                       attr->fa_dst, false);
1073                 return 0;
1074
1075         case LNET_CTL_DELAY_RESET:
1076                 lnet_delay_rule_reset();
1077                 return 0;
1078
1079         case LNET_CTL_DELAY_LIST:
1080                 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1081                 if (attr == NULL || stat == NULL)
1082                         return -EINVAL;
1083
1084                 return lnet_delay_rule_list(data->ioc_count, attr, stat);
1085         }
1086 }
1087
1088 int
1089 lnet_fault_init(void)
1090 {
1091         BUILD_BUG_ON(LNET_PUT_BIT != BIT(LNET_MSG_PUT));
1092         BUILD_BUG_ON(LNET_ACK_BIT != BIT(LNET_MSG_ACK));
1093         BUILD_BUG_ON(LNET_GET_BIT != BIT(LNET_MSG_GET));
1094         BUILD_BUG_ON(LNET_REPLY_BIT != BIT(LNET_MSG_REPLY));
1095
1096         mutex_init(&delay_dd.dd_mutex);
1097         spin_lock_init(&delay_dd.dd_lock);
1098         init_waitqueue_head(&delay_dd.dd_waitq);
1099         init_waitqueue_head(&delay_dd.dd_ctl_waitq);
1100         INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
1101
1102         return 0;
1103 }
1104
1105 void
1106 lnet_fault_fini(void)
1107 {
1108         lnet_drop_rule_del(0, 0);
1109         lnet_delay_rule_del(0, 0, true);
1110
1111         LASSERT(list_empty(&the_lnet.ln_drop_rules));
1112         LASSERT(list_empty(&the_lnet.ln_delay_rules));
1113         LASSERT(list_empty(&delay_dd.dd_sched_rules));
1114 }