/*
 * Whamcloud gitweb: LU-12923 lustre: Replace CLASSERT() with BUILD_BUG_ON()
 * [fs/lustre-release.git] / lnet / lnet / net_fault.c
 */
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; if not, write to the
18  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19  * Boston, MA 021110-1307, USA
20  *
21  * GPL HEADER END
22  */
23 /*
24  * Copyright (c) 2014, 2017, Intel Corporation.
25  */
26 /*
27  * This file is part of Lustre, http://www.lustre.org/
28  * Lustre is a trademark of Sun Microsystems, Inc.
29  *
30  * lnet/lnet/net_fault.c
31  *
32  * Lustre network fault simulation
33  *
34  * Author: liang.zhen@intel.com
35  */
36
37 #define DEBUG_SUBSYSTEM S_LNET
38
39 #include <linux/random.h>
40 #include <lnet/lib-lnet.h>
41 #include <uapi/linux/lnet/lnetctl.h>
42
43 #define LNET_MSG_MASK           (LNET_PUT_BIT | LNET_ACK_BIT | \
44                                  LNET_GET_BIT | LNET_REPLY_BIT)
45
struct lnet_drop_rule {
	/** link chain on the_lnet.ln_drop_rules */
	struct list_head	dr_link;
	/** attributes of this rule */
	struct lnet_fault_attr	dr_attr;
	/** lock to protect \a dr_drop_at and \a dr_stat */
	spinlock_t		dr_lock;
	/**
	 * the message sequence to drop, which means message is dropped when
	 * dr_stat.drs_count == dr_drop_at
	 */
	unsigned long		dr_drop_at;
	/**
	 * seconds to drop the next message, it's exclusive with dr_drop_at
	 */
	time64_t		dr_drop_time;
	/** baseline to calculate dr_drop_time */
	time64_t		dr_time_base;
	/** statistic of dropped messages */
	struct lnet_fault_stat	dr_stat;
};
67
68 static bool
69 lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
70 {
71         if (nid == msg_nid || nid == LNET_NID_ANY)
72                 return true;
73
74         if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
75                 return false;
76
77         /* 255.255.255.255@net is wildcard for all addresses in a network */
78         return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
79 }
80
81 static bool
82 lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
83                       lnet_nid_t local_nid, lnet_nid_t dst,
84                       unsigned int type, unsigned int portal)
85 {
86         if (!lnet_fault_nid_match(attr->fa_src, src) ||
87             !lnet_fault_nid_match(attr->fa_dst, dst) ||
88             !lnet_fault_nid_match(attr->fa_local_nid, local_nid))
89                 return false;
90
91         if (!(attr->fa_msg_mask & (1 << type)))
92                 return false;
93
94         /* NB: ACK and REPLY have no portal, but they should have been
95          * rejected by message mask */
96         if (attr->fa_ptl_mask != 0 && /* has portal filter */
97             !(attr->fa_ptl_mask & (1ULL << portal)))
98                 return false;
99
100         return true;
101 }
102
103 static int
104 lnet_fault_attr_validate(struct lnet_fault_attr *attr)
105 {
106         if (attr->fa_msg_mask == 0)
107                 attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
108
109         if (attr->fa_ptl_mask == 0) /* no portal filter */
110                 return 0;
111
112         /* NB: only PUT and GET can be filtered if portal filter has been set */
113         attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
114         if (attr->fa_msg_mask == 0) {
115                 CDEBUG(D_NET, "can't find valid message type bits %x\n",
116                        attr->fa_msg_mask);
117                 return -EINVAL;
118         }
119         return 0;
120 }
121
122 static void
123 lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
124 {
125         /* NB: fs_counter is NOT updated by this function */
126         switch (type) {
127         case LNET_MSG_PUT:
128                 stat->fs_put++;
129                 return;
130         case LNET_MSG_ACK:
131                 stat->fs_ack++;
132                 return;
133         case LNET_MSG_GET:
134                 stat->fs_get++;
135                 return;
136         case LNET_MSG_REPLY:
137                 stat->fs_reply++;
138                 return;
139         }
140 }
141
142 /**
143  * LNet message drop simulation
144  */
145
146 /**
147  * Add a new drop rule to LNet
148  * There is no check for duplicated drop rule, all rules will be checked for
149  * incoming message.
150  */
151 static int
152 lnet_drop_rule_add(struct lnet_fault_attr *attr)
153 {
154         struct lnet_drop_rule *rule;
155         ENTRY;
156
157         if (!((attr->u.drop.da_rate == 0) ^ (attr->u.drop.da_interval == 0))) {
158                 CDEBUG(D_NET,
159                        "please provide either drop rate or drop interval, "
160                        "but not both at the same time %d/%d\n",
161                        attr->u.drop.da_rate, attr->u.drop.da_interval);
162                 RETURN(-EINVAL);
163         }
164
165         if (lnet_fault_attr_validate(attr) != 0)
166                 RETURN(-EINVAL);
167
168         CFS_ALLOC_PTR(rule);
169         if (rule == NULL)
170                 RETURN(-ENOMEM);
171
172         spin_lock_init(&rule->dr_lock);
173
174         rule->dr_attr = *attr;
175         if (attr->u.drop.da_interval != 0) {
176                 rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
177                 rule->dr_drop_time = ktime_get_seconds() +
178                                      prandom_u32_max(attr->u.drop.da_interval);
179         } else {
180                 rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
181         }
182
183         lnet_net_lock(LNET_LOCK_EX);
184         list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
185         lnet_net_unlock(LNET_LOCK_EX);
186
187         CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
188                libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
189                attr->u.drop.da_rate, attr->u.drop.da_interval);
190         RETURN(0);
191 }
192
193 /**
194  * Remove matched drop rules from lnet, all rules that can match \a src and
195  * \a dst will be removed.
196  * If \a src is zero, then all rules have \a dst as destination will be remove
197  * If \a dst is zero, then all rules have \a src as source will be removed
198  * If both of them are zero, all rules will be removed
199  */
200 static int
201 lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
202 {
203         struct lnet_drop_rule *rule;
204         struct lnet_drop_rule *tmp;
205         struct list_head       zombies;
206         int                    n = 0;
207         ENTRY;
208
209         INIT_LIST_HEAD(&zombies);
210
211         lnet_net_lock(LNET_LOCK_EX);
212         list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
213                 if (rule->dr_attr.fa_src != src && src != 0)
214                         continue;
215
216                 if (rule->dr_attr.fa_dst != dst && dst != 0)
217                         continue;
218
219                 list_move(&rule->dr_link, &zombies);
220         }
221         lnet_net_unlock(LNET_LOCK_EX);
222
223         list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
224                 CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
225                        libcfs_nid2str(rule->dr_attr.fa_src),
226                        libcfs_nid2str(rule->dr_attr.fa_dst),
227                        rule->dr_attr.u.drop.da_rate,
228                        rule->dr_attr.u.drop.da_interval);
229
230                 list_del(&rule->dr_link);
231                 CFS_FREE_PTR(rule);
232                 n++;
233         }
234
235         RETURN(n);
236 }
237
238 /**
239  * List drop rule at position of \a pos
240  */
241 static int
242 lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
243                     struct lnet_fault_stat *stat)
244 {
245         struct lnet_drop_rule *rule;
246         int                    cpt;
247         int                    i = 0;
248         int                    rc = -ENOENT;
249         ENTRY;
250
251         cpt = lnet_net_lock_current();
252         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
253                 if (i++ < pos)
254                         continue;
255
256                 spin_lock(&rule->dr_lock);
257                 *attr = rule->dr_attr;
258                 *stat = rule->dr_stat;
259                 spin_unlock(&rule->dr_lock);
260                 rc = 0;
261                 break;
262         }
263
264         lnet_net_unlock(cpt);
265         RETURN(rc);
266 }
267
268 /**
269  * reset counters for all drop rules
270  */
271 static void
272 lnet_drop_rule_reset(void)
273 {
274         struct lnet_drop_rule *rule;
275         int                    cpt;
276         ENTRY;
277
278         cpt = lnet_net_lock_current();
279
280         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
281                 struct lnet_fault_attr *attr = &rule->dr_attr;
282
283                 spin_lock(&rule->dr_lock);
284
285                 memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
286                 if (attr->u.drop.da_rate != 0) {
287                         rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
288                 } else {
289                         rule->dr_drop_time = ktime_get_seconds() +
290                                              prandom_u32_max(attr->u.drop.da_interval);
291                         rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
292                 }
293                 spin_unlock(&rule->dr_lock);
294         }
295
296         lnet_net_unlock(cpt);
297         EXIT;
298 }
299
300 static void
301 lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
302 {
303         int choice;
304         int delta;
305         int best_delta;
306         int i;
307
308         /* assign a random failure */
309         choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
310         if (choice == 0)
311                 choice++;
312
313         if (mask == HSTATUS_RANDOM) {
314                 *hstatus = choice;
315                 return;
316         }
317
318         if (mask & (1 << choice)) {
319                 *hstatus = choice;
320                 return;
321         }
322
323         /* round to the closest ON bit */
324         i = HSTATUS_END;
325         best_delta = HSTATUS_END;
326         while (i > 0) {
327                 if (mask & (1 << i)) {
328                         delta = choice - i;
329                         if (delta < 0)
330                                 delta *= -1;
331                         if (delta < best_delta) {
332                                 best_delta = delta;
333                                 choice = i;
334                         }
335                 }
336                 i--;
337         }
338
339         *hstatus = choice;
340 }
341
342 /**
343  * check source/destination NID, portal, message type and drop rate,
344  * decide whether should drop this message or not
345  */
346 static bool
347 drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
348                 lnet_nid_t local_nid, lnet_nid_t dst,
349                 unsigned int type, unsigned int portal,
350                 enum lnet_msg_hstatus *hstatus)
351 {
352         struct lnet_fault_attr  *attr = &rule->dr_attr;
353         bool                     drop;
354
355         if (!lnet_fault_attr_match(attr, src, local_nid, dst, type, portal))
356                 return false;
357
358         if (attr->u.drop.da_drop_all) {
359                 CDEBUG(D_NET, "set to drop all messages\n");
360                 drop = true;
361                 goto drop_matched;
362         }
363
364         /*
365          * if we're trying to match a health status error but it hasn't
366          * been set in the rule, then don't match
367          */
368         if ((hstatus && !attr->u.drop.da_health_error_mask) ||
369             (!hstatus && attr->u.drop.da_health_error_mask))
370                 return false;
371
372         /* match this rule, check drop rate now */
373         spin_lock(&rule->dr_lock);
374         if (attr->u.drop.da_random) {
375                 int value = prandom_u32_max(attr->u.drop.da_interval);
376                 if (value >= (attr->u.drop.da_interval / 2))
377                         drop = true;
378                 else
379                         drop = false;
380         } else if (rule->dr_drop_time != 0) { /* time based drop */
381                 time64_t now = ktime_get_seconds();
382
383                 rule->dr_stat.fs_count++;
384                 drop = now >= rule->dr_drop_time;
385                 if (drop) {
386                         if (now > rule->dr_time_base)
387                                 rule->dr_time_base = now;
388
389                         rule->dr_drop_time = rule->dr_time_base +
390                                              prandom_u32_max(attr->u.drop.da_interval);
391                         rule->dr_time_base += attr->u.drop.da_interval;
392
393                         CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
394                                libcfs_nid2str(attr->fa_src),
395                                libcfs_nid2str(attr->fa_dst),
396                                rule->dr_drop_time);
397                 }
398
399         } else { /* rate based drop */
400                 __u64 count;
401
402                 drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
403                 count = rule->dr_stat.fs_count;
404                 if (do_div(count, attr->u.drop.da_rate) == 0) {
405                         rule->dr_drop_at = rule->dr_stat.fs_count +
406                                            prandom_u32_max(attr->u.drop.da_rate);
407                         CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
408                                libcfs_nid2str(attr->fa_src),
409                                libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
410                 }
411         }
412
413 drop_matched:
414
415         if (drop) { /* drop this message, update counters */
416                 if (hstatus)
417                         lnet_fault_match_health(hstatus,
418                                 attr->u.drop.da_health_error_mask);
419                 lnet_fault_stat_inc(&rule->dr_stat, type);
420                 rule->dr_stat.u.drop.ds_dropped++;
421         }
422
423         spin_unlock(&rule->dr_lock);
424         return drop;
425 }
426
427 /**
428  * Check if message from \a src to \a dst can match any existed drop rule
429  */
430 bool
431 lnet_drop_rule_match(struct lnet_hdr *hdr,
432                      lnet_nid_t local_nid,
433                      enum lnet_msg_hstatus *hstatus)
434 {
435         lnet_nid_t src = le64_to_cpu(hdr->src_nid);
436         lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
437         unsigned int typ = le32_to_cpu(hdr->type);
438         struct lnet_drop_rule *rule;
439         unsigned int ptl = -1;
440         bool drop = false;
441         int cpt;
442
443         /* NB: if Portal is specified, then only PUT and GET will be
444          * filtered by drop rule */
445         if (typ == LNET_MSG_PUT)
446                 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
447         else if (typ == LNET_MSG_GET)
448                 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
449
450         cpt = lnet_net_lock_current();
451         list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
452                 drop = drop_rule_match(rule, src, local_nid, dst, typ, ptl,
453                                        hstatus);
454                 if (drop)
455                         break;
456         }
457         lnet_net_unlock(cpt);
458
459         return drop;
460 }
461
462 /**
463  * LNet Delay Simulation
464  */
465 /** timestamp (second) to send delayed message */
466 #define msg_delay_send           msg_ev.hdr_data
467
struct lnet_delay_rule {
	/** link chain on the_lnet.ln_delay_rules */
	struct list_head	dl_link;
	/** link chain on delay_dd.dd_sched_rules */
	struct list_head	dl_sched_link;
	/** attributes of this rule */
	struct lnet_fault_attr	dl_attr;
	/** lock to protect \a below members */
	spinlock_t		dl_lock;
	/** refcount of delay rule */
	atomic_t		dl_refcount;
	/**
	 * the message sequence to delay, which means message is delayed when
	 * dl_stat.fs_count == dl_delay_at
	 */
	unsigned long		dl_delay_at;
	/**
	 * seconds to delay the next message, it's exclusive with dl_delay_at
	 */
	time64_t		dl_delay_time;
	/** baseline to calculate dl_delay_time */
	time64_t		dl_time_base;
	/** seconds until we send the next delayed message */
	time64_t		dl_msg_send;
	/** delayed message list */
	struct list_head	dl_msg_list;
	/** statistic of delayed messages */
	struct lnet_fault_stat	dl_stat;
	/** timer to wakeup delay_daemon */
	struct timer_list	dl_timer;
};
499
struct delay_daemon_data {
	/** serialise rule add/remove */
	struct mutex		dd_mutex;
	/** protect rules on \a dd_sched_rules */
	spinlock_t		dd_lock;
	/** scheduled delay rules (by timer) */
	struct list_head	dd_sched_rules;
	/** daemon thread sleeps here */
	wait_queue_head_t	dd_waitq;
	/** controller (lctl command) waits here */
	wait_queue_head_t	dd_ctl_waitq;
	/** daemon is running */
	unsigned int		dd_running;
	/** daemon stopped */
	unsigned int		dd_stopped;
};
516
517 static struct delay_daemon_data delay_dd;
518
519 static void
520 delay_rule_decref(struct lnet_delay_rule *rule)
521 {
522         if (atomic_dec_and_test(&rule->dl_refcount)) {
523                 LASSERT(list_empty(&rule->dl_sched_link));
524                 LASSERT(list_empty(&rule->dl_msg_list));
525                 LASSERT(list_empty(&rule->dl_link));
526
527                 CFS_FREE_PTR(rule);
528         }
529 }
530
/**
 * check source/destination NID, portal, message type and delay rate,
 * decide whether should delay this message or not
 *
 * Returns true when \a msg has been queued on the rule's delay list;
 * the caller must not touch the message afterwards.
 */
static bool
delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
		lnet_nid_t dst, unsigned int type, unsigned int portal,
		struct lnet_msg *msg)
{
	struct lnet_fault_attr	*attr = &rule->dl_attr;
	bool			 delay;

	if (!lnet_fault_attr_match(attr, src, LNET_NID_ANY,
				   dst, type, portal))
		return false;

	/* match this rule, check delay rate now */
	spin_lock(&rule->dl_lock);
	if (rule->dl_delay_time != 0) { /* time based delay */
		time64_t now = ktime_get_seconds();

		rule->dl_stat.fs_count++;
		delay = now >= rule->dl_delay_time;
		if (delay) {
			/* catch up if we fell more than one interval behind */
			if (now > rule->dl_time_base)
				rule->dl_time_base = now;

			/* next delay at a random point in the next interval */
			rule->dl_delay_time = rule->dl_time_base +
					      prandom_u32_max(attr->u.delay.la_interval);
			rule->dl_time_base += attr->u.delay.la_interval;

			CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst),
			       rule->dl_delay_time);
		}

	} else { /* rate based delay */
		__u64 count;

		delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
		/* generate the next random rate sequence */
		count = rule->dl_stat.fs_count;
		if (do_div(count, attr->u.delay.la_rate) == 0) {
			rule->dl_delay_at = rule->dl_stat.fs_count +
					    prandom_u32_max(attr->u.delay.la_rate);
			CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
			       libcfs_nid2str(attr->fa_src),
			       libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
		}
	}

	if (!delay) {
		spin_unlock(&rule->dl_lock);
		return false;
	}

	/* delay this message, update counters */
	lnet_fault_stat_inc(&rule->dl_stat, type);
	rule->dl_stat.u.delay.ls_delayed++;

	/* queue the message and arm the timer if it isn't pending.
	 * NOTE(review): msg_delay_send / dl_msg_send hold an absolute
	 * ktime_get_seconds() timestamp, so "jiffies +
	 * cfs_time_seconds(dl_msg_send)" appears to arm the timer far in
	 * the future; presumably it should be relative to now - confirm
	 * against later upstream fixes */
	list_add_tail(&msg->msg_list, &rule->dl_msg_list);
	msg->msg_delay_send = ktime_get_seconds() + attr->u.delay.la_latency;
	if (rule->dl_msg_send == -1) {
		rule->dl_msg_send = msg->msg_delay_send;
		mod_timer(&rule->dl_timer,
			  jiffies + cfs_time_seconds(rule->dl_msg_send));
	}

	spin_unlock(&rule->dl_lock);
	return true;
}
603
604 /**
605  * check if \a msg can match any Delay Rule, receiving of this message
606  * will be delayed if there is a match.
607  */
608 bool
609 lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
610 {
611         struct lnet_delay_rule  *rule;
612         lnet_nid_t               src = le64_to_cpu(hdr->src_nid);
613         lnet_nid_t               dst = le64_to_cpu(hdr->dest_nid);
614         unsigned int             typ = le32_to_cpu(hdr->type);
615         unsigned int             ptl = -1;
616
617         /* NB: called with hold of lnet_net_lock */
618
619         /* NB: if Portal is specified, then only PUT and GET will be
620          * filtered by delay rule */
621         if (typ == LNET_MSG_PUT)
622                 ptl = le32_to_cpu(hdr->msg.put.ptl_index);
623         else if (typ == LNET_MSG_GET)
624                 ptl = le32_to_cpu(hdr->msg.get.ptl_index);
625
626         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
627                 if (delay_rule_match(rule, src, dst, typ, ptl, msg))
628                         return true;
629         }
630
631         return false;
632 }
633
/** check out delayed messages for send
 *
 * Moves every expired message (or all of them when \a all is true)
 * from the rule's delay list onto \a msg_list, then re-arms or disarms
 * the rule's timer.
 */
static void
delayed_msg_check(struct lnet_delay_rule *rule, bool all,
		  struct list_head *msg_list)
{
	struct lnet_msg *msg;
	struct lnet_msg *tmp;
	time64_t now = ktime_get_seconds();

	/* NOTE(review): dl_msg_send is a seconds timestamp but is scaled
	 * by cfs_time_seconds() (jiffies) before being compared with
	 * 'now' (seconds) - the units look inconsistent; confirm against
	 * the timer handling in delay_rule_match() */
	if (!all && cfs_time_seconds(rule->dl_msg_send) > now)
		return;

	spin_lock(&rule->dl_lock);
	/* the list is ordered by send time, so stop at the first
	 * not-yet-expired message */
	list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
		if (!all && msg->msg_delay_send > now)
			break;

		msg->msg_delay_send = 0;
		list_move_tail(&msg->msg_list, msg_list);
	}

	if (list_empty(&rule->dl_msg_list)) {
		/* no delayed message left on this rule; disarm the timer */
		del_timer(&rule->dl_timer);
		rule->dl_msg_send = -1;

	} else if (!list_empty(msg_list)) {
		/* dequeued some timedout messages, update timer for the
		 * next delayed message on rule */
		msg = list_entry(rule->dl_msg_list.next,
				 struct lnet_msg, msg_list);
		rule->dl_msg_send = msg->msg_delay_send;
		mod_timer(&rule->dl_timer,
			  jiffies + cfs_time_seconds(rule->dl_msg_send));
	}
	spin_unlock(&rule->dl_lock);
}
670
/* re-deliver (or drop, when \a drop is true) each message whose delay
 * has expired; messages that fail delivery are accounted and finalized
 * with the error code */
static void
delayed_msg_process(struct list_head *msg_list, bool drop)
{
	struct lnet_msg *msg;

	while (!list_empty(msg_list)) {
		struct lnet_ni *ni;
		int             cpt;
		int             rc;

		msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
		LASSERT(msg->msg_rxpeer != NULL);
		LASSERT(msg->msg_rxni != NULL);

		ni = msg->msg_rxni;
		cpt = msg->msg_rx_cpt;

		list_del_init(&msg->msg_list);
		if (drop) {
			/* rule being torn down: cancel the message */
			rc = -ECANCELED;

		} else if (!msg->msg_routing) {
			/* locally destined: resume normal receive path */
			rc = lnet_parse_local(ni, msg);
			if (rc == 0)
				continue;

		} else {
			/* routed message: try to forward it on */
			lnet_net_lock(cpt);
			rc = lnet_parse_forward_locked(ni, msg);
			lnet_net_unlock(cpt);

			switch (rc) {
			case LNET_CREDIT_OK:
				lnet_ni_recv(ni, msg->msg_private, msg, 0,
					     0, msg->msg_len, msg->msg_len);
				/* fallthrough */
			case LNET_CREDIT_WAIT:
				continue;
			default: /* failures */
				break;
			}
		}

		/* delivery failed or message dropped: account and finalize */
		lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len,
				  msg->msg_type);
		lnet_finalize(msg, rc);
	}
}
718
719 /**
720  * Process delayed messages for scheduled rules
721  * This function can either be called by delay_rule_daemon, or by lnet_finalise
722  */
723 void
724 lnet_delay_rule_check(void)
725 {
726         struct lnet_delay_rule  *rule;
727         struct list_head         msgs;
728
729         INIT_LIST_HEAD(&msgs);
730         while (1) {
731                 if (list_empty(&delay_dd.dd_sched_rules))
732                         break;
733
734                 spin_lock_bh(&delay_dd.dd_lock);
735                 if (list_empty(&delay_dd.dd_sched_rules)) {
736                         spin_unlock_bh(&delay_dd.dd_lock);
737                         break;
738                 }
739
740                 rule = list_entry(delay_dd.dd_sched_rules.next,
741                                   struct lnet_delay_rule, dl_sched_link);
742                 list_del_init(&rule->dl_sched_link);
743                 spin_unlock_bh(&delay_dd.dd_lock);
744
745                 delayed_msg_check(rule, false, &msgs);
746                 delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
747         }
748
749         if (!list_empty(&msgs))
750                 delayed_msg_process(&msgs, false);
751 }
752
753 /** deamon thread to handle delayed messages */
754 static int
755 lnet_delay_rule_daemon(void *arg)
756 {
757         delay_dd.dd_running = 1;
758         wake_up(&delay_dd.dd_ctl_waitq);
759
760         while (delay_dd.dd_running) {
761                 wait_event_interruptible(delay_dd.dd_waitq,
762                                          !delay_dd.dd_running ||
763                                          !list_empty(&delay_dd.dd_sched_rules));
764                 lnet_delay_rule_check();
765         }
766
767         /* in case more rules have been enqueued after my last check */
768         lnet_delay_rule_check();
769         delay_dd.dd_stopped = 1;
770         wake_up(&delay_dd.dd_ctl_waitq);
771
772         return 0;
773 }
774
/* timer callback: hand the rule to the daemon thread for processing */
static void
delay_timer_cb(cfs_timer_cb_arg_t data)
{
	struct lnet_delay_rule *rule = cfs_from_timer(rule, data, dl_timer);

	spin_lock_bh(&delay_dd.dd_lock);
	/* enqueue only once; the reference taken here is dropped by
	 * lnet_delay_rule_check() */
	if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
		atomic_inc(&rule->dl_refcount);
		list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
		wake_up(&delay_dd.dd_waitq);
	}
	spin_unlock_bh(&delay_dd.dd_lock);
}
788
789 /**
790  * Add a new delay rule to LNet
791  * There is no check for duplicated delay rule, all rules will be checked for
792  * incoming message.
793  */
794 int
795 lnet_delay_rule_add(struct lnet_fault_attr *attr)
796 {
797         struct lnet_delay_rule *rule;
798         int                     rc = 0;
799         ENTRY;
800
801         if (!((attr->u.delay.la_rate == 0) ^
802               (attr->u.delay.la_interval == 0))) {
803                 CDEBUG(D_NET,
804                        "please provide either delay rate or delay interval, "
805                        "but not both at the same time %d/%d\n",
806                        attr->u.delay.la_rate, attr->u.delay.la_interval);
807                 RETURN(-EINVAL);
808         }
809
810         if (attr->u.delay.la_latency == 0) {
811                 CDEBUG(D_NET, "delay latency cannot be zero\n");
812                 RETURN(-EINVAL);
813         }
814
815         if (lnet_fault_attr_validate(attr) != 0)
816                 RETURN(-EINVAL);
817
818         CFS_ALLOC_PTR(rule);
819         if (rule == NULL)
820                 RETURN(-ENOMEM);
821
822         mutex_lock(&delay_dd.dd_mutex);
823         if (!delay_dd.dd_running) {
824                 struct task_struct *task;
825
826                 /* NB: although LND threads will process delayed message
827                  * in lnet_finalize, but there is no guarantee that LND
828                  * threads will be waken up if no other message needs to
829                  * be handled.
830                  * Only one daemon thread, performance is not the concern
831                  * of this simualation module.
832                  */
833                 task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
834                 if (IS_ERR(task)) {
835                         rc = PTR_ERR(task);
836                         GOTO(failed, rc);
837                 }
838                 wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
839         }
840
841         cfs_timer_setup(&rule->dl_timer, delay_timer_cb,
842                         (unsigned long)rule, 0);
843
844         spin_lock_init(&rule->dl_lock);
845         INIT_LIST_HEAD(&rule->dl_msg_list);
846         INIT_LIST_HEAD(&rule->dl_sched_link);
847
848         rule->dl_attr = *attr;
849         if (attr->u.delay.la_interval != 0) {
850                 rule->dl_time_base = ktime_get_seconds() +
851                                      attr->u.delay.la_interval;
852                 rule->dl_delay_time = ktime_get_seconds() +
853                                       prandom_u32_max(attr->u.delay.la_interval);
854         } else {
855                 rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
856         }
857
858         rule->dl_msg_send = -1;
859
860         lnet_net_lock(LNET_LOCK_EX);
861         atomic_set(&rule->dl_refcount, 1);
862         list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
863         lnet_net_unlock(LNET_LOCK_EX);
864
865         CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
866                libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
867                attr->u.delay.la_rate);
868
869         mutex_unlock(&delay_dd.dd_mutex);
870         RETURN(0);
871  failed:
872         mutex_unlock(&delay_dd.dd_mutex);
873         CFS_FREE_PTR(rule);
874         return rc;
875 }
876
877 /**
878  * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src
879  * and \a dst are zero, all rules will be removed, otherwise only matched rules
880  * will be removed.
 * If \a src is zero, then all rules that have \a dst as destination will be
 * removed. If \a dst is zero, then all rules that have \a src as source will
 * be removed.
883  *
884  * When a delay rule is removed, all delayed messages of this rule will be
885  * processed immediately.
886  */
int
lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
{
	struct lnet_delay_rule *rule;
	struct lnet_delay_rule  *tmp;
	struct list_head        rule_list;	/* rules detached for teardown */
	struct list_head        msg_list;	/* delayed messages to release */
	int                     n = 0;		/* count of removed rules */
	bool                    cleanup;
	ENTRY;

	INIT_LIST_HEAD(&rule_list);
	INIT_LIST_HEAD(&msg_list);

	/* shutdown removes every rule regardless of src/dst */
	if (shutdown)
		src = dst = 0;

	/* dd_mutex serializes against rule add and daemon start/stop;
	 * LNET_LOCK_EX protects the global delay rule list */
	mutex_lock(&delay_dd.dd_mutex);
	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
		/* a zero src/dst acts as a wildcard */
		if (rule->dl_attr.fa_src != src && src != 0)
			continue;

		if (rule->dl_attr.fa_dst != dst && dst != 0)
			continue;

		CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
		       libcfs_nid2str(rule->dl_attr.fa_src),
		       libcfs_nid2str(rule->dl_attr.fa_dst),
		       rule->dl_attr.u.delay.la_rate,
		       rule->dl_attr.u.delay.la_interval);
		/* refcount is taken over by rule_list */
		list_move(&rule->dl_link, &rule_list);
	}

	/* check if we need to shutdown delay_daemon */
	cleanup = list_empty(&the_lnet.ln_delay_rules) &&
		  !list_empty(&rule_list);
	lnet_net_unlock(LNET_LOCK_EX);

	/* tear down the detached rules outside the net lock; timer and
	 * message cleanup may block */
	list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
		list_del_init(&rule->dl_link);

		del_timer_sync(&rule->dl_timer);
		delayed_msg_check(rule, true, &msg_list);
		delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
		n++;
	}

	if (cleanup) { /* no more delay rule, shutdown delay_daemon */
		LASSERT(delay_dd.dd_running);
		delay_dd.dd_running = 0;
		wake_up(&delay_dd.dd_waitq);

		/* wait for the daemon thread to acknowledge the stop */
		while (!delay_dd.dd_stopped)
			wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
	}
	mutex_unlock(&delay_dd.dd_mutex);

	/* finalize the collected delayed messages with all locks dropped */
	if (!list_empty(&msg_list))
		delayed_msg_process(&msg_list, shutdown);

	RETURN(n);
}
952
953 /**
954  * List Delay Rule at position of \a pos
955  */
956 int
957 lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
958                     struct lnet_fault_stat *stat)
959 {
960         struct lnet_delay_rule *rule;
961         int                     cpt;
962         int                     i = 0;
963         int                     rc = -ENOENT;
964         ENTRY;
965
966         cpt = lnet_net_lock_current();
967         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
968                 if (i++ < pos)
969                         continue;
970
971                 spin_lock(&rule->dl_lock);
972                 *attr = rule->dl_attr;
973                 *stat = rule->dl_stat;
974                 spin_unlock(&rule->dl_lock);
975                 rc = 0;
976                 break;
977         }
978
979         lnet_net_unlock(cpt);
980         RETURN(rc);
981 }
982
983 /**
984  * reset counters for all Delay Rules
985  */
986 void
987 lnet_delay_rule_reset(void)
988 {
989         struct lnet_delay_rule *rule;
990         int                     cpt;
991         ENTRY;
992
993         cpt = lnet_net_lock_current();
994
995         list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
996                 struct lnet_fault_attr *attr = &rule->dl_attr;
997
998                 spin_lock(&rule->dl_lock);
999
1000                 memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
1001                 if (attr->u.delay.la_rate != 0) {
1002                         rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
1003                 } else {
1004                         rule->dl_delay_time = ktime_get_seconds() +
1005                                               prandom_u32_max(attr->u.delay.la_interval);
1006                         rule->dl_time_base = ktime_get_seconds() +
1007                                              attr->u.delay.la_interval;
1008                 }
1009                 spin_unlock(&rule->dl_lock);
1010         }
1011
1012         lnet_net_unlock(cpt);
1013         EXIT;
1014 }
1015
1016 int
1017 lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
1018 {
1019         struct lnet_fault_attr *attr;
1020         struct lnet_fault_stat *stat;
1021
1022         attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
1023
1024         switch (opc) {
1025         default:
1026                 return -EINVAL;
1027
1028         case LNET_CTL_DROP_ADD:
1029                 if (attr == NULL)
1030                         return -EINVAL;
1031
1032                 return lnet_drop_rule_add(attr);
1033
1034         case LNET_CTL_DROP_DEL:
1035                 if (attr == NULL)
1036                         return -EINVAL;
1037
1038                 data->ioc_count = lnet_drop_rule_del(attr->fa_src,
1039                                                      attr->fa_dst);
1040                 return 0;
1041
1042         case LNET_CTL_DROP_RESET:
1043                 lnet_drop_rule_reset();
1044                 return 0;
1045
1046         case LNET_CTL_DROP_LIST:
1047                 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1048                 if (attr == NULL || stat == NULL)
1049                         return -EINVAL;
1050
1051                 return lnet_drop_rule_list(data->ioc_count, attr, stat);
1052
1053         case LNET_CTL_DELAY_ADD:
1054                 if (attr == NULL)
1055                         return -EINVAL;
1056
1057                 return lnet_delay_rule_add(attr);
1058
1059         case LNET_CTL_DELAY_DEL:
1060                 if (attr == NULL)
1061                         return -EINVAL;
1062
1063                 data->ioc_count = lnet_delay_rule_del(attr->fa_src,
1064                                                       attr->fa_dst, false);
1065                 return 0;
1066
1067         case LNET_CTL_DELAY_RESET:
1068                 lnet_delay_rule_reset();
1069                 return 0;
1070
1071         case LNET_CTL_DELAY_LIST:
1072                 stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
1073                 if (attr == NULL || stat == NULL)
1074                         return -EINVAL;
1075
1076                 return lnet_delay_rule_list(data->ioc_count, attr, stat);
1077         }
1078 }
1079
1080 int
1081 lnet_fault_init(void)
1082 {
1083         BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT);
1084         BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK);
1085         BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET);
1086         BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY);
1087
1088         mutex_init(&delay_dd.dd_mutex);
1089         spin_lock_init(&delay_dd.dd_lock);
1090         init_waitqueue_head(&delay_dd.dd_waitq);
1091         init_waitqueue_head(&delay_dd.dd_ctl_waitq);
1092         INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
1093
1094         return 0;
1095 }
1096
1097 void
1098 lnet_fault_fini(void)
1099 {
1100         lnet_drop_rule_del(0, 0);
1101         lnet_delay_rule_del(0, 0, true);
1102
1103         LASSERT(list_empty(&the_lnet.ln_drop_rules));
1104         LASSERT(list_empty(&the_lnet.ln_delay_rules));
1105         LASSERT(list_empty(&delay_dd.dd_sched_rules));
1106 }