LU-15595 lnet: LNet peer aliveness broken
lnet/lnet/lib-move.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         struct lnet_nid sd_dst_nid;
59         struct lnet_nid sd_src_nid;
60         struct lnet_nid sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
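/* Decide whether the response to a message (REPLY for a GET, ACK for a PUT)
 * should be tracked, based on the MD options and the lnet_response_tracking
 * module setting. */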
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicitly enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
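/* Copy the kernel-side send/receive/drop counters into the ioctl structure
 * returned to user space. */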
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
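/* Manage the "test peer" fault-injection table.  A non-zero threshold adds
 * an entry that will fail the next 'threshold' messages involving 'nid4';
 * a zero threshold removes matching entries (or all entries if 'nid4' is
 * LNET_NID_ANY), along with any already-exhausted entries. */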
192 int
193 lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         struct lnet_nid nid;
199         LIST_HEAD(cull);
200
201         lnet_nid4_to_nid(nid4, &nid);
202         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
203         if (threshold != 0) {
204                 /* Adding a new entry */
205                 LIBCFS_ALLOC(tp, sizeof(*tp));
206                 if (tp == NULL)
207                         return -ENOMEM;
208
209                 tp->tp_nid = nid;
210                 tp->tp_threshold = threshold;
211
212                 lnet_net_lock(0);
213                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
214                 lnet_net_unlock(0);
215                 return 0;
216         }
217
218         lnet_net_lock(0);
219
220         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
221                 tp = list_entry(el, struct lnet_test_peer, tp_list);
222
223                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
224                     LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
225                     nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
226                         list_move(&tp->tp_list, &cull);
227                 }
228         }
229
230         lnet_net_unlock(0);
231
232         while ((tp = list_first_entry_or_null(&cull,
233                                               struct lnet_test_peer,
234                                               tp_list)) != NULL) {
235                 list_del(&tp->tp_list);
236                 LIBCFS_FREE(tp, sizeof(*tp));
237         }
238         return 0;
239 }
240
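/* Return non-zero if a message to/from 'nid' should be failed, according to
 * the test peer entries added by lnet_fail_nid().  The matching entry's
 * threshold is decremented, and exhausted entries are culled on the outgoing
 * path only, since incoming messages may arrive at interrupt priority. */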
241 static int
242 fail_peer(struct lnet_nid *nid, int outgoing)
243 {
244         struct lnet_test_peer *tp;
245         struct list_head *el;
246         struct list_head *next;
247         LIST_HEAD(cull);
248         int fail = 0;
249
250         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
251         lnet_net_lock(0);
252
253         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
254                 tp = list_entry(el, struct lnet_test_peer, tp_list);
255
256                 if (tp->tp_threshold == 0) {
257                         /* zombie entry */
258                         if (outgoing) {
259                                 /* only cull zombies on outgoing tests,
260                                  * since we may be at interrupt priority on
261                                  * incoming messages. */
262                                 list_move(&tp->tp_list, &cull);
263                         }
264                         continue;
265                 }
266
267                 if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
268                     nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
269                         fail = 1;
270
271                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
272                                 tp->tp_threshold--;
273                                 if (outgoing &&
274                                     tp->tp_threshold == 0) {
275                                         /* see above */
276                                         list_move(&tp->tp_list, &cull);
277                                 }
278                         }
279                         break;
280                 }
281         }
282
283         lnet_net_unlock(0);
284
285         while ((tp = list_first_entry_or_null(&cull,
286                                               struct lnet_test_peer,
287                                               tp_list)) != NULL) {
288                 list_del(&tp->tp_list);
289                 LIBCFS_FREE(tp, sizeof(*tp));
290         }
291
292         return fail;
293 }
294
295 unsigned int
296 lnet_iov_nob(unsigned int niov, struct kvec *iov)
297 {
298         unsigned int nob = 0;
299
300         LASSERT(niov == 0 || iov != NULL);
301         while (niov-- > 0)
302                 nob += (iov++)->iov_len;
303
304         return (nob);
305 }
306 EXPORT_SYMBOL(lnet_iov_nob);
307
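/* Copy 'nob' bytes from the source kvec array, starting 'soffset' bytes in,
 * to the destination kvec array, starting 'doffset' bytes in, crossing
 * fragment boundaries as needed. */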
308 void
309 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
310                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
311                   unsigned int nob)
312 {
313         /* NB diov, siov are READ-ONLY */
314         unsigned int this_nob;
315
316         if (nob == 0)
317                 return;
318
319         /* skip complete frags before 'doffset' */
320         LASSERT(ndiov > 0);
321         while (doffset >= diov->iov_len) {
322                 doffset -= diov->iov_len;
323                 diov++;
324                 ndiov--;
325                 LASSERT(ndiov > 0);
326         }
327
328         /* skip complete frags before 'soffset' */
329         LASSERT(nsiov > 0);
330         while (soffset >= siov->iov_len) {
331                 soffset -= siov->iov_len;
332                 siov++;
333                 nsiov--;
334                 LASSERT(nsiov > 0);
335         }
336
337         do {
338                 LASSERT(ndiov > 0);
339                 LASSERT(nsiov > 0);
340                 this_nob = min3((unsigned int)diov->iov_len - doffset,
341                                 (unsigned int)siov->iov_len - soffset,
342                                 nob);
343
344                 memcpy((char *)diov->iov_base + doffset,
345                        (char *)siov->iov_base + soffset, this_nob);
346                 nob -= this_nob;
347
348                 if (diov->iov_len > doffset + this_nob) {
349                         doffset += this_nob;
350                 } else {
351                         diov++;
352                         ndiov--;
353                         doffset = 0;
354                 }
355
356                 if (siov->iov_len > soffset + this_nob) {
357                         soffset += this_nob;
358                 } else {
359                         siov++;
360                         nsiov--;
361                         soffset = 0;
362                 }
363         } while (nob > 0);
364 }
365 EXPORT_SYMBOL(lnet_copy_iov2iov);
366
367 unsigned int
368 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
369 {
370         unsigned int  nob = 0;
371
372         LASSERT(niov == 0 || kiov != NULL);
373         while (niov-- > 0)
374                 nob += (kiov++)->bv_len;
375
376         return (nob);
377 }
378 EXPORT_SYMBOL(lnet_kiov_nob);
379
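/* Copy 'nob' bytes between two bio_vec arrays, kmap()ing the source and
 * destination pages as the copy walks across fragments.  Must not be called
 * from interrupt context. */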
380 void
381 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
382                     unsigned int doffset,
383                     unsigned int nsiov, struct bio_vec *siov,
384                     unsigned int soffset,
385                     unsigned int nob)
386 {
387         /* NB diov, siov are READ-ONLY */
388         unsigned int    this_nob;
389         char           *daddr = NULL;
390         char           *saddr = NULL;
391
392         if (nob == 0)
393                 return;
394
395         LASSERT (!in_interrupt ());
396
397         LASSERT (ndiov > 0);
398         while (doffset >= diov->bv_len) {
399                 doffset -= diov->bv_len;
400                 diov++;
401                 ndiov--;
402                 LASSERT(ndiov > 0);
403         }
404
405         LASSERT(nsiov > 0);
406         while (soffset >= siov->bv_len) {
407                 soffset -= siov->bv_len;
408                 siov++;
409                 nsiov--;
410                 LASSERT(nsiov > 0);
411         }
412
413         do {
414                 LASSERT(ndiov > 0);
415                 LASSERT(nsiov > 0);
416                 this_nob = min3(diov->bv_len - doffset,
417                                 siov->bv_len - soffset,
418                                 nob);
419
420                 if (daddr == NULL)
421                         daddr = ((char *)kmap(diov->bv_page)) +
422                                 diov->bv_offset + doffset;
423                 if (saddr == NULL)
424                         saddr = ((char *)kmap(siov->bv_page)) +
425                                 siov->bv_offset + soffset;
426
427                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
428                  * However, in practice at least one of the kiovs will refer to
429                  * already-mapped kernel pages, so the map/unmap will be NOOPs */
430
431                 memcpy (daddr, saddr, this_nob);
432                 nob -= this_nob;
433
434                 if (diov->bv_len > doffset + this_nob) {
435                         daddr += this_nob;
436                         doffset += this_nob;
437                 } else {
438                         kunmap(diov->bv_page);
439                         daddr = NULL;
440                         diov++;
441                         ndiov--;
442                         doffset = 0;
443                 }
444
445                 if (siov->bv_len > soffset + this_nob) {
446                         saddr += this_nob;
447                         soffset += this_nob;
448                 } else {
449                         kunmap(siov->bv_page);
450                         saddr = NULL;
451                         siov++;
452                         nsiov--;
453                         soffset = 0;
454                 }
455         } while (nob > 0);
456
457         if (daddr != NULL)
458                 kunmap(diov->bv_page);
459         if (saddr != NULL)
460                 kunmap(siov->bv_page);
461 }
462 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
463
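/* Copy 'nob' bytes from a bio_vec array (kmap()ing its pages as needed) into
 * a kvec array.  Must not be called from interrupt context. */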
464 void
465 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
466                     unsigned int nkiov, struct bio_vec *kiov,
467                     unsigned int kiovoffset,
468                     unsigned int nob)
469 {
470         /* NB iov, kiov are READ-ONLY */
471         unsigned int    this_nob;
472         char           *addr = NULL;
473
474         if (nob == 0)
475                 return;
476
477         LASSERT (!in_interrupt ());
478
479         LASSERT (niov > 0);
480         while (iovoffset >= iov->iov_len) {
481                 iovoffset -= iov->iov_len;
482                 iov++;
483                 niov--;
484                 LASSERT(niov > 0);
485         }
486
487         LASSERT(nkiov > 0);
488         while (kiovoffset >= kiov->bv_len) {
489                 kiovoffset -= kiov->bv_len;
490                 kiov++;
491                 nkiov--;
492                 LASSERT(nkiov > 0);
493         }
494
495         do {
496                 LASSERT(niov > 0);
497                 LASSERT(nkiov > 0);
498                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
499                                 (unsigned int)kiov->bv_len - kiovoffset,
500                                 nob);
501
502                 if (addr == NULL)
503                         addr = ((char *)kmap(kiov->bv_page)) +
504                                 kiov->bv_offset + kiovoffset;
505
506                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
507                 nob -= this_nob;
508
509                 if (iov->iov_len > iovoffset + this_nob) {
510                         iovoffset += this_nob;
511                 } else {
512                         iov++;
513                         niov--;
514                         iovoffset = 0;
515                 }
516
517                 if (kiov->bv_len > kiovoffset + this_nob) {
518                         addr += this_nob;
519                         kiovoffset += this_nob;
520                 } else {
521                         kunmap(kiov->bv_page);
522                         addr = NULL;
523                         kiov++;
524                         nkiov--;
525                         kiovoffset = 0;
526                 }
527
528         } while (nob > 0);
529
530         if (addr != NULL)
531                 kunmap(kiov->bv_page);
532 }
533 EXPORT_SYMBOL(lnet_copy_kiov2iov);
534
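/* Copy 'nob' bytes from a kvec array into a bio_vec array, kmap()ing the
 * destination pages as needed.  Must not be called from interrupt context. */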
535 void
536 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
537                    unsigned int kiovoffset,
538                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
539                    unsigned int nob)
540 {
541         /* NB kiov, iov are READ-ONLY */
542         unsigned int    this_nob;
543         char           *addr = NULL;
544
545         if (nob == 0)
546                 return;
547
548         LASSERT (!in_interrupt ());
549
550         LASSERT (nkiov > 0);
551         while (kiovoffset >= kiov->bv_len) {
552                 kiovoffset -= kiov->bv_len;
553                 kiov++;
554                 nkiov--;
555                 LASSERT(nkiov > 0);
556         }
557
558         LASSERT(niov > 0);
559         while (iovoffset >= iov->iov_len) {
560                 iovoffset -= iov->iov_len;
561                 iov++;
562                 niov--;
563                 LASSERT(niov > 0);
564         }
565
566         do {
567                 LASSERT(nkiov > 0);
568                 LASSERT(niov > 0);
569                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
570                                 (unsigned int)iov->iov_len - iovoffset,
571                                 nob);
572
573                 if (addr == NULL)
574                         addr = ((char *)kmap(kiov->bv_page)) +
575                                 kiov->bv_offset + kiovoffset;
576
577                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
578                 nob -= this_nob;
579
580                 if (kiov->bv_len > kiovoffset + this_nob) {
581                         addr += this_nob;
582                         kiovoffset += this_nob;
583                 } else {
584                         kunmap(kiov->bv_page);
585                         addr = NULL;
586                         kiov++;
587                         nkiov--;
588                         kiovoffset = 0;
589                 }
590
591                 if (iov->iov_len > iovoffset + this_nob) {
592                         iovoffset += this_nob;
593                 } else {
594                         iov++;
595                         niov--;
596                         iovoffset = 0;
597                 }
598         } while (nob > 0);
599
600         if (addr != NULL)
601                 kunmap(kiov->bv_page);
602 }
603 EXPORT_SYMBOL(lnet_copy_iov2kiov);
604
605 int
606 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
607                   int src_niov, struct bio_vec *src,
608                   unsigned int offset, unsigned int len)
609 {
610         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
611          * for exactly 'len' bytes, and return the number of entries.
612          * NB not destructive to 'src' */
613         unsigned int    frag_len;
614         unsigned int    niov;
615
616         if (len == 0)                           /* no data => */
617                 return (0);                     /* no frags */
618
619         LASSERT(src_niov > 0);
620         while (offset >= src->bv_len) {      /* skip initial frags */
621                 offset -= src->bv_len;
622                 src_niov--;
623                 src++;
624                 LASSERT(src_niov > 0);
625         }
626
627         niov = 1;
628         for (;;) {
629                 LASSERT(src_niov > 0);
630                 LASSERT((int)niov <= dst_niov);
631
632                 frag_len = src->bv_len - offset;
633                 dst->bv_page = src->bv_page;
634                 dst->bv_offset = src->bv_offset + offset;
635
636                 if (len <= frag_len) {
637                         dst->bv_len = len;
638                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
639                         return niov;
640                 }
641
642                 dst->bv_len = frag_len;
643                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
644
645                 len -= frag_len;
646                 dst++;
647                 src++;
648                 niov++;
649                 src_niov--;
650                 offset = 0;
651         }
652 }
653 EXPORT_SYMBOL(lnet_extract_kiov);
654
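/* Ask the NI's LND to receive a message: deliver 'mlen' of the 'rlen' bytes
 * on the wire into the message's buffers starting at 'offset'.  If the LND
 * fails, the message is finalized with the error. */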
655 void
656 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
657              int delayed, unsigned int offset, unsigned int mlen,
658              unsigned int rlen)
659 {
660         unsigned int niov = 0;
661         struct kvec *iov = NULL;
662         struct bio_vec  *kiov = NULL;
663         int rc;
664
665         LASSERT (!in_interrupt ());
666         LASSERT (mlen == 0 || msg != NULL);
667
668         if (msg != NULL) {
669                 LASSERT(msg->msg_receiving);
670                 LASSERT(!msg->msg_sending);
671                 LASSERT(rlen == msg->msg_len);
672                 LASSERT(mlen <= msg->msg_len);
673                 LASSERT(msg->msg_offset == offset);
674                 LASSERT(msg->msg_wanted == mlen);
675
676                 msg->msg_receiving = 0;
677
678                 if (mlen != 0) {
679                         niov = msg->msg_niov;
680                         kiov = msg->msg_kiov;
681
682                         LASSERT (niov > 0);
683                         LASSERT ((iov == NULL) != (kiov == NULL));
684                 }
685         }
686
687         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
688                                              niov, kiov, offset, mlen,
689                                              rlen);
690         if (rc < 0)
691                 lnet_finalize(msg, rc);
692 }
693
694 static void
695 lnet_setpayloadbuffer(struct lnet_msg *msg)
696 {
697         struct lnet_libmd *md = msg->msg_md;
698
699         LASSERT(msg->msg_len > 0);
700         LASSERT(!msg->msg_routing);
701         LASSERT(md != NULL);
702         LASSERT(msg->msg_niov == 0);
703         LASSERT(msg->msg_kiov == NULL);
704
705         msg->msg_niov = md->md_niov;
706         msg->msg_kiov = md->md_kiov;
707 }
708
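/* Initialize an outgoing message: type, target, payload buffers and header.
 * The header's destination NID may still be rewritten and its source NID is
 * filled in later by the pathway selection code. */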
709 void
710 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
711                unsigned int offset, unsigned int len)
712 {
713         msg->msg_type = type;
714         msg->msg_target = *target;
715         msg->msg_len = len;
716         msg->msg_offset = offset;
717
718         if (len != 0)
719                 lnet_setpayloadbuffer(msg);
720
721         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
722         msg->msg_hdr.type           = type;
723         /* dest_nid will be overwritten by lnet_select_pathway() */
724         msg->msg_hdr.dest_nid = target->nid;
725         msg->msg_hdr.dest_pid = target->pid;
726         /* src_nid will be set later */
727         msg->msg_hdr.src_pid        = the_lnet.ln_pid;
728         msg->msg_hdr.payload_length = len;
729 }
730
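/* Hand a message that already holds its tx credits to the NI's LND for
 * transmission.  On LND failure the message is finalized with the error and
 * marked not to be resent. */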
731 void
732 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
733 {
734         void *priv = msg->msg_private;
735         int rc;
736
737         LASSERT(!in_interrupt());
738         LASSERT(nid_is_lo0(&ni->ni_nid) ||
739                 (msg->msg_txcredit && msg->msg_peertxcredit));
740
741         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
742         if (rc < 0) {
743                 msg->msg_no_resend = true;
744                 lnet_finalize(msg, rc);
745         }
746 }
747
748 static int
749 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
750 {
751         int     rc;
752
753         LASSERT(!msg->msg_sending);
754         LASSERT(msg->msg_receiving);
755         LASSERT(!msg->msg_rx_ready_delay);
756         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
757
758         msg->msg_rx_ready_delay = 1;
759         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
760                                                   &msg->msg_private);
761         if (rc != 0) {
762                 CERROR("recv from %s / send to %s aborted: "
763                        "eager_recv failed %d\n",
764                        libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
765                        libcfs_idstr(&msg->msg_target), rc);
766                 LASSERT(rc < 0); /* required by my callers */
767         }
768
769         return rc;
770 }
771
772 /* returns true if this message should be dropped */
773 static bool
774 lnet_check_message_drop(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
775                         struct lnet_msg *msg)
776 {
777         if (msg->msg_target.pid & LNET_PID_USERFLAG)
778                 return false;
779
780         if (!lnet_peer_aliveness_enabled(lpni))
781                 return false;
782
783         /* If we're resending a message, attempt to send it even if the
784          * peer is down, so that we use up the resend quota on the message
785          */
786         if (msg->msg_retry_count > 0)
787                 return false;
788
789         /* try to send recovery messages regardless */
790         if (msg->msg_recovery)
791                 return false;
792
793         /* always send any responses */
794         if (lnet_msg_is_response(msg))
795                 return false;
796
797         /* assume peer_ni is alive as long as we're within the configured
798          * peer timeout
799          */
800         return ktime_get_seconds() >=
801                 (lpni->lpni_last_alive +
802                  lpni->lpni_net->net_tunables.lct_peer_timeout);
803 }
804
805 /**
806  * \param msg The message to be sent.
807  * \param do_send True if lnet_ni_send() should be called in this function.
808  *        lnet_send() is going to lnet_net_unlock() immediately after this,
809  *        so it passes do_send as FALSE and skips the unlock/send/lock step.
810  *
811  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
812  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
813  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
814  * \retval -ECANCELED If the MD of the message has been unlinked.
815  */
816 static int
817 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
818 {
819         struct lnet_peer_ni     *lp = msg->msg_txpeer;
820         struct lnet_ni          *ni = msg->msg_txni;
821         int                     cpt = msg->msg_tx_cpt;
822         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
823
824         /* non-lnet_send() callers have checked before */
825         LASSERT(!do_send || msg->msg_tx_delayed);
826         LASSERT(!msg->msg_receiving);
827         LASSERT(msg->msg_tx_committed);
828
829         /* can't get here if we're sending to the loopback interface */
830         if (the_lnet.ln_loni)
831                 LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));
832
833         /* NB 'lp' is always the next hop */
834         if (lnet_check_message_drop(ni, lp, msg)) {
835                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
836                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
837                         msg->msg_len;
838                 lnet_net_unlock(cpt);
839                 if (msg->msg_txpeer)
840                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
841                                         msg->msg_type,
842                                         LNET_STATS_TYPE_DROP);
843                 if (msg->msg_txni)
844                         lnet_incr_stats(&msg->msg_txni->ni_stats,
845                                         msg->msg_type,
846                                         LNET_STATS_TYPE_DROP);
847
848                 CNETERR("Dropping message for %s: peer not alive\n",
849                         libcfs_idstr(&msg->msg_target));
850                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
851                 if (do_send)
852                         lnet_finalize(msg, -EHOSTUNREACH);
853
854                 lnet_net_lock(cpt);
855                 return -EHOSTUNREACH;
856         }
857
858         if (msg->msg_md != NULL &&
859             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
860                 lnet_net_unlock(cpt);
861
862                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
863                         "called on the MD/ME.\n",
864                         libcfs_idstr(&msg->msg_target));
865                 if (do_send) {
866                         msg->msg_no_resend = true;
867                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
868                                msg, libcfs_idstr(&msg->msg_target));
869                         lnet_finalize(msg, -ECANCELED);
870                 }
871
872                 lnet_net_lock(cpt);
873                 return -ECANCELED;
874         }
875
876         if (!msg->msg_peertxcredit) {
877                 spin_lock(&lp->lpni_lock);
878                 LASSERT((lp->lpni_txcredits < 0) ==
879                         !list_empty(&lp->lpni_txq));
880
881                 msg->msg_peertxcredit = 1;
882                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
883                 lp->lpni_txcredits--;
884
885                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
886                         lp->lpni_mintxcredits = lp->lpni_txcredits;
887
888                 if (lp->lpni_txcredits < 0) {
889                         msg->msg_tx_delayed = 1;
890                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
891                         spin_unlock(&lp->lpni_lock);
892                         return LNET_CREDIT_WAIT;
893                 }
894                 spin_unlock(&lp->lpni_lock);
895         }
896
897         if (!msg->msg_txcredit) {
898                 LASSERT((tq->tq_credits < 0) ==
899                         !list_empty(&tq->tq_delayed));
900
901                 msg->msg_txcredit = 1;
902                 tq->tq_credits--;
903                 atomic_dec(&ni->ni_tx_credits);
904
905                 if (tq->tq_credits < tq->tq_credits_min)
906                         tq->tq_credits_min = tq->tq_credits;
907
908                 if (tq->tq_credits < 0) {
909                         msg->msg_tx_delayed = 1;
910                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
911                         return LNET_CREDIT_WAIT;
912                 }
913         }
914
915         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
916             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
917                 msg->msg_tx_delayed = 1;
918                 return LNET_CREDIT_WAIT;
919         }
920
921         /* unset the tx_delay flag as we're going to send it now */
922         msg->msg_tx_delayed = 0;
923
924         if (do_send) {
925                 lnet_net_unlock(cpt);
926                 lnet_ni_send(ni, msg);
927                 lnet_net_lock(cpt);
928         }
929         return LNET_CREDIT_OK;
930 }
931
932
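/* Pick the smallest router buffer pool whose buffers can hold this routed
 * message's payload. */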
933 static struct lnet_rtrbufpool *
934 lnet_msg2bufpool(struct lnet_msg *msg)
935 {
936         struct lnet_rtrbufpool  *rbp;
937         int                     cpt;
938
939         LASSERT(msg->msg_rx_committed);
940
941         cpt = msg->msg_rx_cpt;
942         rbp = &the_lnet.ln_rtrpools[cpt][0];
943
944         LASSERT(msg->msg_len <= LNET_MTU);
945         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
946                 rbp++;
947                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
948         }
949
950         return rbp;
951 }
952
953 static int
954 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
955 {
956         /* lnet_parse() is going to lnet_net_unlock immediately after this, so
957          * it passes do_recv as FALSE and skips the unlock/recv/lock step.
958          * Returns LNET_CREDIT_WAIT if the msg blocked and LNET_CREDIT_OK if it
959          * was received or is OK to receive */
960         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
961         struct lnet_peer *lp;
962         struct lnet_rtrbufpool *rbp;
963         struct lnet_rtrbuf *rb;
964
965         LASSERT(msg->msg_kiov == NULL);
966         LASSERT(msg->msg_niov == 0);
967         LASSERT(msg->msg_routing);
968         LASSERT(msg->msg_receiving);
969         LASSERT(!msg->msg_sending);
970         LASSERT(lpni->lpni_peer_net);
971         LASSERT(lpni->lpni_peer_net->lpn_peer);
972
973         lp = lpni->lpni_peer_net->lpn_peer;
974
975         /* non-lnet_parse callers only receive delayed messages */
976         LASSERT(!do_recv || msg->msg_rx_delayed);
977
978         if (!msg->msg_peerrtrcredit) {
979                 /* lpni_lock protects the credit manipulation */
980                 spin_lock(&lpni->lpni_lock);
981
982                 msg->msg_peerrtrcredit = 1;
983                 lpni->lpni_rtrcredits--;
984                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
985                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
986
987                 if (lpni->lpni_rtrcredits < 0) {
988                         spin_unlock(&lpni->lpni_lock);
989                         /* must have checked eager_recv before here */
990                         LASSERT(msg->msg_rx_ready_delay);
991                         msg->msg_rx_delayed = 1;
992                         /* lp_lock protects the lp_rtrq */
993                         spin_lock(&lp->lp_lock);
994                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
995                         spin_unlock(&lp->lp_lock);
996                         return LNET_CREDIT_WAIT;
997                 }
998                 spin_unlock(&lpni->lpni_lock);
999         }
1000
1001         rbp = lnet_msg2bufpool(msg);
1002
1003         if (!msg->msg_rtrcredit) {
1004                 msg->msg_rtrcredit = 1;
1005                 rbp->rbp_credits--;
1006                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1007                         rbp->rbp_mincredits = rbp->rbp_credits;
1008
1009                 if (rbp->rbp_credits < 0) {
1010                         /* must have checked eager_recv before here */
1011                         LASSERT(msg->msg_rx_ready_delay);
1012                         msg->msg_rx_delayed = 1;
1013                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1014                         return LNET_CREDIT_WAIT;
1015                 }
1016         }
1017
1018         LASSERT(!list_empty(&rbp->rbp_bufs));
1019         rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list);
1020         list_del(&rb->rb_list);
1021
1022         msg->msg_niov = rbp->rbp_npages;
1023         msg->msg_kiov = &rb->rb_kiov[0];
1024
1025         /* unset the msg_rx_delayed flag since we're receiving the message */
1026         msg->msg_rx_delayed = 0;
1027
1028         if (do_recv) {
1029                 int cpt = msg->msg_rx_cpt;
1030
1031                 lnet_net_unlock(cpt);
1032                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1033                              0, msg->msg_len, msg->msg_len);
1034                 lnet_net_lock(cpt);
1035         }
1036         return LNET_CREDIT_OK;
1037 }
1038
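/* Give back the NI and peer tx credits held by a message and, if other
 * messages were blocked waiting for those credits, post the first queued
 * message on each of the NI and peer tx queues. */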
1039 void
1040 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1041 {
1042         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1043         struct lnet_ni          *txni = msg->msg_txni;
1044         struct lnet_msg         *msg2;
1045
1046         if (msg->msg_txcredit) {
1047                 struct lnet_ni       *ni = msg->msg_txni;
1048                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1049
1050                 /* give back NI txcredits */
1051                 msg->msg_txcredit = 0;
1052
1053                 LASSERT((tq->tq_credits < 0) ==
1054                         !list_empty(&tq->tq_delayed));
1055
1056                 tq->tq_credits++;
1057                 atomic_inc(&ni->ni_tx_credits);
1058                 if (tq->tq_credits <= 0) {
1059                         msg2 = list_first_entry(&tq->tq_delayed,
1060                                                 struct lnet_msg, msg_list);
1061                         list_del(&msg2->msg_list);
1062
1063                         LASSERT(msg2->msg_txni == ni);
1064                         LASSERT(msg2->msg_tx_delayed);
1065                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1066
1067                         (void) lnet_post_send_locked(msg2, 1);
1068                 }
1069         }
1070
1071         if (msg->msg_peertxcredit) {
1072                 /* give back peer txcredits */
1073                 msg->msg_peertxcredit = 0;
1074
1075                 spin_lock(&txpeer->lpni_lock);
1076                 LASSERT((txpeer->lpni_txcredits < 0) ==
1077                         !list_empty(&txpeer->lpni_txq));
1078
1079                 txpeer->lpni_txqnob -=  msg->msg_len +
1080                                         sizeof(struct lnet_hdr_nid4);
1081                 LASSERT(txpeer->lpni_txqnob >= 0);
1082
1083                 txpeer->lpni_txcredits++;
1084                 if (txpeer->lpni_txcredits <= 0) {
1085                         int msg2_cpt;
1086
1087                         msg2 = list_first_entry(&txpeer->lpni_txq,
1088                                                 struct lnet_msg, msg_list);
1089                         list_del(&msg2->msg_list);
1090                         spin_unlock(&txpeer->lpni_lock);
1091
1092                         LASSERT(msg2->msg_txpeer == txpeer);
1093                         LASSERT(msg2->msg_tx_delayed);
1094
1095                         msg2_cpt = msg2->msg_tx_cpt;
1096
1097                         /*
1098                          * The msg_cpt can be different from the msg2_cpt
1099                          * so we need to make sure we lock the correct cpt
1100                          * for msg2.
1101                          * Once we call lnet_post_send_locked() it is no
1102                          * longer safe to access msg2, since it could've
1103                          * been freed by lnet_finalize(), but we still
1104                          * need to relock the correct cpt, so we cache the
1105                          * msg2_cpt for the purpose of the check that
1106                          * follows the call to lnet_pose_send_locked().
1107                          * follows the call to lnet_post_send_locked().
1108                         if (msg2_cpt != msg->msg_tx_cpt) {
1109                                 lnet_net_unlock(msg->msg_tx_cpt);
1110                                 lnet_net_lock(msg2_cpt);
1111                         }
1112                         (void) lnet_post_send_locked(msg2, 1);
1113                         if (msg2_cpt != msg->msg_tx_cpt) {
1114                                 lnet_net_unlock(msg2_cpt);
1115                                 lnet_net_lock(msg->msg_tx_cpt);
1116                         }
1117                 } else {
1118                         spin_unlock(&txpeer->lpni_lock);
1119                 }
1120         }
1121
1122         if (txni != NULL) {
1123                 msg->msg_txni = NULL;
1124                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1125         }
1126
1127         if (txpeer != NULL) {
1128                 msg->msg_txpeer = NULL;
1129                 lnet_peer_ni_decref_locked(txpeer);
1130         }
1131 }
1132
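/* Post the first message blocked waiting for a buffer from this router
 * buffer pool, if any. */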
1133 void
1134 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1135 {
1136         struct lnet_msg *msg;
1137
1138         if (list_empty(&rbp->rbp_msgs))
1139                 return;
1140         msg = list_first_entry(&rbp->rbp_msgs,
1141                                struct lnet_msg, msg_list);
1142         list_del(&msg->msg_list);
1143
1144         (void)lnet_post_routed_recv_locked(msg, 1);
1145 }
1146
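/* Drop a list of routed messages that can no longer be forwarded, finalizing
 * each one with -ECANCELED. */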
1147 void
1148 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1149 {
1150         struct lnet_msg *msg;
1151         struct lnet_msg *tmp;
1152
1153         lnet_net_unlock(cpt);
1154
1155         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1156                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1157                              0, 0, 0, msg->msg_hdr.payload_length);
1158                 list_del_init(&msg->msg_list);
1159                 msg->msg_no_resend = true;
1160                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1161                 lnet_finalize(msg, -ECANCELED);
1162         }
1163
1164         lnet_net_lock(cpt);
1165 }
1166
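/* Give back the router buffer and peer router credits held by a routed
 * message.  If routing has been disabled, queued routed messages are dropped;
 * otherwise the next blocked routed message, if any, is posted. */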
1167 void
1168 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1169 {
1170         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1171         struct lnet_peer *lp;
1172         struct lnet_ni *rxni = msg->msg_rxni;
1173         struct lnet_msg *msg2;
1174
1175         if (msg->msg_rtrcredit) {
1176                 /* give back global router credits */
1177                 struct lnet_rtrbuf *rb;
1178                 struct lnet_rtrbufpool *rbp;
1179
1180                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1181                  * there until it gets one allocated, or aborts the wait
1182                  * itself */
1183                 LASSERT(msg->msg_kiov != NULL);
1184
1185                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1186                 rbp = rb->rb_pool;
1187
1188                 msg->msg_kiov = NULL;
1189                 msg->msg_rtrcredit = 0;
1190
1191                 LASSERT(rbp == lnet_msg2bufpool(msg));
1192
1193                 LASSERT((rbp->rbp_credits > 0) ==
1194                         !list_empty(&rbp->rbp_bufs));
1195
1196                 /* If routing is now turned off, we just drop this buffer and
1197                  * don't bother trying to return credits.  */
1198                 if (!the_lnet.ln_routing) {
1199                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1200                         goto routing_off;
1201                 }
1202
1203                 /* It is possible that a user has lowered the desired number of
1204                  * buffers in this pool.  Make sure we never put back
1205                  * more buffers than the stated number. */
1206                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1207                         /* Discard this buffer so we don't have too
1208                          * many. */
1209                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1210                         rbp->rbp_nbuffers--;
1211                 } else {
1212                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1213                         rbp->rbp_credits++;
1214                         if (rbp->rbp_credits <= 0)
1215                                 lnet_schedule_blocked_locked(rbp);
1216                 }
1217         }
1218
1219 routing_off:
1220         if (msg->msg_peerrtrcredit) {
1221                 LASSERT(rxpeerni);
1222                 LASSERT(rxpeerni->lpni_peer_net);
1223                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1224
1225                 /* give back peer router credits */
1226                 msg->msg_peerrtrcredit = 0;
1227
1228                 spin_lock(&rxpeerni->lpni_lock);
1229                 rxpeerni->lpni_rtrcredits++;
1230                 spin_unlock(&rxpeerni->lpni_lock);
1231
1232                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1233                 spin_lock(&lp->lp_lock);
1234
1235                 /* drop all messages which are queued to be routed on that
1236                  * peer. */
1237                 if (!the_lnet.ln_routing) {
1238                         LIST_HEAD(drop);
1239                         list_splice_init(&lp->lp_rtrq, &drop);
1240                         spin_unlock(&lp->lp_lock);
1241                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1242                 } else if (!list_empty(&lp->lp_rtrq)) {
1243                         int msg2_cpt;
1244
1245                         msg2 = list_first_entry(&lp->lp_rtrq,
1246                                                 struct lnet_msg, msg_list);
1247                         list_del(&msg2->msg_list);
1248                         msg2_cpt = msg2->msg_rx_cpt;
1249                         spin_unlock(&lp->lp_lock);
1250                         /*
1251                          * messages on the lp_rtrq can be from any NID in
1252                          * the peer, which means they might have different
1253                          * cpts. We need to make sure we lock the right
1254                          * one.
1255                          */
1256                         if (msg2_cpt != msg->msg_rx_cpt) {
1257                                 lnet_net_unlock(msg->msg_rx_cpt);
1258                                 lnet_net_lock(msg2_cpt);
1259                         }
1260                         (void) lnet_post_routed_recv_locked(msg2, 1);
1261                         if (msg2_cpt != msg->msg_rx_cpt) {
1262                                 lnet_net_unlock(msg2_cpt);
1263                                 lnet_net_lock(msg->msg_rx_cpt);
1264                         }
1265                 } else {
1266                         spin_unlock(&lp->lp_lock);
1267                 }
1268         }
1269         if (rxni != NULL) {
1270                 msg->msg_rxni = NULL;
1271                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1272         }
1273         if (rxpeerni != NULL) {
1274                 msg->msg_rxpeer = NULL;
1275                 lnet_peer_ni_decref_locked(rxpeerni);
1276         }
1277 }
1278
1279 static struct lnet_peer_ni *
1280 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1281                     struct lnet_peer *peer,
1282                     struct lnet_peer_ni *best_lpni,
1283                     struct lnet_peer_net *peer_net)
1284 {
1285         /*
1286          * Look at the peer NIs for the destination peer that connect
1287          * to the chosen net. If a peer_ni is preferred when using the
1288          * best_ni to communicate, we use that one. If there is no
1289          * preferred peer_ni, or there are multiple preferred peer_ni,
1290          * the available transmit credits are used. If the transmit
1291          * credits are equal, we round-robin over the peer_ni.
1292          */
1293         struct lnet_peer_ni *lpni = NULL;
1294         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1295                 INT_MIN;
1296         int best_lpni_healthv = (best_lpni) ?
1297                 atomic_read(&best_lpni->lpni_healthv) : 0;
1298         bool best_lpni_is_preferred = false;
1299         bool lpni_is_preferred;
1300         int lpni_healthv;
1301         __u32 lpni_sel_prio;
1302         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1303
1304         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1305                 /*
1306                  * if the best_ni we've chosen already has this lpni
1307                  * preferred, then let's use it
1308                  */
1309                 if (best_ni) {
1310                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(
1311                                 lpni, &best_ni->ni_nid);
1312                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1313                                libcfs_nidstr(&best_ni->ni_nid),
1314                                lpni_is_preferred);
1315                 } else {
1316                         lpni_is_preferred = false;
1317                 }
1318
1319                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1320                 lpni_sel_prio = lpni->lpni_sel_priority;
1321
1322                 if (best_lpni)
1323                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1324                                 libcfs_nidstr(&lpni->lpni_nid),
1325                                 libcfs_nidstr(&best_lpni->lpni_nid),
1326                                 lpni_healthv, best_lpni_healthv,
1327                                 lpni_sel_prio, best_sel_prio,
1328                                 lpni->lpni_txcredits, best_lpni_credits,
1329                                 lpni->lpni_seq, best_lpni->lpni_seq);
1330                 else
1331                         goto select_lpni;
1332
1333                 /* pick the healthiest peer ni */
1334                 if (lpni_healthv < best_lpni_healthv)
1335                         continue;
1336                 else if (lpni_healthv > best_lpni_healthv) {
1337                         if (best_lpni_is_preferred)
1338                                 best_lpni_is_preferred = false;
1339                         goto select_lpni;
1340                 }
1341
1342                 if (lpni_sel_prio > best_sel_prio)
1343                         continue;
1344                 else if (lpni_sel_prio < best_sel_prio) {
1345                         if (best_lpni_is_preferred)
1346                                 best_lpni_is_preferred = false;
1347                         goto select_lpni;
1348                 }
1349
1350                 /* if this is a preferred peer use it */
1351                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1352                         best_lpni_is_preferred = true;
1353                         goto select_lpni;
1354                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1355                         /* this is not the preferred peer so let's ignore
1356                          * it.
1357                          */
1358                         continue;
1359                 }
1360
1361                 if (lpni->lpni_txcredits < best_lpni_credits)
1362                         /* We already have a peer that has more credits
1363                          * available than this one. No need to consider
1364                          * this peer further.
1365                          */
1366                         continue;
1367                 else if (lpni->lpni_txcredits > best_lpni_credits)
1368                         goto select_lpni;
1369
1370                 /* The best peer found so far and the current peer
1371                  * have the same number of available credits let's
1372                  * make sure to select between them using Round Robin
1373                  */
1374                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1375                         continue;
1376 select_lpni:
1377                 best_lpni_is_preferred = lpni_is_preferred;
1378                 best_lpni_healthv = lpni_healthv;
1379                 best_sel_prio = lpni_sel_prio;
1380                 best_lpni = lpni;
1381                 best_lpni_credits = lpni->lpni_txcredits;
1382         }
1383
1384         /* if we still can't find a peer ni then we can't reach it */
1385         if (!best_lpni) {
1386                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1387                         LNET_NIDNET(dst_nid);
1388                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1389                                 libcfs_net2str(net_id));
1390                 return NULL;
1391         }
1392
1393         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1394                libcfs_nidstr(&best_lpni->lpni_nid));
1395
1396         return best_lpni;
1397 }
1398
1399 /*
1400  * Prerequisite: the best_ni should already be set in the sd
1401  * Find the best lpni.
1402  * If the net id is provided then restrict lpni selection on
1403  * that particular net.
1404  * Otherwise find any reachable lpni. When dealing with an MR
1405  * gateway that has multiple lpnis we can use, we want to
1406  * select the best one from the list of reachable
1407  * ones.
1408  */
1409 static inline struct lnet_peer_ni *
1410 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1411                     struct lnet_peer *peer, __u32 net_id)
1412 {
1413         struct lnet_peer_net *peer_net;
1414
1415         /* find the best_lpni on any local network */
1416         if (net_id == LNET_NET_ANY) {
1417                 struct lnet_peer_ni *best_lpni = NULL;
1418                 struct lnet_peer_net *lpn;
1419                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1420                         /* no net specified find any reachable peer ni */
1421                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1422                                 continue;
1423                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1424                                                         best_lpni, lpn);
1425                 }
1426
1427                 return best_lpni;
1428         }
1429         /* restrict on the specified net */
1430         peer_net = lnet_peer_get_net_locked(peer, net_id);
1431         if (peer_net)
1432                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1433
1434         return NULL;
1435 }
1436
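/* Compare two gateway peer NIs by queued bytes and then by available tx
 * credits.  Returns 1 if lpni1 is preferable, -1 if lpni2 is preferable and
 * 0 if they are equivalent. */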
1437 static int
1438 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1439 {
1440         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1441                 return 1;
1442
1443         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1444                 return -1;
1445
1446         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1447                 return 1;
1448
1449         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1450                 return -1;
1451
1452         return 0;
1453 }
1454
1455 /* Compare route priorities and hop counts */
1456 static int
1457 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1458 {
1459         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1460         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1461
1462         if (r1->lr_priority < r2->lr_priority)
1463                 return 1;
1464
1465         if (r1->lr_priority > r2->lr_priority)
1466                 return -1;
1467
1468         if (r1_hops < r2_hops)
1469                 return 1;
1470
1471         if (r1_hops > r2_hops)
1472                 return -1;
1473
1474         return 0;
1475 }
1476
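/*
 * Select the best route to the remote net. Dead routes are skipped,
 * routes whose gateway is on the peer's preferred router list are
 * chosen ahead of any others, and the remaining candidates are
 * compared by priority and hop count, then by the gateway peer NI's
 * queue depth and credits, and finally by route sequence number for
 * round robin. Gateway NI selection is restricted to src_net, and the
 * most recently used route is returned through prev_route so the
 * caller can update the sequence number of the route it picks.
 */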
1477 static struct lnet_route *
1478 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1479                        struct lnet_peer_ni *remote_lpni,
1480                        struct lnet_route **prev_route,
1481                        struct lnet_peer_ni **gwni)
1482 {
1483         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1484         struct lnet_route *best_route;
1485         struct lnet_route *last_route;
1486         struct lnet_route *route;
1487         int rc;
1488         bool best_rte_is_preferred = false;
1489         struct lnet_nid *gw_pnid;
1490
1491         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1492                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1493
1494         best_route = last_route = NULL;
1495         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1496                 if (!lnet_is_route_alive(route))
1497                         continue;
1498                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1499
1500                 /* no protection on the fields below, but it's harmless */
1501                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1502                         last_route = route;
1503
1504                 /* if the best route found is in the preferred list then
1505                  * tag it as preferred and use it later on. But if we
1506                  * didn't find any routes which are on the preferred list
1507                  * then just use the best route possible.
1508                  */
1509                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1510
1511                 if (!best_route || (rc && !best_rte_is_preferred)) {
1512                         /* Restrict the selection of the router NI on the
1513                          * src_net provided. If the src_net is LNET_NET_ANY,
1514                          * then select the best interface available.
1515                          */
1516                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1517                                                    route->lr_gateway,
1518                                                    src_net);
1519                         if (!lpni) {
1520                                 CDEBUG(D_NET,
1521                                        "Gateway %s does not have a peer NI on net %s\n",
1522                                        libcfs_nidstr(gw_pnid),
1523                                        libcfs_net2str(src_net));
1524                                 continue;
1525                         }
1526                 }
1527
1528                 if (rc && !best_rte_is_preferred) {
1529                         /* This is the first preferred route we found,
1530                          * so it beats any route found previously
1531                          */
1532                         best_route = route;
1533                         if (!last_route)
1534                                 last_route = route;
1535                         best_gw_ni = lpni;
1536                         best_rte_is_preferred = true;
1537                         CDEBUG(D_NET, "preferred gw = %s\n",
1538                                libcfs_nidstr(gw_pnid));
1539                         continue;
1540                 } else if ((!rc) && best_rte_is_preferred)
1541                         /* The best route we found so far is in the preferred
1542                          * list, so it beats any non-preferred route
1543                          */
1544                         continue;
1545
1546                 if (!best_route) {
1547                         best_route = last_route = route;
1548                         best_gw_ni = lpni;
1549                         continue;
1550                 }
1551
1552                 rc = lnet_compare_routes(route, best_route);
1553                 if (rc == -1)
1554                         continue;
1555
1556                 /* Restrict the selection of the router NI on the
1557                  * src_net provided. If the src_net is LNET_NET_ANY,
1558                  * then select the best interface available.
1559                  */
1560                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1561                                            route->lr_gateway,
1562                                            src_net);
1563                 if (!lpni) {
1564                         CDEBUG(D_NET,
1565                                "Gateway %s does not have a peer NI on net %s\n",
1566                                libcfs_nidstr(gw_pnid),
1567                                libcfs_net2str(src_net));
1568                         continue;
1569                 }
1570
1571                 if (rc == 1) {
1572                         best_route = route;
1573                         best_gw_ni = lpni;
1574                         continue;
1575                 }
1576
1577                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1578                 if (rc == -1)
1579                         continue;
1580
1581                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1582                         best_route = route;
1583                         best_gw_ni = lpni;
1584                         continue;
1585                 }
1586         }
1587
1588         *prev_route = last_route;
1589         *gwni = best_gw_ni;
1590
1591         return best_route;
1592 }
1593
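/*
 * Ask the LND for the priority of this NI's device relative to the
 * device identified by dev_idx (the device backing the MD buffer,
 * e.g. a GPU). UINT_MAX means there is no usable preference, either
 * because dev_idx is unknown or because the LND does not implement
 * lnd_get_dev_prio.
 */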
1594 static inline unsigned int
1595 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1596 {
1597         if (dev_idx == UINT_MAX)
1598                 return UINT_MAX;
1599
1600         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1601             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1602                 return UINT_MAX;
1603
1604         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1605 }
1606
1607 static struct lnet_ni *
1608 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1609                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1610                  struct lnet_msg *msg, int md_cpt)
1611 {
1612         struct lnet_libmd *md = msg->msg_md;
1613         unsigned int offset = msg->msg_offset;
1614         unsigned int shortest_distance;
1615         struct lnet_ni *ni = NULL;
1616         int best_credits;
1617         int best_healthv;
1618         __u32 best_sel_prio;
1619         unsigned int best_dev_prio;
1620         int best_ni_fatal;
1621         unsigned int dev_idx = UINT_MAX;
1622         bool gpu = md ? (md->md_flags & LNET_MD_FLAG_GPU) : false;
1623
1624         if (gpu) {
1625                 struct page *page = lnet_get_first_page(md, offset);
1626
1627                 dev_idx = lnet_get_dev_idx(page);
1628         }
1629
1630         /*
1631          * If there is no peer_ni that we can send to on this network,
1632          * then there is no point in looking for a new best_ni here.
1633          */
1634         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1635                 return best_ni;
1636
1637         if (best_ni == NULL) {
1638                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1639                 shortest_distance = UINT_MAX;
1640                 best_dev_prio = UINT_MAX;
1641                 best_credits = INT_MIN;
1642                 best_healthv = 0;
1643                 best_ni_fatal = true;
1644         } else {
1645                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1646                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1647                                                      best_ni->ni_dev_cpt);
1648                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1649                 best_healthv = atomic_read(&best_ni->ni_healthv);
1650                 best_sel_prio = best_ni->ni_sel_priority;
1651                 best_ni_fatal = atomic_read(&best_ni->ni_fatal_error_on);
1652         }
1653
1654         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1655                 unsigned int distance;
1656                 int ni_credits;
1657                 int ni_healthv;
1658                 int ni_fatal;
1659                 __u32 ni_sel_prio;
1660                 unsigned int ni_dev_prio;
1661
1662                 ni_credits = atomic_read(&ni->ni_tx_credits);
1663                 ni_healthv = atomic_read(&ni->ni_healthv);
1664                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1665                 ni_sel_prio = ni->ni_sel_priority;
1666
1667                 /*
1668                  * calculate the distance from the CPT on which
1669                  * the message memory is allocated to the CPT of
1670                  * the NI's physical device
1671                  */
1672                 distance = cfs_cpt_distance(lnet_cpt_table(),
1673                                             md_cpt,
1674                                             ni->ni_dev_cpt);
1675
1676                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1677
1678                 /*
1679                  * All distances smaller than the NUMA range
1680                  * are treated equally.
1681                  */
1682                 if (!gpu && distance < lnet_numa_range)
1683                         distance = lnet_numa_range;
1684
1685                 /*
1686                  * Select on health, selection policy, direct dma prio,
1687                  * shorter distance, available credits, then round-robin.
1688                  */
1689                 if (best_ni)
1690                         CDEBUG(D_NET, "compare ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1691                                libcfs_nidstr(&ni->ni_nid),
1692                                ni_fatal ? "y" : "n", ni_credits, distance,
1693                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1694                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1695                                : "not selected",
1696                                best_ni_fatal ? "y" : "n", best_credits,
1697                                shortest_distance,
1698                                (best_ni) ? best_ni->ni_seq : 0,
1699                                best_sel_prio, best_dev_prio, best_healthv);
1700                 else
1701                         goto select_ni;
1702
1703                 if (ni_fatal && !best_ni_fatal)
1704                         continue;
1705                 else if (!ni_fatal && best_ni_fatal)
1706                         goto select_ni;
1707
1708                 if (ni_healthv < best_healthv)
1709                         continue;
1710                 else if (ni_healthv > best_healthv)
1711                         goto select_ni;
1712
1713                 if (ni_sel_prio > best_sel_prio)
1714                         continue;
1715                 else if (ni_sel_prio < best_sel_prio)
1716                         goto select_ni;
1717
1718                 if (ni_dev_prio > best_dev_prio)
1719                         continue;
1720                 else if (ni_dev_prio < best_dev_prio)
1721                         goto select_ni;
1722
1723                 if (distance > shortest_distance)
1724                         continue;
1725                 else if (distance < shortest_distance)
1726                         goto select_ni;
1727
1728                 if (ni_credits < best_credits)
1729                         continue;
1730                 else if (ni_credits > best_credits)
1731                         goto select_ni;
1732
1733                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1734                         continue;
1735
1736 select_ni:
1737                 best_sel_prio = ni_sel_prio;
1738                 best_dev_prio = ni_dev_prio;
1739                 shortest_distance = distance;
1740                 best_healthv = ni_healthv;
1741                 best_ni = ni;
1742                 best_credits = ni_credits;
1743                 best_ni_fatal = ni_fatal;
1744         }
1745
1746         CDEBUG(D_NET, "selected best_ni %s\n",
1747                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1748
1749         return best_ni;
1750 }
1751
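/* Return true if this is a PUT or GET on the LNET_RESERVED_PORTAL,
 * which carries LNet-internal traffic such as discovery and recovery
 * pings.
 */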
1752 static bool
1753 lnet_reserved_msg(struct lnet_msg *msg)
1754 {
1755         if (msg->msg_type == LNET_MSG_PUT) {
1756                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1757                         return true;
1758         } else if (msg->msg_type == LNET_MSG_GET) {
1759                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1760                         return true;
1761         }
1762         return false;
1763 }
1764
1765 /*
1766  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1767  * because such traffic is required to perform discovery. We therefore
1768  * exclude all GET and PUT on that portal. We also exclude all ACK and
1769  * REPLY traffic, but that is because the portal is not tracked in the
1770  * message structure for these message types. We could restrict this
1771  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1772  */
1773 static bool
1774 lnet_msg_discovery(struct lnet_msg *msg)
1775 {
1776         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1777 }
1778
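/*
 * Flags making up a send case. A send case ORs together one source
 * flag (SRC_SPEC or SRC_ANY), one destination locality flag
 * (LOCAL_DST or REMOTE_DST) and one peer type flag (MR_DST or
 * NMR_DST), plus SND_RESP when the message is an ACK or REPLY.
 */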
1779 #define SRC_SPEC        0x0001
1780 #define SRC_ANY         0x0002
1781 #define LOCAL_DST       0x0004
1782 #define REMOTE_DST      0x0008
1783 #define MR_DST          0x0010
1784 #define NMR_DST         0x0020
1785 #define SND_RESP        0x0040
1786
1787 /* The following two defines are used for return codes */
1788 #define REPEAT_SEND     0x1000
1789 #define PASS_THROUGH    0x2000
1790
1791 /* The different cases lnet_select pathway needs to handle */
1792 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1793 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1794 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1795 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1796 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1797 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1798 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1799 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
1800
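/*
 * Send to ourselves over the loopback NI. LOLND does no credit
 * accounting, so we only take a reference on ln_loni, point the
 * header and target NIDs at the loopback NI and commit the message
 * on the current CPT.
 */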
1801 static int
1802 lnet_handle_lo_send(struct lnet_send_data *sd)
1803 {
1804         struct lnet_msg *msg = sd->sd_msg;
1805         int cpt = sd->sd_cpt;
1806
1807         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1808                 return -ESHUTDOWN;
1809
1810         /* No send credit hassles with LOLND */
1811         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1812         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1813         if (!msg->msg_routing)
1814                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1815         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1816         lnet_msg_commit(msg, cpt);
1817         msg->msg_txni = the_lnet.ln_loni;
1818
1819         return LNET_CREDIT_OK;
1820 }
1821
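/*
 * Send the message using the NI and peer NI already selected in the
 * send data: advance the round-robin sequence numbers, switch to the
 * CPT of the destination NID if it differs from the one we hold,
 * fill in the header source/destination NIDs (the final destination
 * when routing) and hand the message to lnet_post_send_locked().
 * Returns REPEAT_SEND if the configuration changed while relocking.
 */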
1822 static int
1823 lnet_handle_send(struct lnet_send_data *sd)
1824 {
1825         struct lnet_ni *best_ni = sd->sd_best_ni;
1826         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1827         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1828         struct lnet_msg *msg = sd->sd_msg;
1829         int cpt2;
1830         __u32 send_case = sd->sd_send_case;
1831         int rc;
1832         __u32 routing = send_case & REMOTE_DST;
1833         struct lnet_rsp_tracker *rspt;
1834
1835         /* Increment sequence number of the selected peer, peer net,
1836          * local ni and local net so that we pick the next ones
1837          * in Round Robin.
1838          */
1839         best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1840         best_lpni->lpni_peer_net->lpn_seq =
1841                 best_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1842         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1843         the_lnet.ln_net_seq++;
1844         best_ni->ni_net->net_seq = the_lnet.ln_net_seq;
1845         best_ni->ni_seq = best_ni->ni_net->net_seq;
1846
1847         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1848                libcfs_nidstr(&best_ni->ni_nid),
1849                best_ni->ni_seq, best_ni->ni_net->net_seq,
1850                atomic_read(&best_ni->ni_tx_credits),
1851                best_ni->ni_sel_priority,
1852                libcfs_nidstr(&best_lpni->lpni_nid),
1853                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1854                best_lpni->lpni_txcredits,
1855                best_lpni->lpni_sel_priority);
1856
1857         /*
1858          * grab a reference on the peer_ni so it sticks around even if
1859          * we need to drop and relock the lnet_net_lock below.
1860          */
1861         lnet_peer_ni_addref_locked(best_lpni);
1862
1863         /*
1864          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1865          * message. This ensures that we get a CPT that is correct for
1866          * the NI when the NI has been restricted to a subset of all CPTs.
1867          * If the selected CPT differs from the one currently locked, we
1868          * must unlock and relock the lnet_net_lock(), and then check whether
1869          * the configuration has changed. We don't have a hold on the best_ni
1870          * yet, and it may have vanished.
1871          */
1872         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1873         if (sd->sd_cpt != cpt2) {
1874                 __u32 seq = lnet_get_dlc_seq_locked();
1875                 lnet_net_unlock(sd->sd_cpt);
1876                 sd->sd_cpt = cpt2;
1877                 lnet_net_lock(sd->sd_cpt);
1878                 if (seq != lnet_get_dlc_seq_locked()) {
1879                         lnet_peer_ni_decref_locked(best_lpni);
1880                         return REPEAT_SEND;
1881                 }
1882         }
1883
1884         /*
1885          * store the best_lpni in the message right away to avoid having
1886          * to do the same operation under different conditions
1887          */
1888         msg->msg_txpeer = best_lpni;
1889         msg->msg_txni = best_ni;
1890
1891         /*
1892          * grab a reference for the best_ni since now it's in use in this
1893          * send. The reference will be dropped in lnet_finalize()
1894          */
1895         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1896
1897         /*
1898          * Always set the target.nid to the best peer picked. Either the
1899          * NID will be one of the peer NIDs selected, the same NID that
1900          * was originally set in the target, or the NID of a router if
1901          * this message should be routed.
1902          */
1903         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1904
1905         /*
1906          * lnet_msg_commit assigns the correct cpt to the message, which
1907          * is used to decrement the correct refcount on the ni when it's
1908          * time to return the credits
1909          */
1910         lnet_msg_commit(msg, sd->sd_cpt);
1911
1912         /*
1913          * If we are routing the message then we keep the src_nid that was
1914          * set by the originator. If we are not routing then we are the
1915          * originator and set it here.
1916          */
1917         if (!msg->msg_routing)
1918                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1919
1920         if (routing) {
1921                 msg->msg_target_is_router = 1;
1922                 msg->msg_target.pid = LNET_PID_LUSTRE;
1923                 /*
1924                  * since we're routing we want to ensure that the
1925                  * msg_hdr.dest_nid is set to the final destination. When
1926                  * the router receives this message it knows how to route
1927                  * it.
1928                  *
1929                  * final_dst_lpni is set at the beginning of the
1930                  * lnet_select_pathway() function and is never changed.
1931                  * It's safe to use it here.
1932                  */
1933                 final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1934                 final_dst_lpni->lpni_peer_net->lpn_seq =
1935                         final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1936                 final_dst_lpni->lpni_seq =
1937                         final_dst_lpni->lpni_peer_net->lpn_seq;
1938                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1939         } else {
1940                 /*
1941                  * if we're not routing set the dest_nid to the best peer
1942                  * ni NID that we picked earlier in the algorithm.
1943                  */
1944                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1945         }
1946
1947         /*
1948          * if we have response tracker block update it with the next hop
1949          * nid
1950          */
1951         if (msg->msg_md) {
1952                 rspt = msg->msg_md->md_rspt_ptr;
1953                 if (rspt) {
1954                         rspt->rspt_next_hop_nid =
1955                                 msg->msg_txpeer->lpni_nid;
1956                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1957                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1958                 }
1959         }
1960
1961         rc = lnet_post_send_locked(msg, 0);
1962
1963         if (!rc)
1964                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1965                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1966                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1967                        libcfs_nidstr(&sd->sd_src_nid),
1968                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1969                        libcfs_nidstr(&sd->sd_dst_nid),
1970                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1971                        libcfs_nidstr(&sd->sd_rtr_nid),
1972                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1973
1974         return rc;
1975 }
1976
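/*
 * For a non-MR peer with no preferred NID recorded yet, remember the
 * local NI used for this send so that subsequent messages (other than
 * responses) keep a consistent source NID.
 */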
1977 static inline void
1978 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1979                          struct lnet_msg *msg)
1980 {
1981         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1982             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1983                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1984                        libcfs_nidstr(&lni->ni_nid),
1985                        libcfs_nidstr(&lpni->lpni_nid));
1986                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
1987         }
1988 }
1989
1990 /*
1991  * Source Specified
1992  * Local Destination
1993  * non-mr peer
1994  *
1995  * use the source and destination NIDs as the pathway
1996  */
1997 static int
1998 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1999 {
2000         /* the destination lpni is set before we get here. */
2001
2002         /* find local NI */
2003         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2004         if (!sd->sd_best_ni) {
2005                 CERROR("Can't send to %s: src %s is not a local nid\n",
2006                        libcfs_nidstr(&sd->sd_dst_nid),
2007                        libcfs_nidstr(&sd->sd_src_nid));
2008                 return -EINVAL;
2009         }
2010
2011         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2012
2013         return lnet_handle_send(sd);
2014 }
2015
2016 /*
2017  * Source Specified
2018  * Local Destination
2019  * MR Peer
2020  *
2021  * Don't run the selection algorithm on the peer NIs. By specifying the
2022  * local NID, we're also saying that we should always use the destination NID
2023  * provided. This handles the case where we should be using the same
2024  * destination NID for the all the messages which belong to the same RPC
2025  * destination NID for all the messages which belong to the same RPC
2026  */
2027 static int
2028 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2029 {
2030         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2031         if (!sd->sd_best_ni) {
2032                 CERROR("Can't send to %s: src %s is not a local nid\n",
2033                        libcfs_nidstr(&sd->sd_dst_nid),
2034                        libcfs_nidstr(&sd->sd_src_nid));
2035                 return -EINVAL;
2036         }
2037
2038         if (sd->sd_best_lpni &&
2039             nid_same(&sd->sd_best_lpni->lpni_nid,
2040                       &the_lnet.ln_loni->ni_nid))
2041                 return lnet_handle_lo_send(sd);
2042         else if (sd->sd_best_lpni)
2043                 return lnet_handle_send(sd);
2044
2045         CERROR("can't send to %s. no NI on %s\n",
2046                libcfs_nidstr(&sd->sd_dst_nid),
2047                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2048
2049         return -EHOSTUNREACH;
2050 }
2051
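/*
 * Select the best local NI for sending to the given peer net. The
 * peer net must correspond to a network configured on this node,
 * otherwise NULL is returned.
 */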
2052 struct lnet_ni *
2053 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2054                               struct lnet_peer *peer,
2055                               struct lnet_peer_net *peer_net,
2056                               struct lnet_msg *msg,
2057                               int cpt)
2058 {
2059         struct lnet_net *local_net;
2060         struct lnet_ni *best_ni;
2061
2062         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2063         if (!local_net)
2064                 return NULL;
2065
2066         /*
2067          * Iterate through the NIs in this local Net and select
2068          * the NI to send from. The selection is determined by
2069          * these criteria in the following priority:
2070          *      1. NI health and selection priority
2071          *      2. Device priority, NUMA distance and available credits
2072          *      3. Round Robin
2073          */
2074         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2075                                    peer, peer_net, msg, cpt);
2076
2077         return best_ni;
2078 }
2079
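/*
 * Kick off discovery of the peer behind lpni if this message is
 * allowed to trigger it and the peer is not already up to date or
 * being discovered as a gateway. If discovery is needed the message
 * is queued on the peer and LNET_DC_WAIT is returned; otherwise
 * return 0, or a negative errno on failure.
 */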
2080 static int
2081 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2082                              int cpt)
2083 {
2084         struct lnet_peer *peer;
2085         struct lnet_peer_ni *new_lpni;
2086         int rc;
2087
2088         lnet_peer_ni_addref_locked(lpni);
2089
2090         peer = lpni->lpni_peer_net->lpn_peer;
2091
2092         if (lnet_peer_gw_discovery(peer)) {
2093                 lnet_peer_ni_decref_locked(lpni);
2094                 return 0;
2095         }
2096
2097         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2098                 lnet_peer_ni_decref_locked(lpni);
2099                 return 0;
2100         }
2101
2102         rc = lnet_discover_peer_locked(lpni, cpt, false);
2103         if (rc) {
2104                 lnet_peer_ni_decref_locked(lpni);
2105                 return rc;
2106         }
2107
2108         new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid);
2109         if (!new_lpni) {
2110                 lnet_peer_ni_decref_locked(lpni);
2111                 return -ENOENT;
2112         }
2113
2114         peer = new_lpni->lpni_peer_net->lpn_peer;
2115         spin_lock(&peer->lp_lock);
2116         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2117                 /* The peer NI did not change and the peer is up to date.
2118                  * Nothing more to do.
2119                  */
2120                 spin_unlock(&peer->lp_lock);
2121                 lnet_peer_ni_decref_locked(lpni);
2122                 lnet_peer_ni_decref_locked(new_lpni);
2123                 return 0;
2124         }
2125         spin_unlock(&peer->lp_lock);
2126
2127         /* Either the peer NI changed during discovery, or the peer isn't up
2128          * to date. In both cases we want to queue the message on the
2129          * (possibly new) peer's pending queue and queue the peer for discovery
2130          */
2131         msg->msg_sending = 0;
2132         msg->msg_txpeer = NULL;
2133         lnet_net_unlock(cpt);
2134         lnet_peer_queue_message(peer, msg);
2135         lnet_net_lock(cpt);
2136
2137         lnet_peer_ni_decref_locked(lpni);
2138         lnet_peer_ni_decref_locked(new_lpni);
2139
2140         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2141                msg, libcfs_nidstr(&peer->lp_primary_nid));
2142
2143         return LNET_DC_WAIT;
2144 }
2145
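/*
 * Find a routed path to dst_nid. If a router NID was specified (we
 * are replying to a GET or sending an ACK) its gateway is used
 * directly. Otherwise pick the best remote peer net and peer NI to
 * round robin over, then the best route and gateway NI via
 * lnet_find_route_locked(). Gateway discovery is initiated when the
 * router checker is not running, and a local NI on the gateway's net
 * is selected if the caller has not already chosen one. On success
 * *gw_lpni and *gw_peer describe the next hop.
 */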
2146 static int
2147 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2148                              struct lnet_nid *dst_nid,
2149                              struct lnet_peer_ni **gw_lpni,
2150                              struct lnet_peer **gw_peer)
2151 {
2152         int rc;
2153         struct lnet_peer *gw;
2154         struct lnet_peer *lp;
2155         struct lnet_peer_net *lpn;
2156         struct lnet_peer_net *best_lpn = NULL;
2157         struct lnet_remotenet *rnet, *best_rnet = NULL;
2158         struct lnet_route *best_route = NULL;
2159         struct lnet_route *last_route = NULL;
2160         struct lnet_peer_ni *lpni = NULL;
2161         struct lnet_peer_ni *gwni = NULL;
2162         bool route_found = false;
2163         struct lnet_nid *src_nid =
2164                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2165                 ? &sd->sd_src_nid
2166                 : &sd->sd_best_ni->ni_nid;
2167         int best_lpn_healthv = 0;
2168         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2169
2170         CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n",
2171                LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified",
2172                libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid),
2173                libcfs_nidstr(&sd->sd_dst_nid));
2174
2175         /* If a router nid was specified then we are replying to a GET or
2176          * sending an ACK. In this case we use the gateway associated with the
2177          * specified router nid.
2178          */
2179         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2180                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2181                 if (gwni) {
2182                         gw = gwni->lpni_peer_net->lpn_peer;
2183                         lnet_peer_ni_decref_locked(gwni);
2184                         if (gw->lp_rtr_refcount)
2185                                 route_found = true;
2186                 } else {
2187                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2188                               libcfs_nidstr(&sd->sd_rtr_nid));
2189                 }
2190         }
2191
2192         if (!route_found) {
2193                 if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) {
2194                         /* If I'm routing this message then I need to find the
2195                          * next hop based on the destination NID
2196                          *
2197                          * We also find next hop based on the destination NID
2198                          * if the source NI was specified
2199                          */
2200                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2201                         if (!best_rnet) {
2202                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2203                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2204                                                 "any local NI" :
2205                                                 libcfs_nidstr(src_nid),
2206                                        libcfs_nidstr(&sd->sd_dst_nid));
2207                                 return -EHOSTUNREACH;
2208                         }
2209                         CDEBUG(D_NET, "best_rnet %s\n",
2210                                libcfs_net2str(best_rnet->lrn_net));
2211                 } else {
2212                         /* we've already looked up the initial lpni using
2213                          * dst_nid
2214                          */
2215                         lpni = sd->sd_best_lpni;
2216                         /* the peer tree must be in existence */
2217                         LASSERT(lpni && lpni->lpni_peer_net &&
2218                                 lpni->lpni_peer_net->lpn_peer);
2219                         lp = lpni->lpni_peer_net->lpn_peer;
2220
2221                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2222                                 /* is this remote network reachable?  */
2223                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2224                                 if (!rnet)
2225                                         continue;
2226
2227                                 if (!best_lpn)
2228                                         goto use_lpn;
2229                                 else
2230                                         CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n",
2231                                                libcfs_net2str(lpn->lpn_net_id),
2232                                                libcfs_net2str(best_lpn->lpn_net_id),
2233                                                lpn->lpn_healthv,
2234                                                best_lpn->lpn_healthv,
2235                                                lpn->lpn_sel_priority,
2236                                                best_lpn->lpn_sel_priority,
2237                                                lpn->lpn_seq,
2238                                                best_lpn->lpn_seq);
2239
2240                                 /* select the preferred peer net */
2241                                 if (best_lpn_healthv > lpn->lpn_healthv)
2242                                         continue;
2243                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2244                                         goto use_lpn;
2245
2246                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2247                                         continue;
2248                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2249                                         goto use_lpn;
2250
2251                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2252                                         continue;
2253 use_lpn:
2254                                 best_lpn_healthv = lpn->lpn_healthv;
2255                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2256                                 best_lpn = lpn;
2257                                 best_rnet = rnet;
2258                         }
2259
2260                         if (!best_lpn) {
2261                                 CERROR("peer %s has no available nets\n",
2262                                        libcfs_nidstr(&sd->sd_dst_nid));
2263                                 return -EHOSTUNREACH;
2264                         }
2265
2266                         CDEBUG(D_NET, "selected best_lpn %s\n",
2267                                libcfs_net2str(best_lpn->lpn_net_id));
2268
2269                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2270                                                                lnet_nid_to_nid4(&sd->sd_dst_nid),
2271                                                                lp,
2272                                                                best_lpn->lpn_net_id);
2273                         if (!sd->sd_best_lpni) {
2274                                 CERROR("peer %s is unreachable\n",
2275                                        libcfs_nidstr(&sd->sd_dst_nid));
2276                                 return -EHOSTUNREACH;
2277                         }
2278
2279                         /* We're attempting to round robin over the remote peer
2280                          * NI's so update the final destination we selected
2281                          */
2282                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2283                 }
2284
2285                 /*
2286                  * find the best route. Restrict the selection on the net of the
2287                  * local NI if we've already picked the local NI to send from.
2288                  * Otherwise, let's pick any route we can find and then find
2289                  * a local NI we can reach the route's gateway on. Any route we
2290                  * select will be reachable by virtue of the restriction we have
2291                  * when adding a route.
2292                  */
2293                 best_route = lnet_find_route_locked(best_rnet,
2294                                                     LNET_NID_NET(src_nid),
2295                                                     sd->sd_best_lpni,
2296                                                     &last_route, &gwni);
2297
2298                 if (!best_route) {
2299                         CERROR("no route to %s from %s\n",
2300                                libcfs_nidstr(dst_nid),
2301                                libcfs_nidstr(src_nid));
2302                         return -EHOSTUNREACH;
2303                 }
2304
2305                 if (!gwni) {
2306                         CERROR("Internal Error. Route expected to %s from %s\n",
2307                                libcfs_nidstr(dst_nid),
2308                                libcfs_nidstr(src_nid));
2309                         return -EFAULT;
2310                 }
2311
2312                 gw = best_route->lr_gateway;
2313                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2314         }
2315
2316         /*
2317          * If the router checker is not active then discover the gateway here.
2318          * This ensures we are able to take advantage of multi-rail routing, but
2319          * if the router checker is active then we do not unnecessarily delay
2320          * messages while the gateway is being checked by the dedicated monitor
2321          * thread.
2322          *
2323          * NB: We're only checking the alive_router_check_interval here, rather
2324          * than calling lnet_router_checker_active(), because the other
2325          * conditions that are checked by that function are either
2326          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2327          * is not empty)
2328          */
2329         if (alive_router_check_interval <= 0) {
2330                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2331                 if (rc)
2332                         return rc;
2333         }
2334
2335         if (!sd->sd_best_ni) {
2336                 lpn = gwni->lpni_peer_net;
2337                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2338                                                                sd->sd_msg,
2339                                                                sd->sd_md_cpt);
2340                 if (!sd->sd_best_ni) {
2341                         CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2342                                libcfs_net2str(lpn->lpn_net_id),
2343                                libcfs_nidstr(&sd->sd_src_nid));
2344                         return -EFAULT;
2345                 }
2346         }
2347
2348         *gw_lpni = gwni;
2349         *gw_peer = gw;
2350
2351         /*
2352          * increment the sequence number since now we're sure we're
2353          * going to use this route
2354          */
2355         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2356                 LASSERT(best_route && last_route);
2357                 best_route->lr_seq = last_route->lr_seq + 1;
2358         }
2359
2360         return 0;
2361 }
2362
2363 /*
2364  * Handle two cases:
2365  *
2366  * Case 1:
2367  *  Source specified
2368  *  Remote destination
2369  *  Non-MR destination
2370  *
2371  * Case 2:
2372  *  Source specified
2373  *  Remote destination
2374  *  MR destination
2375  *
2376  * The handling of these two cases is similar. Even though the destination
2377  * can be MR or non-MR, we'll deal directly with the router.
2378  */
2379 static int
2380 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2381 {
2382         int rc;
2383         struct lnet_peer_ni *gw_lpni = NULL;
2384         struct lnet_peer *gw_peer = NULL;
2385
2386         /* find local NI */
2387         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2388         if (!sd->sd_best_ni) {
2389                 CERROR("Can't send to %s: src %s is not a local nid\n",
2390                        libcfs_nidstr(&sd->sd_dst_nid),
2391                        libcfs_nidstr(&sd->sd_src_nid));
2392                 return -EINVAL;
2393         }
2394
2395         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2396                                           &gw_lpni, &gw_peer);
2397         if (rc)
2398                 return rc;
2399
2400         if (sd->sd_send_case & NMR_DST)
2401                 /*
2402                  * since the final destination is non-MR let's set its preferred
2403                  * NID before we send
2404                  */
2405                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2406                                          sd->sd_msg);
2407
2408         /*
2409          * We're going to send to the gw found so let's set its
2410          * info
2411          */
2412         sd->sd_peer = gw_peer;
2413         sd->sd_best_lpni = gw_lpni;
2414
2415         return lnet_handle_send(sd);
2416 }
2417
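/*
 * Pick the best local NI for sending to this peer. Choose among the
 * peer's nets that are also configured locally, comparing peer net
 * health, selection priority, local net health and priority, and
 * sequence numbers for round robin, then select the best NI on the
 * winning net. Discovery messages are pinned to lp_disc_net_id when
 * it is set.
 */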
2418 struct lnet_ni *
2419 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2420                                struct lnet_msg *msg, bool discovery)
2421 {
2422         struct lnet_peer_net *lpn = NULL;
2423         struct lnet_peer_net *best_lpn = NULL;
2424         struct lnet_net *net = NULL;
2425         struct lnet_net *best_net = NULL;
2426         struct lnet_ni *best_ni = NULL;
2427         int best_lpn_healthv = 0;
2428         int best_net_healthv = 0;
2429         int net_healthv;
2430         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2431         __u32 lpn_sel_prio;
2432         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2433         __u32 net_sel_prio;
2434
2435         /* if this is a discovery message and lp_disc_net_id is
2436          * specified then use that net to send the discovery on.
2437          */
2438         if (discovery && peer->lp_disc_net_id) {
2439                 best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id);
2440                 if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id))
2441                         goto select_best_ni;
2442         }
2443
2444         /*
2445          * The peer can have multiple interfaces, some of them can be on
2446          * the local network and others on a routed network. We should
2447          * prefer the local network. However, if the local network is not
2448          * available then we need to try the routed network.
2449          */
2450
2451         /* go through all the peer nets and find the best_ni */
2452         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2453                 /*
2454                  * The peer's list of nets can contain non-local nets. We
2455                  * want to only examine the local ones.
2456                  */
2457                 net = lnet_get_net_locked(lpn->lpn_net_id);
2458                 if (!net)
2459                         continue;
2460
2461                 lpn_sel_prio = lpn->lpn_sel_priority;
2462                 net_healthv = lnet_get_net_healthv_locked(net);
2463                 net_sel_prio = net->net_sel_priority;
2464
2465                 if (!best_lpn)
2466                         goto select_lpn;
2467                 else
2468                         CDEBUG(D_NET,
2469                                "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n",
2470                                libcfs_net2str(lpn->lpn_net_id),
2471                                libcfs_net2str(best_lpn->lpn_net_id),
2472                                lpn->lpn_healthv,
2473                                best_lpn_healthv,
2474                                lpn_sel_prio,
2475                                best_lpn_sel_prio,
2476                                net_healthv,
2477                                best_net_healthv,
2478                                net_sel_prio,
2479                                best_net_sel_prio,
2480                                lpn->lpn_seq,
2481                                best_lpn->lpn_seq,
2482                                net->net_seq,
2483                                best_net->net_seq);
2484
2485                 /* always select the lpn with the best health */
2486                 if (best_lpn_healthv > lpn->lpn_healthv)
2487                         continue;
2488                 else if (best_lpn_healthv < lpn->lpn_healthv)
2489                         goto select_lpn;
2490
2491                 /* select the preferred peer and local nets */
2492                 if (best_lpn_sel_prio < lpn_sel_prio)
2493                         continue;
2494                 else if (best_lpn_sel_prio > lpn_sel_prio)
2495                         goto select_lpn;
2496
2497                 if (best_net_healthv > net_healthv)
2498                         continue;
2499                 else if (best_net_healthv < net_healthv)
2500                         goto select_lpn;
2501
2502                 if (best_net_sel_prio < net_sel_prio)
2503                         continue;
2504                 else if (best_net_sel_prio > net_sel_prio)
2505                         goto select_lpn;
2506
2507                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2508                         continue;
2509                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2510                         goto select_lpn;
2511
2512                 /* round robin over the local networks */
2513                 if (best_net->net_seq <= net->net_seq)
2514                         continue;
2515
2516 select_lpn:
2517                 best_net_healthv = net_healthv;
2518                 best_net_sel_prio = net_sel_prio;
2519                 best_lpn_healthv = lpn->lpn_healthv;
2520                 best_lpn_sel_prio = lpn_sel_prio;
2521                 best_lpn = lpn;
2522                 best_net = net;
2523         }
2524
2525         if (best_lpn) {
2526                 /* Select the best NI on the same net as best_lpn chosen
2527                  * above
2528                  */
2529 select_best_ni:
2530                 CDEBUG(D_NET, "selected best_lpn %s\n",
2531                        libcfs_net2str(best_lpn->lpn_net_id));
2532                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2533                                                         msg, md_cpt);
2534         }
2535
2536         return best_ni;
2537 }
2538
2539 static struct lnet_ni *
2540 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2541 {
2542         struct lnet_ni *best_ni = NULL;
2543         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2544         struct lnet_peer_ni *lpni_entry;
2545
2546         /*
2547          * We must use a consistent source address when sending to a
2548          * non-MR peer. However, a non-MR peer can have multiple NIDs
2549          * on multiple networks, and we may even need to talk to this
2550          * peer on multiple networks -- certain types of
2551          * load-balancing configuration do this.
2552          *
2553          * So we need to pick the NI the peer prefers for this
2554          * particular network.
2555          */
2556         LASSERT(peer_net);
2557         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2558                             lpni_peer_nis) {
2559                 if (lpni_entry->lpni_pref_nnids == 0)
2560                         continue;
2561                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2562                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2563                                                 cpt);
2564                 break;
2565         }
2566
2567         return best_ni;
2568 }
2569
2570 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2571 static int
2572 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2573 {
2574         struct lnet_ni *best_ni = NULL;
2575
2576         /*
2577          * We must use a consistent source address when sending to a
2578          * non-MR peer. However, a non-MR peer can have multiple NIDs
2579          * on multiple networks, and we may even need to talk to this
2580          * peer on multiple networks -- certain types of
2581          * load-balancing configuration do this.
2582          *
2583          * So we need to pick the NI the peer prefers for this
2584          * particular network.
2585          *
2586          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2587          * traffic doesn't care which source NI is used, and we don't actually
2588          * want to restrict local recovery pings to a single source NI.
2589          */
2590         if (!lnet_reserved_msg(sd->sd_msg))
2591                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2592                                                                sd->sd_cpt);
2593
2594         if (!best_ni)
2595                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2596                                                 sd->sd_best_lpni->lpni_peer_net,
2597                                                 sd->sd_msg,
2598                                                 sd->sd_md_cpt);
2599
2600         /* If there is no best_ni we don't have a route */
2601         if (!best_ni) {
2602                 CERROR("no path to %s from net %s\n",
2603                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2604                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2605                 return -EHOSTUNREACH;
2606         }
2607
2608         sd->sd_best_ni = best_ni;
2609
2610         /* Set preferred NI if necessary. */
2611         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2612
2613         return 0;
2614 }
2615
2616
2617 /*
2618  * Source not specified
2619  * Local destination
2620  * Non-MR Peer
2621  *
2622  * always use the same source NID for NMR peers
2623  * If we've talked to that peer before then we already have a preferred
2624  * source NI associated with it. Otherwise, we select a preferred local NI
2625  * and store it in the peer
2626  */
2627 static int
2628 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2629 {
2630         int rc = 0;
2631
2632         /* sd->sd_best_lpni is already set to the final destination */
2633
2634         /*
2635          * At this point we should've created the peer ni and peer. If we
2636          * can't find it, then something went wrong. Instead of asserting,
2637          * output a relevant message and fail the send
2638          */
2639         if (!sd->sd_best_lpni) {
2640                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2641                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2642                        libcfs_nidstr(&sd->sd_dst_nid));
2643                 return -EFAULT;
2644         }
2645
2646         if (sd->sd_msg->msg_routing) {
2647                 /* If I'm forwarding this message then I can choose any NI
2648                  * on the destination peer net
2649                  */
2650                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2651                                                                sd->sd_peer,
2652                                                                sd->sd_best_lpni->lpni_peer_net,
2653                                                                sd->sd_msg,
2654                                                                sd->sd_md_cpt);
2655                 if (!sd->sd_best_ni) {
2656                         CERROR("Unable to forward message to %s. No local NI available\n",
2657                                libcfs_nidstr(&sd->sd_dst_nid));
2658                         rc = -EHOSTUNREACH;
2659                 }
2660         } else
2661                 rc = lnet_select_preferred_best_ni(sd);
2662
2663         if (!rc)
2664                 rc = lnet_handle_send(sd);
2665
2666         return rc;
2667 }
2668
2669 static int
2670 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2671 {
2672         /*
2673          * NOTE we've already handled the remote peer case. So we only
2674          * need to worry about the local case here.
2675          *
2676          * if we're sending a response, ACK or reply, we need to send it
2677          * to the destination NID given to us. At this point we already
2678          * have the peer_ni we're supposed to send to, so just find the
2679          * best_ni on the peer net and use that. Since we're sending to an
2680          * MR peer then we can just run the selection algorithm on our
2681          * local NIs and pick the best one.
2682          */
2683         if (sd->sd_send_case & SND_RESP) {
2684                 sd->sd_best_ni =
2685                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2686                                                 sd->sd_best_lpni->lpni_peer_net,
2687                                                 sd->sd_msg,
2688                                                 sd->sd_md_cpt);
2689
2690                 if (!sd->sd_best_ni) {
2691                         /*
2692                          * We're not going to deal with not being able to send
2693                          * a response to the provided final destination
2694                          */
2695                         CERROR("Can't send response to %s. No local NI available\n",
2696                                 libcfs_nidstr(&sd->sd_dst_nid));
2697                         return -EHOSTUNREACH;
2698                 }
2699
2700                 return lnet_handle_send(sd);
2701         }
2702
2703         /*
2704          * If we get here that means we're sending a fresh request, PUT or
2705          * GET, so we need to run our standard selection algorithm.
2706          * First find the best local interface that's on any of the peer's
2707          * networks.
2708          */
2709         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2710                                         sd->sd_md_cpt,
2711                                         sd->sd_msg,
2712                                         lnet_msg_discovery(sd->sd_msg));
2713         if (sd->sd_best_ni) {
2714                 sd->sd_best_lpni =
2715                   lnet_find_best_lpni(sd->sd_best_ni,
2716                                              lnet_nid_to_nid4(&sd->sd_dst_nid),
2717                                       sd->sd_peer,
2718                                       sd->sd_best_ni->ni_net->net_id);
2719
2720                 /*
2721                  * if we're successful in selecting a peer_ni on the local
2722                  * network, then send to it. Otherwise fall through and
2723                  * try and see if we can reach it over another routed
2724                  * network
2725                  */
2726                 if (sd->sd_best_lpni &&
2727                     nid_same(&sd->sd_best_lpni->lpni_nid,
2728                              &the_lnet.ln_loni->ni_nid)) {
2729                         /*
2730                          * in case we initially started with a routed
2731                          * destination, let's reset to local
2732                          */
2733                         sd->sd_send_case &= ~REMOTE_DST;
2734                         sd->sd_send_case |= LOCAL_DST;
2735                         return lnet_handle_lo_send(sd);
2736                 } else if (sd->sd_best_lpni) {
2737                         /*
2738                          * in case we initially started with a routed
2739                          * destination, let's reset to local
2740                          */
2741                         sd->sd_send_case &= ~REMOTE_DST;
2742                         sd->sd_send_case |= LOCAL_DST;
2743                         return lnet_handle_send(sd);
2744                 }
2745
2746                 CERROR("Internal Error. Expected to have a best_lpni: "
2747                        "%s -> %s\n",
2748                        libcfs_nidstr(&sd->sd_src_nid),
2749                        libcfs_nidstr(&sd->sd_dst_nid));
2750
2751                 return -EFAULT;
2752         }
2753
2754         /*
2755          * Peer doesn't have a local network. Let's see if there is
2756          * a remote network we can reach it on.
2757          */
2758         return PASS_THROUGH;
2759 }
2760
2761 /*
2762  * Case 1:
2763  *      Source NID not specified
2764  *      Local destination
2765  *      MR peer
2766  *
2767  * Case 2:
2768  *      Source NID not specified
2769  *      Remote destination
2770  *      MR peer
2771  *
2772  * In both of these cases if we're sending a response, ACK or REPLY, then
2773  * we need to send to the destination NID provided.
2774  *
2775  * In the remote case let's deal with MR routers.
2776  *
2777  */
2778
2779 static int
2780 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2781 {
2782         int rc = 0;
2783         struct lnet_peer *gw_peer = NULL;
2784         struct lnet_peer_ni *gw_lpni = NULL;
2785
2786         /*
2787          * handle sending a response to a remote peer here so we don't
2788          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2789          */
2790         if (sd->sd_send_case & REMOTE_DST &&
2791             sd->sd_send_case & SND_RESP) {
2792                 struct lnet_peer_ni *gw;
2793                 struct lnet_peer *gw_peer;
2794
2795                 rc = lnet_handle_find_routed_path(
2796                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2797                 if (rc < 0) {
2798                         CERROR("Can't send response to %s. No route available\n",
2799                                libcfs_nidstr(&sd->sd_dst_nid));
2800                         return -EHOSTUNREACH;
2801                 } else if (rc > 0) {
2802                         return rc;
2803                 }
2804
2805                 sd->sd_best_lpni = gw;
2806                 sd->sd_peer = gw_peer;
2807
2808                 return lnet_handle_send(sd);
2809         }
2810
2811         /*
2812          * Even though the NID for the peer might not be on a local network,
2813          * since the peer is MR there could be other interfaces on the
2814          * local network. In that case we'd still like to prefer the local
2815          * network over the routed network. If we're unable to do that
2816          * then we select the best router among the different routed networks,
2817          * and if the router is MR then we can deal with it as such.
2818          */
2819         rc = lnet_handle_any_mr_dsta(sd);
2820         if (rc != PASS_THROUGH)
2821                 return rc;
2822
2823         /*
2824          * Now that we must route to the destination, we must consider the
2825          * MR case, where the destination has multiple interfaces, some of
2826          * which we can route to and others we cannot. For this reason we
2827          * need to select the destination which we can route to and if
2828          * there are multiple, we need to round robin.
2829          */
2830         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2831                                           &gw_lpni, &gw_peer);
2832         if (rc)
2833                 return rc;
2834
2835         sd->sd_send_case &= ~LOCAL_DST;
2836         sd->sd_send_case |= REMOTE_DST;
2837
2838         sd->sd_peer = gw_peer;
2839         sd->sd_best_lpni = gw_lpni;
2840
2841         return lnet_handle_send(sd);
2842 }
2843
2844 /*
2845  * Source not specified
2846  * Remote destination
2847  * Non-MR peer
2848  *
2849  * Must send to the specified peer NID using the same source NID that
2850  * we've used before. If this is the first time we talk to that peer, then
2851  * find the source NI and assign it as the preferred NI for that peer.
2852  */
2853 static int
2854 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2855 {
2856         int rc;
2857         struct lnet_peer_ni *gw_lpni = NULL;
2858         struct lnet_peer *gw_peer = NULL;
2859
2860         /*
2861          * Let's see if we have a preferred NI to talk to this NMR peer
2862          */
2863         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2864                                                               sd->sd_cpt);
2865
2866         /*
2867          * find the router and that'll find the best NI if we didn't find
2868          * it already.
2869          */
2870         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2871                                           &gw_peer);
2872         if (rc)
2873                 return rc;
2874
2875         /*
2876          * set the best_ni we've chosen as the preferred one for
2877          * this peer
2878          */
2879         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2880
2881         /* we'll be sending to the gw */
2882         sd->sd_best_lpni = gw_lpni;
2883         sd->sd_peer = gw_peer;
2884
2885         return lnet_handle_send(sd);
2886 }
2887
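/*
 * Dispatch the message to the handler that matches its send case
 * (source specified or not, local or routed destination, MR or non-MR
 * peer). Returns the handler's result, or -1 for an unrecognized
 * combination of send case bits.
 */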
2888 static int
2889 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2890 {
2891         /*
2892          * turn off the SND_RESP bit.
2893          * It will be checked in the case handling
2894          */
2895         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2896
2897         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2898                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2899                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2900                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2901                 libcfs_nidstr(&sd->sd_dst_nid),
2902                 (send_case & LOCAL_DST) ? "local" : "routed");
2903
2904         switch (send_case) {
2905         /*
2906          * For all cases where the source is specified, we should always
2907          * use the destination NID, whether it's an MR destination or not,
2908          * since we're continuing a series of related messages for the
2909          * same RPC
2910          */
2911         case SRC_SPEC_LOCAL_NMR_DST:
2912                 return lnet_handle_spec_local_nmr_dst(sd);
2913         case SRC_SPEC_LOCAL_MR_DST:
2914                 return lnet_handle_spec_local_mr_dst(sd);
2915         case SRC_SPEC_ROUTER_NMR_DST:
2916         case SRC_SPEC_ROUTER_MR_DST:
2917                 return lnet_handle_spec_router_dst(sd);
2918         case SRC_ANY_LOCAL_NMR_DST:
2919                 return lnet_handle_any_local_nmr_dst(sd);
2920         case SRC_ANY_LOCAL_MR_DST:
2921         case SRC_ANY_ROUTER_MR_DST:
2922                 return lnet_handle_any_mr_dst(sd);
2923         case SRC_ANY_ROUTER_NMR_DST:
2924                 return lnet_handle_any_router_nmr_dst(sd);
2925         default:
2926                 CERROR("Unknown send case\n");
2927                 return -1;
2928         }
2929 }
2930
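/*
 * Select the local NI and destination peer NI to use for this message.
 * This looks up (or creates) the peer NI for dst_nid, optionally kicks
 * off peer discovery, classifies the send into one of the send cases
 * and then dispatches to lnet_handle_send_case_locked(). The whole
 * selection is retried if a handler returns REPEAT_SEND.
 * Returns a negative errno on failure, otherwise the result of the
 * selected send handler.
 */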
2931 static int
2932 lnet_select_pathway(struct lnet_nid *src_nid,
2933                     struct lnet_nid *dst_nid,
2934                     struct lnet_msg *msg,
2935                     struct lnet_nid *rtr_nid)
2936 {
2937         struct lnet_peer_ni *lpni;
2938         struct lnet_peer *peer;
2939         struct lnet_send_data send_data;
2940         int cpt, rc;
2941         int md_cpt;
2942         __u32 send_case = 0;
2943         bool final_hop;
2944         bool mr_forwarding_allowed;
2945
2946         memset(&send_data, 0, sizeof(send_data));
2947
2948         /*
2949          * get an initial CPT to use for locking. The idea here is not to
2950          * serialize the calls to select_pathway, so that as many
2951          * operations can run concurrently as possible. To do that we use
2952          * the CPT where this call is being executed. Later on when we
2953          * determine the CPT to use in lnet_message_commit, we switch the
2954          * lock and check if there was any configuration change.  If none,
2955          * then we proceed, if there is, then we restart the operation.
2956          */
2957         cpt = lnet_net_lock_current();
2958
2959         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2960         if (md_cpt == CFS_CPT_ANY)
2961                 md_cpt = cpt;
2962
2963 again:
2964
2965         /*
2966          * If we're being asked to send to the loopback interface, there
2967          * is no need to go through any selection. We can just shortcut
2968          * the entire process and send over lolnd
2969          */
2970         send_data.sd_msg = msg;
2971         send_data.sd_cpt = cpt;
2972         if (nid_is_lo0(dst_nid)) {
2973                 rc = lnet_handle_lo_send(&send_data);
2974                 lnet_net_unlock(cpt);
2975                 return rc;
2976         }
2977
2978         /*
2979          * find an existing peer_ni, or create one and mark it as having been
2980          * created due to network traffic. This call will create the
2981          * peer->peer_net->peer_ni tree.
2982          */
2983         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
2984         if (IS_ERR(lpni)) {
2985                 lnet_net_unlock(cpt);
2986                 return PTR_ERR(lpni);
2987         }
2988
2989         /*
2990          * Cache the original src_nid and rtr_nid. If we need to resend the
2991          * message then we'll need to know whether the src_nid was originally
2992          * specified for this message. If it was originally specified,
2993          * then we need to keep using the same src_nid since it's
2994          * continuing the same sequence of messages. Similarly, rtr_nid will
2995          * affect our choice of next hop.
2996          */
2997         if (src_nid)
2998                 msg->msg_src_nid_param = *src_nid;
2999         else
3000                 msg->msg_src_nid_param = LNET_ANY_NID;
3001         if (rtr_nid)
3002                 msg->msg_rtr_nid_param = *rtr_nid;
3003         else
3004                 msg->msg_rtr_nid_param = LNET_ANY_NID;
3005
3006         /*
3007          * If necessary, perform discovery on the peer that owns this peer_ni.
3008          * Note, this can result in the ownership of this peer_ni changing
3009          * to another peer object.
3010          */
3011         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
3012         if (rc) {
3013                 lnet_peer_ni_decref_locked(lpni);
3014                 lnet_net_unlock(cpt);
3015                 return rc;
3016         }
3017         lnet_peer_ni_decref_locked(lpni);
3018
3019         peer = lpni->lpni_peer_net->lpn_peer;
3020
3021         /*
3022          * Identify the different send cases
3023          */
3024         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3025                 send_case |= SRC_ANY;
3026                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3027                         send_case |= LOCAL_DST;
3028                 else
3029                         send_case |= REMOTE_DST;
3030         } else {
3031                 send_case |= SRC_SPEC;
3032                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3033                         send_case |= LOCAL_DST;
3034                 else
3035                         send_case |= REMOTE_DST;
3036         }
3037
3038         final_hop = false;
3039         if (msg->msg_routing && (send_case & LOCAL_DST))
3040                 final_hop = true;
3041
3042         /* Determine whether to allow MR forwarding for this message.
3043          * NB: MR forwarding is allowed if the message originator and the
3044          * destination are both MR capable, and the destination lpni that was
3045          * originally chosen by the originator is unhealthy or down.
3046          * We check the MR capability of the destination further below
3047          */
3048         mr_forwarding_allowed = false;
3049         if (final_hop) {
3050                 struct lnet_peer *src_lp;
3051                 struct lnet_peer_ni *src_lpni;
3052
3053                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3054                                                    NULL, cpt);
3055                 /* We don't fail the send if we hit any errors here. We'll just
3056                  * try to send it via non-multi-rail criteria
3057                  */
3058                 if (!IS_ERR(src_lpni)) {
3059                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3060                         lnet_peer_ni_decref_locked(src_lpni);
3061                         src_lp = lpni->lpni_peer_net->lpn_peer;
3062                         if (lnet_peer_is_multi_rail(src_lp) &&
3063                             !lnet_is_peer_ni_alive(lpni))
3064                                 mr_forwarding_allowed = true;
3065
3066                 }
3067                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3068                        mr_forwarding_allowed ? "allowed" : "not allowed");
3069         }
3070
3071         /*
3072          * Deal with the peer as NMR in the following cases:
3073          * 1. the peer is NMR
3074          * 2. We're trying to recover a specific peer NI
3075          * 3. I'm a router sending to the final destination and MR forwarding is
3076          *    not allowed for this message (as determined above).
3077          *    In this case the source of the message would've
3078          *    already selected the final destination so my job
3079          *    is to honor the selection.
3080          */
3081         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3082             (final_hop && !mr_forwarding_allowed))
3083                 send_case |= NMR_DST;
3084         else
3085                 send_case |= MR_DST;
3086
3087         if (lnet_msg_is_response(msg))
3088                 send_case |= SND_RESP;
3089
3090         /* assign parameters to the send_data */
3091         if (rtr_nid)
3092                 send_data.sd_rtr_nid = *rtr_nid;
3093         else
3094                 send_data.sd_rtr_nid = LNET_ANY_NID;
3095         if (src_nid)
3096                 send_data.sd_src_nid = *src_nid;
3097         else
3098                 send_data.sd_src_nid = LNET_ANY_NID;
3099         send_data.sd_dst_nid = *dst_nid;
3100         send_data.sd_best_lpni = lpni;
3101         /*
3102          * keep a pointer to the final destination in case we're going to
3103          * route, since we'll need to access it later
3104          */
3105         send_data.sd_final_dst_lpni = lpni;
3106         send_data.sd_peer = peer;
3107         send_data.sd_md_cpt = md_cpt;
3108         send_data.sd_send_case = send_case;
3109
3110         rc = lnet_handle_send_case_locked(&send_data);
3111
3112         /*
3113          * Update the local cpt since send_data.sd_cpt might've been
3114          * updated as a result of calling lnet_handle_send_case_locked().
3115          */
3116         cpt = send_data.sd_cpt;
3117
3118         if (rc == REPEAT_SEND)
3119                 goto again;
3120
3121         lnet_net_unlock(cpt);
3122
3123         return rc;
3124 }
3125
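/*
 * Entry point for sending a message: run the pathway selection and, if
 * a credit is immediately available (LNET_CREDIT_OK), hand the message
 * to the selected NI. Returns 0 once the message has been accepted for
 * sending (it may still be queued for credits or discovery), or a
 * negative errno with msg_health_status set on failure.
 */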
3126 int
3127 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3128           struct lnet_nid *rtr_nid)
3129 {
3130         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3131         int rc;
3132
3133         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3134         LASSERT(msg->msg_txpeer == NULL);
3135         LASSERT(msg->msg_txni == NULL);
3136         LASSERT(!msg->msg_sending);
3137         LASSERT(!msg->msg_target_is_router);
3138         LASSERT(!msg->msg_receiving);
3139
3140         msg->msg_sending = 1;
3141
3142         LASSERT(!msg->msg_tx_committed);
3143
3144         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3145         if (rc < 0) {
3146                 if (rc == -EHOSTUNREACH)
3147                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3148                 else
3149                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3150                 return rc;
3151         }
3152
3153         if (rc == LNET_CREDIT_OK)
3154                 lnet_ni_send(msg->msg_txni, msg);
3155
3156         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3157         return 0;
3158 }
3159
3160 enum lnet_mt_event_type {
3161         MT_TYPE_LOCAL_NI = 0,
3162         MT_TYPE_PEER_NI
3163 };
3164
3165 struct lnet_mt_event_info {
3166         enum lnet_mt_event_type mt_type;
3167         struct lnet_nid mt_nid;
3168 };
3169
3170 /* called with res_lock held */
3171 void
3172 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3173 {
3174         struct lnet_rsp_tracker *rspt;
3175
3176         /*
3177          * msg has a refcount on the MD so the MD is not going away.
3178          * The rspt queue for the cpt is protected by
3179          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3180          */
3181         if (!md->md_rspt_ptr)
3182                 return;
3183
3184         rspt = md->md_rspt_ptr;
3185
3186         /* debug code */
3187         LASSERT(rspt->rspt_cpt == cpt);
3188
3189         md->md_rspt_ptr = NULL;
3190
3191         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3192                 /*
3193                  * The monitor thread has invalidated this handle because the
3194                  * response timed out, but it failed to lookup the MD. That
3195                  * means this response tracker is on the zombie list. We can
3196                  * safely remove it under the resource lock (held by caller) and
3197                  * free the response tracker block.
3198                  */
3199                 list_del(&rspt->rspt_on_list);
3200                 lnet_rspt_free(rspt, cpt);
3201         } else {
3202                 /*
3203                  * invalidate the handle to indicate that a response has been
3204                  * received, which will then lead the monitor thread to clean up
3205                  * the rspt block.
3206                  */
3207                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3208         }
3209 }
3210
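/*
 * Free any response trackers left on the per-CPT zombie queues and
 * release the queue array. See lnet_finalize_expired_responses() for
 * how trackers end up on the zombie list.
 */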
3211 void
3212 lnet_clean_zombie_rstqs(void)
3213 {
3214         struct lnet_rsp_tracker *rspt, *tmp;
3215         int i;
3216
3217         cfs_cpt_for_each(i, lnet_cpt_table()) {
3218                 list_for_each_entry_safe(rspt, tmp,
3219                                          the_lnet.ln_mt_zombie_rstqs[i],
3220                                          rspt_on_list) {
3221                         list_del(&rspt->rspt_on_list);
3222                         lnet_rspt_free(rspt, i);
3223                 }
3224         }
3225
3226         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3227 }
3228
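/*
 * Walk the per-CPT response tracker queues and expire any tracker whose
 * deadline has passed (or all of them if LNet is shutting down): unlink
 * the MD, count the response timeout and reduce the health of the
 * next-hop peer NI the response was expected from. Trackers whose MD
 * can no longer be looked up are parked on the zombie queue instead.
 */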
3229 static void
3230 lnet_finalize_expired_responses(void)
3231 {
3232         struct lnet_libmd *md;
3233         struct lnet_rsp_tracker *rspt, *tmp;
3234         ktime_t now;
3235         int i;
3236
3237         if (the_lnet.ln_mt_rstq == NULL)
3238                 return;
3239
3240         cfs_cpt_for_each(i, lnet_cpt_table()) {
3241                 LIST_HEAD(local_queue);
3242
3243                 lnet_net_lock(i);
3244                 if (!the_lnet.ln_mt_rstq[i]) {
3245                         lnet_net_unlock(i);
3246                         continue;
3247                 }
3248                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3249                 lnet_net_unlock(i);
3250
3251                 now = ktime_get();
3252
3253                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3254                         /*
3255                          * The rspt mdh will be invalidated when a response
3256                          * is received or whenever we want to discard the
3257                          * block. The monitor thread will walk the queue
3258                          * and clean up any rspts with an invalid mdh.
3259                          * The monitor thread will walk the queue until
3260                          * the first unexpired rspt block. This means that
3261                          * some rspt blocks which received their
3262                          * corresponding responses will linger in the
3263                          * queue until they are cleaned up eventually.
3264                          */
3265                         lnet_res_lock(i);
3266                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3267                                 lnet_res_unlock(i);
3268                                 list_del(&rspt->rspt_on_list);
3269                                 lnet_rspt_free(rspt, i);
3270                                 continue;
3271                         }
3272
3273                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3274                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3275                                 struct lnet_peer_ni *lpni;
3276                                 struct lnet_nid nid;
3277
3278                                 md = lnet_handle2md(&rspt->rspt_mdh);
3279                                 if (!md) {
3280                                         /* MD has been queued for unlink, but
3281                                          * rspt hasn't been detached (Note we've
3282                                          * checked above that the rspt_mdh is
3283                                          * valid). Since we cannot lookup the MD
3284                                          * we're unable to detach the rspt
3285                                          * ourselves. Thus, move the rspt to the
3286                                          * zombie list where we'll wait for
3287                                          * either:
3288                                          *   1. The remaining operations on the
3289                                          *   MD to complete. In this case the
3290                                          *   final operation will result in
3291                                          *   lnet_msg_detach_md()->
3292                                          *   lnet_detach_rsp_tracker() where
3293                                          *   we will clean up this response
3294                                          *   tracker.
3295                                          *   2. LNet to shutdown. In this case
3296                                          *   we'll wait until after all LND Nets
3297                                          *   have shutdown and then we can
3298                                          *   safely free any remaining response
3299                                          *   tracker blocks on the zombie list.
3300                                          * Note: We need to hold the resource
3301                                          * lock when adding to the zombie list
3302                                          * because we may have concurrent access
3303                                          * with lnet_detach_rsp_tracker().
3304                                          */
3305                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3306                                         list_move(&rspt->rspt_on_list,
3307                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3308                                         lnet_res_unlock(i);
3309                                         continue;
3310                                 }
3311                                 LASSERT(md->md_rspt_ptr == rspt);
3312                                 md->md_rspt_ptr = NULL;
3313                                 lnet_res_unlock(i);
3314
3315                                 LNetMDUnlink(rspt->rspt_mdh);
3316
3317                                 nid = rspt->rspt_next_hop_nid;
3318
3319                                 list_del(&rspt->rspt_on_list);
3320                                 lnet_rspt_free(rspt, i);
3321
3322                                 /* If we're shutting down we just want to clean
3323                                  * up the rspt blocks
3324                                  */
3325                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3326                                         continue;
3327
3328                                 lnet_net_lock(i);
3329                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3330                                 lnet_net_unlock(i);
3331
3332                                 CDEBUG(D_NET,
3333                                        "Response timeout: md = %p: nid = %s\n",
3334                                        md, libcfs_nidstr(&nid));
3335
3336                                 /*
3337                                  * If there is a timeout on the response
3338                                  * from the next hop decrement its health
3339                                  * value so that we don't use it
3340                                  */
3341                                 lnet_net_lock(0);
3342                                 lpni = lnet_peer_ni_find_locked(&nid);
3343                                 if (lpni) {
3344                                         lnet_handle_remote_failure_locked(lpni);
3345                                         lnet_peer_ni_decref_locked(lpni);
3346                                 }
3347                                 lnet_net_unlock(0);
3348                         } else {
3349                                 lnet_res_unlock(i);
3350                                 break;
3351                         }
3352                 }
3353
3354                 if (!list_empty(&local_queue)) {
3355                         lnet_net_lock(i);
3356                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3357                         lnet_net_unlock(i);
3358                 }
3359         }
3360 }
3361
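/*
 * Re-issue every message queued on this CPT's resend queue via
 * lnet_send(). Called with lnet_net_lock(cpt) held; the lock is dropped
 * and re-taken around each send. Messages whose peer can no longer be
 * found, or whose resend fails, are finalized with an error.
 */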
3362 static void
3363 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3364 {
3365         struct lnet_msg *msg;
3366
3367         while (!list_empty(resendq)) {
3368                 struct lnet_peer_ni *lpni;
3369
3370                 msg = list_entry(resendq->next, struct lnet_msg,
3371                                  msg_list);
3372
3373                 list_del_init(&msg->msg_list);
3374
3375                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3376                 if (!lpni) {
3377                         lnet_net_unlock(cpt);
3378                         CERROR("Expected that a peer is already created for %s\n",
3379                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3380                         msg->msg_no_resend = true;
3381                         lnet_finalize(msg, -EFAULT);
3382                         lnet_net_lock(cpt);
3383                 } else {
3384                         int rc;
3385
3386                         lnet_peer_ni_decref_locked(lpni);
3387
3388                         lnet_net_unlock(cpt);
3389                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3390                                libcfs_nidstr(&msg->msg_src_nid_param),
3391                                libcfs_idstr(&msg->msg_target),
3392                                lnet_msgtyp2str(msg->msg_type),
3393                                msg->msg_recovery,
3394                                msg->msg_retry_count);
3395                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3396                                        &msg->msg_rtr_nid_param);
3397                         if (rc) {
3398                                 CERROR("Error sending %s to %s: %d\n",
3399                                        lnet_msgtyp2str(msg->msg_type),
3400                                        libcfs_idstr(&msg->msg_target), rc);
3401                                 msg->msg_no_resend = true;
3402                                 lnet_finalize(msg, rc);
3403                         }
3404                         lnet_net_lock(cpt);
3405                         if (!rc)
3406                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3407                 }
3408         }
3409 }
3410
3411 static void
3412 lnet_resend_pending_msgs(void)
3413 {
3414         int i;
3415
3416         cfs_cpt_for_each(i, lnet_cpt_table()) {
3417                 lnet_net_lock(i);
3418                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3419                 lnet_net_unlock(i);
3420         }
3421 }
3422
3423 /* called with cpt and ni_lock held */
3424 static void
3425 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3426 {
3427         struct lnet_handle_md recovery_mdh;
3428
3429         LNetInvalidateMDHandle(&recovery_mdh);
3430
3431         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3432             force) {
3433                 recovery_mdh = ni->ni_ping_mdh;
3434                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3435         }
3436         lnet_ni_unlock(ni);
3437         lnet_net_unlock(cpt);
3438         if (!LNetMDHandleIsInvalid(recovery_mdh))
3439                 LNetMDUnlink(recovery_mdh);
3440         lnet_net_lock(cpt);
3441         lnet_ni_lock(ni);
3442 }
3443
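/*
 * Ping every local NI currently on the recovery queue that is still
 * active, not yet back to full health and due for its next ping. The
 * queue is spliced onto a local list while it is processed and the
 * remaining NIs are put back at the end; see the in-line comments for
 * the locking and reference-count subtleties.
 */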
3444 static void
3445 lnet_recover_local_nis(void)
3446 {
3447         struct lnet_mt_event_info *ev_info;
3448         LIST_HEAD(processed_list);
3449         LIST_HEAD(local_queue);
3450         struct lnet_handle_md mdh;
3451         struct lnet_ni *tmp;
3452         struct lnet_ni *ni;
3453         struct lnet_nid nid;
3454         int healthv;
3455         int rc;
3456         time64_t now;
3457
3458         /*
3459          * splice the recovery queue on a local queue. We will iterate
3460          * through the local queue and update it as needed. Once we're
3461          * done with the traversal, we'll splice the local queue back on
3462          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3463          * will be traversed in the next iteration.
3464          */
3465         lnet_net_lock(0);
3466         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3467                          &local_queue);
3468         lnet_net_unlock(0);
3469
3470         now = ktime_get_seconds();
3471
3472         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3473                 /*
3474                  * if an NI is being deleted or it is now healthy, there
3475                  * is no need to keep it around in the recovery queue.
3476                  * The monitor thread is the only thread responsible for
3477                  * removing the NI from the recovery queue.
3478                  * Multiple threads can be adding NIs to the recovery
3479                  * queue.
3480                  */
3481                 healthv = atomic_read(&ni->ni_healthv);
3482
3483                 lnet_net_lock(0);
3484                 lnet_ni_lock(ni);
3485                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3486                     healthv == LNET_MAX_HEALTH_VALUE) {
3487                         list_del_init(&ni->ni_recovery);
3488                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3489                         lnet_ni_unlock(ni);
3490                         lnet_ni_decref_locked(ni, 0);
3491                         lnet_net_unlock(0);
3492                         continue;
3493                 }
3494
3495                 /*
3496                  * if the local NI failed recovery we must unlink the md.
3497                  * But we want to keep the local_ni on the recovery queue
3498                  * so we can continue the attempts to recover it.
3499                  */
3500                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3501                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3502                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3503                 }
3504
3505
3506                 lnet_ni_unlock(ni);
3507
3508                 if (now < ni->ni_next_ping) {
3509                         lnet_net_unlock(0);
3510                         continue;
3511                 }
3512
3513                 lnet_net_unlock(0);
3514
3515                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3516                        libcfs_nidstr(&ni->ni_nid));
3517
3518                 lnet_ni_lock(ni);
3519                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3520                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3521                         lnet_ni_unlock(ni);
3522
3523                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3524                         if (!ev_info) {
3525                                 CERROR("out of memory. Can't recover %s\n",
3526                                        libcfs_nidstr(&ni->ni_nid));
3527                                 lnet_ni_lock(ni);
3528                                 ni->ni_recovery_state &=
3529                                   ~LNET_NI_RECOVERY_PENDING;
3530                                 lnet_ni_unlock(ni);
3531                                 continue;
3532                         }
3533
3534                         mdh = ni->ni_ping_mdh;
3535                         /*
3536                          * Invalidate the ni mdh in case it's deleted.
3537                          * We'll unlink the mdh in this case below.
3538                          */
3539                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3540                         nid = ni->ni_nid;
3541
3542                         /*
3543                          * remove the NI from the local queue and drop the
3544                          * reference count to it while we're recovering
3545                          * it. The reason is that the NI could
3546                          * be deleted, and the way the code is structured,
3547                          * if we don't drop the NI, then the deletion
3548                          * code will enter a loop waiting for the
3549                          * reference count to be removed while holding the
3550                          * ln_mutex_lock(). When we look up the peer to
3551                          * send to in lnet_select_pathway() we will try to
3552                          * lock the ln_mutex_lock() as well, leading to
3553                          * a deadlock. By dropping the refcount and
3554                          * removing it from the list, we allow for the NI
3555                          * to be removed, then we use the cached NID to
3556                          * look it up again. If it's gone, then we just
3557                          * continue examining the rest of the queue.
3558                          */
3559                         lnet_net_lock(0);
3560                         list_del_init(&ni->ni_recovery);
3561                         lnet_ni_decref_locked(ni, 0);
3562                         lnet_net_unlock(0);
3563
3564                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3565                         ev_info->mt_nid = nid;
3566                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3567                                             ev_info, the_lnet.ln_mt_handler,
3568                                             true);
3569                         /* lookup the nid again */
3570                         lnet_net_lock(0);
3571                         ni = lnet_nid_to_ni_locked(&nid, 0);
3572                         if (!ni) {
3573                                 /*
3574                                  * the NI has been deleted when we dropped
3575                                  * the ref count
3576                                  */
3577                                 lnet_net_unlock(0);
3578                                 LNetMDUnlink(mdh);
3579                                 continue;
3580                         }
3581                         ni->ni_ping_count++;
3582
3583                         ni->ni_ping_mdh = mdh;
3584                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3585                                                         now);
3586
3587                         if (rc) {
3588                                 lnet_ni_lock(ni);
3589                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3590                                 lnet_ni_unlock(ni);
3591                         }
3592                         lnet_net_unlock(0);
3593                 } else
3594                         lnet_ni_unlock(ni);
3595         }
3596
3597         /*
3598          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3599          * reexamined in the next iteration.
3600          */
3601         list_splice_init(&processed_list, &local_queue);
3602         lnet_net_lock(0);
3603         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3604         lnet_net_unlock(0);
3605 }
3606
3607 static int
3608 lnet_resendqs_create(void)
3609 {
3610         struct list_head **resendqs;
3611         resendqs = lnet_create_array_of_queues();
3612
3613         if (!resendqs)
3614                 return -ENOMEM;
3615
3616         lnet_net_lock(LNET_LOCK_EX);
3617         the_lnet.ln_mt_resendqs = resendqs;
3618         lnet_net_unlock(LNET_LOCK_EX);
3619
3620         return 0;
3621 }
3622
3623 static void
3624 lnet_clean_local_ni_recoveryq(void)
3625 {
3626         struct lnet_ni *ni;
3627
3628         /* This is only called when the monitor thread has stopped */
3629         lnet_net_lock(0);
3630
3631         while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
3632                                               struct lnet_ni,
3633                                               ni_recovery)) != NULL) {
3634                 list_del_init(&ni->ni_recovery);
3635                 lnet_ni_lock(ni);
3636                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3637                 lnet_ni_unlock(ni);
3638                 lnet_ni_decref_locked(ni, 0);
3639         }
3640
3641         lnet_net_unlock(0);
3642 }
3643
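/* called with cpt lock and lpni_lock held */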
3644 static void
3645 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3646                                      bool force)
3647 {
3648         struct lnet_handle_md recovery_mdh;
3649
3650         LNetInvalidateMDHandle(&recovery_mdh);
3651
3652         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3653                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3654                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3655         }
3656         spin_unlock(&lpni->lpni_lock);
3657         lnet_net_unlock(cpt);
3658         if (!LNetMDHandleIsInvalid(recovery_mdh))
3659                 LNetMDUnlink(recovery_mdh);
3660         lnet_net_lock(cpt);
3661         spin_lock(&lpni->lpni_lock);
3662 }
3663
3664 static void
3665 lnet_clean_peer_ni_recoveryq(void)
3666 {
3667         struct lnet_peer_ni *lpni, *tmp;
3668
3669         lnet_net_lock(LNET_LOCK_EX);
3670
3671         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3672                                  lpni_recovery) {
3673                 list_del_init(&lpni->lpni_recovery);
3674                 spin_lock(&lpni->lpni_lock);
3675                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3676                 spin_unlock(&lpni->lpni_lock);
3677                 lnet_peer_ni_decref_locked(lpni);
3678         }
3679
3680         lnet_net_unlock(LNET_LOCK_EX);
3681 }
3682
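/*
 * Finalize any messages still sitting on the per-CPT resend queues with
 * -ESHUTDOWN and free the queue array.
 */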
3683 static void
3684 lnet_clean_resendqs(void)
3685 {
3686         struct lnet_msg *msg, *tmp;
3687         LIST_HEAD(msgs);
3688         int i;
3689
3690         cfs_cpt_for_each(i, lnet_cpt_table()) {
3691                 lnet_net_lock(i);
3692                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3693                 lnet_net_unlock(i);
3694                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3695                         list_del_init(&msg->msg_list);
3696                         msg->msg_no_resend = true;
3697                         lnet_finalize(msg, -ESHUTDOWN);
3698                 }
3699         }
3700
3701         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3702 }
3703
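/*
 * Ping every peer NI on the recovery queue that is not being deleted,
 * not yet back to full health and due for its next ping. Mirrors
 * lnet_recover_local_nis() and uses the same queue-splicing and
 * reference-counting strategy.
 */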
3704 static void
3705 lnet_recover_peer_nis(void)
3706 {
3707         struct lnet_mt_event_info *ev_info;
3708         LIST_HEAD(processed_list);
3709         LIST_HEAD(local_queue);
3710         struct lnet_handle_md mdh;
3711         struct lnet_peer_ni *lpni;
3712         struct lnet_peer_ni *tmp;
3713         struct lnet_nid nid;
3714         int healthv;
3715         int rc;
3716         time64_t now;
3717
3718         /*
3719          * Always use cpt 0 for locking across all interactions with
3720          * ln_mt_peerNIRecovq
3721          */
3722         lnet_net_lock(0);
3723         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3724                          &local_queue);
3725         lnet_net_unlock(0);
3726
3727         now = ktime_get_seconds();
3728
3729         list_for_each_entry_safe(lpni, tmp, &local_queue,
3730                                  lpni_recovery) {
3731                 /*
3732                  * The same protection strategy is used here as is in the
3733                  * local recovery case.
3734                  */
3735                 lnet_net_lock(0);
3736                 healthv = atomic_read(&lpni->lpni_healthv);
3737                 spin_lock(&lpni->lpni_lock);
3738                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3739                     healthv == LNET_MAX_HEALTH_VALUE) {
3740                         list_del_init(&lpni->lpni_recovery);
3741                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3742                         spin_unlock(&lpni->lpni_lock);
3743                         lnet_peer_ni_decref_locked(lpni);
3744                         lnet_net_unlock(0);
3745                         continue;
3746                 }
3747
3748                 /*
3749                  * If the peer NI has failed recovery we must unlink the
3750                  * md. But we want to keep the peer ni on the recovery
3751                  * queue so we can try to continue recovering it
3752                  */
3753                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3754                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3755                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3756                 }
3757
3758                 spin_unlock(&lpni->lpni_lock);
3759
3760                 if (now < lpni->lpni_next_ping) {
3761                         lnet_net_unlock(0);
3762                         continue;
3763                 }
3764
3765                 lnet_net_unlock(0);
3766
3767                 /*
3768                  * NOTE: we're racing with peer deletion from user space.
3769                  * It's possible that a peer is deleted after we check its
3770                  * state. In this case the recovery can create a new peer
3771                  */
3772                 spin_lock(&lpni->lpni_lock);
3773                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3774                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3775                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3776                         spin_unlock(&lpni->lpni_lock);
3777
3778                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3779                         if (!ev_info) {
3780                                 CERROR("out of memory. Can't recover %s\n",
3781                                        libcfs_nidstr(&lpni->lpni_nid));
3782                                 spin_lock(&lpni->lpni_lock);
3783                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3784                                 spin_unlock(&lpni->lpni_lock);
3785                                 continue;
3786                         }
3787
3788                         /* look at the comments in lnet_recover_local_nis() */
3789                         mdh = lpni->lpni_recovery_ping_mdh;
3790                         nid = lpni->lpni_nid;
3791                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3792                         lnet_net_lock(0);
3793                         list_del_init(&lpni->lpni_recovery);
3794                         lnet_peer_ni_decref_locked(lpni);
3795                         lnet_net_unlock(0);
3796
3797                         ev_info->mt_type = MT_TYPE_PEER_NI;
3798                         ev_info->mt_nid = nid;
3799                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3800                                             ev_info, the_lnet.ln_mt_handler,
3801                                             true);
3802                         lnet_net_lock(0);
3803                         /*
3804                          * lnet_peer_ni_find_locked() grabs a refcount for
3805                          * us. No need to take it explicitly.
3806                          */
3807                         lpni = lnet_peer_ni_find_locked(&nid);
3808                         if (!lpni) {
3809                                 lnet_net_unlock(0);
3810                                 LNetMDUnlink(mdh);
3811                                 continue;
3812                         }
3813
3814                         lpni->lpni_ping_count++;
3815
3816                         lpni->lpni_recovery_ping_mdh = mdh;
3817
3818                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3819                                                              &processed_list,
3820                                                              now);
3821                         if (rc) {
3822                                 spin_lock(&lpni->lpni_lock);
3823                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3824                                 spin_unlock(&lpni->lpni_lock);
3825                         }
3826
3827                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3828                         lnet_peer_ni_decref_locked(lpni);
3829                         lnet_net_unlock(0);
3830                 } else
3831                         spin_unlock(&lpni->lpni_lock);
3832         }
3833
3834         list_splice_init(&processed_list, &local_queue);
3835         lnet_net_lock(0);
3836         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3837         lnet_net_unlock(0);
3838 }
3839
3840 static int
3841 lnet_monitor_thread(void *arg)
3842 {
3843         time64_t rsp_timeout = 0;
3844         time64_t now;
3845
3846         wait_for_completion(&the_lnet.ln_started);
3847         /*
3848          * The monitor thread takes care of the following:
3849          *  1. Checks the aliveness of routers
3850          *  2. Checks if there are messages on the resend queue to resend
3851          *     them.
3852          *  3. Checks if there are any NIs on the local recovery queue and
3853          *     pings them
3854          *  4. Checks if there are any NIs on the remote recovery queue
3855          *     and pings them.
3856          */
3857         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3858                 now = ktime_get_real_seconds();
3859
3860                 if (lnet_router_checker_active())
3861                         lnet_check_routers();
3862
3863                 lnet_resend_pending_msgs();
3864
3865                 if (now >= rsp_timeout) {
3866                         lnet_finalize_expired_responses();
3867                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3868                 }
3869
3870                 lnet_recover_local_nis();
3871                 lnet_recover_peer_nis();
3872
3873                 /*
3874                  * TODO do we need to check if we should sleep without
3875                  * timeout?  Technically, an active system will always
3876                  * have messages in flight so this check will always
3877                  * evaluate to false. And on an idle system do we care
3878                  * if we wake up every 1 second? Although, we've seen
3879                  * cases where we get a complaint that an idle thread
3880                  * is waking up unnecessarily.
3881                  */
3882                 wait_for_completion_interruptible_timeout(
3883                         &the_lnet.ln_mt_wait_complete,
3884                         cfs_time_seconds(1));
3885                 /* Must re-init the completion before testing anything,
3886                  * including ln_mt_state.
3887                  */
3888                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3889         }
3890
3891         /* Shutting down */
3892         lnet_net_lock(LNET_LOCK_EX);
3893         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3894         lnet_net_unlock(LNET_LOCK_EX);
3895
3896         /* signal that the monitor thread is exiting */
3897         up(&the_lnet.ln_mt_signal);
3898
3899         return 0;
3900 }
3901
3902 /*
3903  * lnet_send_ping
3904  * Sends a ping.
3905  * Returns == 0 if success
3906  * Returns > 0 if LNetMDBind or prior fails
3907  * Returns < 0 if LNetGet fails
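 *
 * Typical usage in this file (see lnet_recover_local_nis() and
 * lnet_recover_peer_nis()): cache the current ping mdh, drop the
 * relevant locks, then call
 *   lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN, ev_info,
 *                  the_lnet.ln_mt_handler, true);
 * and store the mdh back once the NI/peer NI has been looked up again.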
3908  */
3909 int
3910 lnet_send_ping(struct lnet_nid *dest_nid,
3911                struct lnet_handle_md *mdh, int nnis,
3912                void *user_data, lnet_handler_t handler, bool recovery)
3913 {
3914         struct lnet_md md = { NULL };
3915         struct lnet_processid id;
3916         struct lnet_ping_buffer *pbuf;
3917         int rc;
3918
3919         if (LNET_NID_IS_ANY(dest_nid)) {
3920                 rc = -EHOSTUNREACH;
3921                 goto fail_error;
3922         }
3923
3924         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3925         if (!pbuf) {
3926                 rc = ENOMEM;
3927                 goto fail_error;
3928         }
3929
3930         /* initialize md content */
3931         md.start     = &pbuf->pb_info;
3932         md.length    = LNET_PING_INFO_SIZE(nnis);
3933         md.threshold = 2; /* GET/REPLY */
3934         md.max_size  = 0;
3935         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3936         md.user_ptr  = user_data;
3937         md.handler   = handler;
3938
3939         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3940         if (rc) {
3941                 lnet_ping_buffer_decref(pbuf);
3942                 CERROR("Can't bind MD: %d\n", rc);
3943                 rc = -rc; /* change the rc to positive */
3944                 goto fail_error;
3945         }
3946         id.pid = LNET_PID_LUSTRE;
3947         id.nid = *dest_nid;
3948
3949         rc = LNetGet(NULL, *mdh, &id,
3950                      LNET_RESERVED_PORTAL,
3951                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3952
3953         if (rc)
3954                 goto fail_unlink_md;
3955
3956         return 0;
3957
3958 fail_unlink_md:
3959         LNetMDUnlink(*mdh);
3960         LNetInvalidateMDHandle(mdh);
3961 fail_error:
3962         return rc;
3963 }
3964
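/*
 * Common completion handling for recovery pings on both local NIs and
 * peer NIs: clear the RECOVERY_PENDING state when appropriate, mark
 * RECOVERY_FAILED on error, and for a successfully answered local NI
 * ping bump the NI's health value.
 */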
3965 static void
3966 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3967                            int status, bool send, bool unlink_event)
3968 {
3969         struct lnet_nid *nid = &ev_info->mt_nid;
3970
3971         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3972                 struct lnet_ni *ni;
3973
3974                 lnet_net_lock(0);
3975                 ni = lnet_nid_to_ni_locked(nid, 0);
3976                 if (!ni) {
3977                         lnet_net_unlock(0);
3978                         return;
3979                 }
3980                 lnet_ni_lock(ni);
3981                 if (!send || status != 0)
3982                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3983                 if (status)
3984                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3985                 lnet_ni_unlock(ni);
3986                 lnet_net_unlock(0);
3987
3988                 if (status != 0) {
3989                         CERROR("local NI (%s) recovery failed with %d\n",
3990                                libcfs_nidstr(nid), status);
3991                         return;
3992                 }
3993                 /*
3994                  * need to increment healthv for the ni here, because in
3995                  * the lnet_finalize() path we don't have access to this
3996                  * NI. And in order to get access to it, we'll need to
3997                  * carry forward too much information.
3998                  * In the peer case, it'll naturally be incremented
3999                  */
4000                 if (!unlink_event)
4001                         lnet_inc_healthv(&ni->ni_healthv,
4002                                          lnet_health_sensitivity);
4003         } else {
4004                 struct lnet_peer_ni *lpni;
4005                 int cpt;
4006
4007                 cpt = lnet_net_lock_current();
4008                 lpni = lnet_peer_ni_find_locked(nid);
4009                 if (!lpni) {
4010                         lnet_net_unlock(cpt);
4011                         return;
4012                 }
4013                 spin_lock(&lpni->lpni_lock);
4014                 if (!send || status != 0)
4015                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
4016                 if (status)
4017                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
4018                 spin_unlock(&lpni->lpni_lock);
4019                 lnet_peer_ni_decref_locked(lpni);
4020                 lnet_net_unlock(cpt);
4021
4022                 if (status != 0)
4023                         CERROR("peer NI (%s) recovery failed with %d\n",
4024                                libcfs_nidstr(nid), status);
4025         }
4026 }
4027
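/*
 * Event handler attached to the recovery ping MDs sent by
 * lnet_send_ping(). Routes SEND/REPLY/UNLINK events to
 * lnet_handle_recovery_reply() and, once the MD has been unlinked,
 * frees the event info and drops the ping buffer reference.
 */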
4028 void
4029 lnet_mt_event_handler(struct lnet_event *event)
4030 {
4031         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4032         struct lnet_ping_buffer *pbuf;
4033
4034         /* TODO: remove assert */
4035         LASSERT(event->type == LNET_EVENT_REPLY ||
4036                 event->type == LNET_EVENT_SEND ||
4037                 event->type == LNET_EVENT_UNLINK);
4038
4039         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4040                event->status);
4041
4042         switch (event->type) {
4043         case LNET_EVENT_UNLINK:
4044                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4045                        libcfs_nidstr(&ev_info->mt_nid));
4046                 fallthrough;
4047         case LNET_EVENT_REPLY:
4048                 lnet_handle_recovery_reply(ev_info, event->status, false,
4049                                            event->type == LNET_EVENT_UNLINK);
4050                 break;
4051         case LNET_EVENT_SEND:
4052                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4053                                libcfs_nidstr(&ev_info->mt_nid),
4054                                (event->status) ? "unsuccessfully" :
4055                                "successfully", event->status);
4056                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4057                 break;
4058         default:
4059                 CERROR("Unexpected event: %d\n", event->type);
4060                 break;
4061         }
4062         if (event->unlinked) {
4063                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4064                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4065                 lnet_ping_buffer_decref(pbuf);
4066         }
4067 }
4068
4069 static int
4070 lnet_rsp_tracker_create(void)
4071 {
4072         struct list_head **rstqs;
4073         rstqs = lnet_create_array_of_queues();
4074
4075         if (!rstqs)
4076                 return -ENOMEM;
4077
4078         the_lnet.ln_mt_rstq = rstqs;
4079
4080         return 0;
4081 }
4082
4083 static void
4084 lnet_rsp_tracker_clean(void)
4085 {
4086         lnet_finalize_expired_responses();
4087
4088         cfs_percpt_free(the_lnet.ln_mt_rstq);
4089         the_lnet.ln_mt_rstq = NULL;
4090 }
4091
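/*
 * Create the resend queues and response tracker queues and start the
 * monitor thread. On failure everything that was set up is torn down
 * again before returning.
 */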
4092 int lnet_monitor_thr_start(void)
4093 {
4094         int rc = 0;
4095         struct task_struct *task;
4096
4097         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4098                 return -EALREADY;
4099
4100         rc = lnet_resendqs_create();
4101         if (rc)
4102                 return rc;
4103
4104         rc = lnet_rsp_tracker_create();
4105         if (rc)
4106                 goto clean_queues;
4107
4108         sema_init(&the_lnet.ln_mt_signal, 0);
4109
4110         lnet_net_lock(LNET_LOCK_EX);
4111         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4112         lnet_net_unlock(LNET_LOCK_EX);
4113         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4114         if (IS_ERR(task)) {
4115                 rc = PTR_ERR(task);
4116                 CERROR("Can't start monitor thread: %d\n", rc);
4117                 goto clean_thread;
4118         }
4119
4120         return 0;
4121
4122 clean_thread:
4123         lnet_net_lock(LNET_LOCK_EX);
4124         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4125         lnet_net_unlock(LNET_LOCK_EX);
4126         /* block until event callback signals exit */
4127         down(&the_lnet.ln_mt_signal);
4128         /* clean up */
4129         lnet_net_lock(LNET_LOCK_EX);
4130         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4131         lnet_net_unlock(LNET_LOCK_EX);
4132         lnet_rsp_tracker_clean();
4133         lnet_clean_local_ni_recoveryq();
4134         lnet_clean_peer_ni_recoveryq();
4135         lnet_clean_resendqs();
4136         the_lnet.ln_mt_handler = NULL;
4137         return rc;
4138 clean_queues:
4139         lnet_rsp_tracker_clean();
4140         lnet_clean_local_ni_recoveryq();
4141         lnet_clean_peer_ni_recoveryq();
4142         lnet_clean_resendqs();
4143         return rc;
4144 }
4145
4146 void lnet_monitor_thr_stop(void)
4147 {
4148         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4149                 return;
4150
4151         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4152         lnet_net_lock(LNET_LOCK_EX);
4153         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4154         lnet_net_unlock(LNET_LOCK_EX);
4155
4156         /* tell the monitor thread that we're shutting down */
4157         complete(&the_lnet.ln_mt_wait_complete);
4158
4159         /* block until monitor thread signals that it's done */
4160         mutex_unlock(&the_lnet.ln_api_mutex);
4161         down(&the_lnet.ln_mt_signal);
4162         mutex_lock(&the_lnet.ln_api_mutex);
4163         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4164
4165         /* perform cleanup tasks */
4166         lnet_rsp_tracker_clean();
4167         lnet_clean_local_ni_recoveryq();
4168         lnet_clean_peer_ni_recoveryq();
4169         lnet_clean_resendqs();
4170 }
4171
4172 void
4173 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4174                   __u32 msg_type)
4175 {
4176         lnet_net_lock(cpt);
4177         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4178         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4179         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4180         lnet_net_unlock(cpt);
4181
4182         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4183 }
4184
4185 static void
4186 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4187 {
4188         struct lnet_hdr *hdr = &msg->msg_hdr;
4189
4190         if (msg->msg_wanted != 0)
4191                 lnet_setpayloadbuffer(msg);
4192
4193         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4194
4195         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4196          * it back into the ACK during lnet_finalize() */
4197         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4198                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4199
4200         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4201                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4202 }
4203
4204 static int
4205 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4206 {
4207         struct lnet_hdr         *hdr = &msg->msg_hdr;
4208         struct lnet_match_info  info;
4209         int                     rc;
4210         bool                    ready_delay;
4211
4212         /* Convert put fields to host byte order */
4213         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4214         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4215         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4216
4217         /* Primary peer NID. */
4218         info.mi_id.nid = msg->msg_initiator;
4219         info.mi_id.pid  = hdr->src_pid;
4220         info.mi_opc     = LNET_MD_OP_PUT;
4221         info.mi_portal  = hdr->msg.put.ptl_index;
4222         info.mi_rlength = hdr->payload_length;
4223         info.mi_roffset = hdr->msg.put.offset;
4224         info.mi_mbits   = hdr->msg.put.match_bits;
4225         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4226
4227         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4228         ready_delay = msg->msg_rx_ready_delay;
4229
4230  again:
4231         rc = lnet_ptl_match_md(&info, msg);
4232         switch (rc) {
4233         default:
4234                 LBUG();
4235
4236         case LNET_MATCHMD_OK:
4237                 lnet_recv_put(ni, msg);
4238                 return 0;
4239
4240         case LNET_MATCHMD_NONE:
4241                 if (ready_delay)
4242                         /* no eager_recv, or it has already been called;
4243                          * the msg should be attached to the delayed list */
4244                         return 0;
4245
4246                 rc = lnet_ni_eager_recv(ni, msg);
4247                 if (rc == 0) {
4248                         ready_delay = true;
4249                         goto again;
4250                 }
4251                 fallthrough;
4252
4253         case LNET_MATCHMD_DROP:
4254                 CNETERR("Dropping PUT from %s portal %d match %llu"
4255                         " offset %d length %d: %d\n",
4256                         libcfs_idstr(&info.mi_id), info.mi_portal,
4257                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4258
4259                 return -ENOENT; /* -ve: OK but no match */
4260         }
4261 }
4262
4263 static int
4264 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4265 {
4266         struct lnet_match_info info;
4267         struct lnet_hdr *hdr = &msg->msg_hdr;
4268         struct lnet_processid source_id;
4269         struct lnet_handle_wire reply_wmd;
4270         int rc;
4271
4272         /* Convert get fields to host byte order */
4273         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4274         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4275         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4276         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4277
4278         source_id.nid = hdr->src_nid;
4279         source_id.pid = hdr->src_pid;
4280         /* Primary peer NID */
4281         info.mi_id.nid  = msg->msg_initiator;
4282         info.mi_id.pid  = hdr->src_pid;
4283         info.mi_opc     = LNET_MD_OP_GET;
4284         info.mi_portal  = hdr->msg.get.ptl_index;
4285         info.mi_rlength = hdr->msg.get.sink_length;
4286         info.mi_roffset = hdr->msg.get.src_offset;
4287         info.mi_mbits   = hdr->msg.get.match_bits;
4288         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4289
4290         rc = lnet_ptl_match_md(&info, msg);
4291         if (rc == LNET_MATCHMD_DROP) {
4292                 CNETERR("Dropping GET from %s portal %d match %llu"
4293                         " offset %d length %d\n",
4294                         libcfs_idstr(&info.mi_id), info.mi_portal,
4295                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4296                 return -ENOENT; /* -ve: OK but no match */
4297         }
4298
4299         LASSERT(rc == LNET_MATCHMD_OK);
4300
4301         lnet_build_msg_event(msg, LNET_EVENT_GET);
4302
4303         reply_wmd = hdr->msg.get.return_wmd;
4304
4305         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4306                        msg->msg_offset, msg->msg_wanted);
4307
4308         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4309
4310         if (rdma_get) {
4311                 /* The LND completes the REPLY from her recv procedure */
4312                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4313                              msg->msg_offset, msg->msg_len, msg->msg_len);
4314                 return 0;
4315         }
4316
4317         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4318         msg->msg_receiving = 0;
4319
4320         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4321         if (rc < 0) {
4322                 /* didn't get as far as lnet_ni_send() */
4323                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4324                        libcfs_nidstr(&ni->ni_nid),
4325                        libcfs_idstr(&info.mi_id), rc);
4326
4327                 lnet_finalize(msg, rc);
4328         }
4329
4330         return 0;
4331 }
4332
4333 static int
4334 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4335 {
4336         void *private = msg->msg_private;
4337         struct lnet_hdr *hdr = &msg->msg_hdr;
4338         struct lnet_processid src = {};
4339         struct lnet_libmd *md;
4340         unsigned int rlength;
4341         unsigned int mlength;
4342         int cpt;
4343
4344         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4345         lnet_res_lock(cpt);
4346
4347         src.nid = hdr->src_nid;
4348         src.pid = hdr->src_pid;
4349
4350         /* NB handles only looked up by creator (no flips) */
4351         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4352         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4353                 CNETERR("%s: Dropping REPLY from %s for %s "
4354                         "MD %#llx.%#llx\n",
4355                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4356                         (md == NULL) ? "invalid" : "inactive",
4357                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4358                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4359                 if (md != NULL && md->md_me != NULL)
4360                         CERROR("REPLY MD also attached to portal %d\n",
4361                                md->md_me->me_portal);
4362
4363                 lnet_res_unlock(cpt);
4364                 return -ENOENT; /* -ve: OK but no match */
4365         }
4366
4367         LASSERT(md->md_offset == 0);
4368
4369         rlength = hdr->payload_length;
4370         mlength = min(rlength, md->md_length);
4371
4372         if (mlength < rlength &&
4373             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4374                 CNETERR("%s: Dropping REPLY from %s length %d "
4375                         "for MD %#llx would overflow (%d)\n",
4376                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4377                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4378                         mlength);
4379                 lnet_res_unlock(cpt);
4380                 return -ENOENT; /* -ve: OK but no match */
4381         }
4382
4383         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4384                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4385                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4386
4387         lnet_msg_attach_md(msg, md, 0, mlength);
4388
4389         if (mlength != 0)
4390                 lnet_setpayloadbuffer(msg);
4391
4392         lnet_res_unlock(cpt);
4393
4394         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4395
4396         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4397         return 0;
4398 }
4399
4400 static int
4401 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4402 {
4403         struct lnet_hdr *hdr = &msg->msg_hdr;
4404         struct lnet_processid src = {};
4405         struct lnet_libmd *md;
4406         int cpt;
4407
4408         src.nid = hdr->src_nid;
4409         src.pid = hdr->src_pid;
4410
4411         /* Convert ack fields to host byte order */
4412         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4413         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4414
4415         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4416         lnet_res_lock(cpt);
4417
4418         /* NB handles only looked up by creator (no flips) */
4419         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4420         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4421                 /* Don't moan; this is expected */
4422                 CDEBUG(D_NET,
4423                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4424                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4425                        (md == NULL) ? "invalid" : "inactive",
4426                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4427                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4428                 if (md != NULL && md->md_me != NULL)
4429                         CERROR("Source MD also attached to portal %d\n",
4430                                md->md_me->me_portal);
4431
4432                 lnet_res_unlock(cpt);
4433                 return -ENOENT;                  /* -ve! */
4434         }
4435
4436         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4437                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4438                hdr->msg.ack.dst_wmd.wh_object_cookie);
4439
4440         lnet_msg_attach_md(msg, md, 0, 0);
4441
4442         lnet_res_unlock(cpt);
4443
4444         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4445
4446         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4447         return 0;
4448 }
4449
4450 /**
4451  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4452  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4453  * \retval -ve                  error code
4454  */
4455 int
4456 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4457 {
4458         int     rc = 0;
4459
4460         if (!the_lnet.ln_routing)
4461                 return -ECANCELED;
4462
4463         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4464             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4465                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4466                         msg->msg_rx_ready_delay = 1;
4467                 } else {
4468                         lnet_net_unlock(msg->msg_rx_cpt);
4469                         rc = lnet_ni_eager_recv(ni, msg);
4470                         lnet_net_lock(msg->msg_rx_cpt);
4471                 }
4472         }
4473
4474         if (rc == 0)
4475                 rc = lnet_post_routed_recv_locked(msg, 0);
4476         return rc;
4477 }
4478
4479 int
4480 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4481 {
4482         int     rc;
4483
4484         switch (msg->msg_type) {
4485         case LNET_MSG_ACK:
4486                 rc = lnet_parse_ack(ni, msg);
4487                 break;
4488         case LNET_MSG_PUT:
4489                 rc = lnet_parse_put(ni, msg);
4490                 break;
4491         case LNET_MSG_GET:
4492                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4493                 break;
4494         case LNET_MSG_REPLY:
4495                 rc = lnet_parse_reply(ni, msg);
4496                 break;
4497         default: /* prevent an unused label if !kernel */
4498                 LASSERT(0);
4499                 return -EPROTO;
4500         }
4501
4502         LASSERT(rc == 0 || rc == -ENOENT);
4503         return rc;
4504 }
4505
4506 char *
4507 lnet_msgtyp2str(int type)
4508 {
4509         switch (type) {
4510         case LNET_MSG_ACK:
4511                 return ("ACK");
4512         case LNET_MSG_PUT:
4513                 return ("PUT");
4514         case LNET_MSG_GET:
4515                 return ("GET");
4516         case LNET_MSG_REPLY:
4517                 return ("REPLY");
4518         case LNET_MSG_HELLO:
4519                 return ("HELLO");
4520         default:
4521                 return ("<UNKNOWN>");
4522         }
4523 }
4524 EXPORT_SYMBOL(lnet_msgtyp2str);
4525
4526 int
4527 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4528            struct lnet_nid *from_nid, void *private, int rdma_req)
4529 {
4530         struct lnet_peer_ni *lpni;
4531         struct lnet_msg *msg;
4532         __u32 payload_length;
4533         lnet_pid_t dest_pid;
4534         struct lnet_nid dest_nid;
4535         struct lnet_nid src_nid;
4536         bool push = false;
4537         int for_me;
4538         __u32 type;
4539         int rc = 0;
4540         int cpt;
4541         time64_t now = ktime_get_seconds();
4542
4543         LASSERT(!in_interrupt());
4544
4545         type = hdr->type;
4546         src_nid = hdr->src_nid;
4547         dest_nid = hdr->dest_nid;
4548         dest_pid = hdr->dest_pid;
4549         payload_length = hdr->payload_length;
4550
4551         for_me = nid_same(&ni->ni_nid, &dest_nid);
4552         cpt = lnet_nid2cpt(from_nid, ni);
4553
4554         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4555                 libcfs_nidstr(&dest_nid),
4556                 libcfs_nidstr(&ni->ni_nid),
4557                 libcfs_nidstr(&src_nid),
4558                 lnet_msgtyp2str(type),
4559                 (for_me) ? "for me" : "routed");
4560
4561         switch (type) {
4562         case LNET_MSG_ACK:
4563         case LNET_MSG_GET:
4564                 if (payload_length > 0) {
4565                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4566                                libcfs_nidstr(from_nid),
4567                                libcfs_nidstr(&src_nid),
4568                                lnet_msgtyp2str(type), payload_length);
4569                         return -EPROTO;
4570                 }
4571                 break;
4572
4573         case LNET_MSG_PUT:
4574         case LNET_MSG_REPLY:
4575                 if (payload_length >
4576                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4577                         CERROR("%s, src %s: bad %s payload %d "
4578                                "(%d max expected)\n",
4579                                libcfs_nidstr(from_nid),
4580                                libcfs_nidstr(&src_nid),
4581                                lnet_msgtyp2str(type),
4582                                payload_length,
4583                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4584                         return -EPROTO;
4585                 }
4586                 break;
4587
4588         default:
4589                 CERROR("%s, src %s: Bad message type 0x%x\n",
4590                        libcfs_nidstr(from_nid),
4591                        libcfs_nidstr(&src_nid), type);
4592                 return -EPROTO;
4593         }
4594
4595         /* Only update net_last_alive for incoming GETs on the reserved portal
4596          * (i.e. incoming lnet/discovery pings).
4597          * This avoids situations where the router's own traffic results in NI
4598          * status changes
4599          */
4600         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4601             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4602             !lnet_islocalnid(&src_nid) &&
4603             ni->ni_net->net_last_alive != now) {
4604                 lnet_ni_lock(ni);
4605                 spin_lock(&ni->ni_net->net_lock);
4606                 ni->ni_net->net_last_alive = now;
4607                 spin_unlock(&ni->ni_net->net_lock);
4608                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4609                 lnet_ni_unlock(ni);
4610         }
4611
4612         if (push)
4613                 lnet_push_update_to_peers(1);
4614
4615         /* Regard a bad destination NID as a protocol error.  Senders should
4616          * know what they're doing; if they don't they're misconfigured, buggy
4617          * or malicious so we chop them off at the knees :) */
4618
4619         if (!for_me) {
4620                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4621                         /* should have gone direct */
4622                         CERROR("%s, src %s: Bad dest nid %s "
4623                                "(should have been sent direct)\n",
4624                                 libcfs_nidstr(from_nid),
4625                                 libcfs_nidstr(&src_nid),
4626                                 libcfs_nidstr(&dest_nid));
4627                         return -EPROTO;
4628                 }
4629
4630                 if (lnet_islocalnid(&dest_nid)) {
4631                         /* dest is another local NI; sender should have used
4632                          * this node's NID on its own network */
4633                         CERROR("%s, src %s: Bad dest nid %s "
4634                                "(it's my nid but on a different network)\n",
4635                                 libcfs_nidstr(from_nid),
4636                                 libcfs_nidstr(&src_nid),
4637                                 libcfs_nidstr(&dest_nid));
4638                         return -EPROTO;
4639                 }
4640
4641                 if (rdma_req && type == LNET_MSG_GET) {
4642                         CERROR("%s, src %s: Bad optimized GET for %s "
4643                                "(final destination must be me)\n",
4644                                 libcfs_nidstr(from_nid),
4645                                 libcfs_nidstr(&src_nid),
4646                                 libcfs_nidstr(&dest_nid));
4647                         return -EPROTO;
4648                 }
4649
4650                 if (!the_lnet.ln_routing) {
4651                         CERROR("%s, src %s: Dropping message for %s "
4652                                "(routing not enabled)\n",
4653                                 libcfs_nidstr(from_nid),
4654                                 libcfs_nidstr(&src_nid),
4655                                 libcfs_nidstr(&dest_nid));
4656                         goto drop;
4657                 }
4658         }
4659
4660         /* Message looks OK; we're not going to return an error, so we MUST
4661          * call back lnd_recv() come what may... */
4662
4663         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4664             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4665                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4666                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4667                        lnet_msgtyp2str(type));
4668                 goto drop;
4669         }
4670
4671         if (!list_empty(&the_lnet.ln_drop_rules) &&
4672             lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) {
4673                 CDEBUG(D_NET,
4674                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4675                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4676                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4677                 goto drop;
4678         }
4679
4680         msg = lnet_msg_alloc();
4681         if (msg == NULL) {
4682                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4683                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4684                        lnet_msgtyp2str(type));
4685                 goto drop;
4686         }
4687
4688         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4689          * pointers NULL etc */
4690
4691         msg->msg_type = type;
4692         msg->msg_private = private;
4693         msg->msg_receiving = 1;
4694         msg->msg_rdma_get = rdma_req;
4695         msg->msg_len = msg->msg_wanted = payload_length;
4696         msg->msg_offset = 0;
4697         msg->msg_hdr = *hdr;
4698         /* for building message event */
4699         msg->msg_from = *from_nid;
4700         if (!for_me) {
4701                 msg->msg_target.pid = dest_pid;
4702                 msg->msg_target.nid = dest_nid;
4703                 msg->msg_routing = 1;
4704         }
4705
4706         lnet_net_lock(cpt);
4707         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4708         if (IS_ERR(lpni)) {
4709                 lnet_net_unlock(cpt);
4710                 rc = PTR_ERR(lpni);
4711                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4712                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4713                        lnet_msgtyp2str(type), rc);
4714                 lnet_msg_free(msg);
4715                 if (rc == -ESHUTDOWN)
4716                         /* We are shutting down.  Don't do anything more */
4717                         return rc;
4718                 goto drop;
4719         }
4720
4721         /* If this message was forwarded to us from a router then we may need
4722          * to update router aliveness or check for an asymmetrical route
4723          * (or both)
4724          */
4725         if (((lnet_drop_asym_route && for_me) ||
4726              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4727             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4728                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4729                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4730                 struct lnet_route *route;
4731                 bool found = false;
4732
4733                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4734                         if (route->lr_net == src_net_id) {
4735                                 found = true;
4736                                 /* If we're transitioning the gateway from
4737                                  * dead -> alive, and discovery is disabled
4738                                  * locally or on the gateway, then we need to
4739                                  * update the cached route aliveness for each
4740                                  * route to the src_nid's net.
4741                                  *
4742                                  * Otherwise, we're only checking for a
4743                                  * symmetrical route, and we can break out
4744                                  * of the loop.
4745                                  */
4746                                 if (!gw->lp_alive &&
4747                                     lnet_is_discovery_disabled(gw))
4748                                         lnet_set_route_aliveness(route, true);
4749                                 else
4750                                         break;
4751                         }
4752                 }
4753                 if (lnet_drop_asym_route && for_me && !found) {
4754                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4755                         lnet_peer_ni_decref_locked(lpni);
4756                         lnet_net_unlock(cpt);
4757                         /* we would not use from_nid to route a message to
4758                          * src_nid
4759                          * => asymmetric routing detected but forbidden
4760                          */
4761                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4762                                libcfs_nidstr(from_nid),
4763                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4764                         lnet_msg_free(msg);
4765                         goto drop;
4766                 }
4767                 if (!gw->lp_alive) {
4768                         struct lnet_peer_net *lpn;
4769                         struct lnet_peer_ni *lpni2;
4770
4771                         gw->lp_alive = true;
4772                         /* Mark all remote NIs on src_nid's net UP */
4773                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4774                         if (lpn)
4775                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4776                                                     lpni_peer_nis)
4777                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4778                 }
4779         }
4780
4781         lpni->lpni_last_alive = now;
4782
4783         msg->msg_rxpeer = lpni;
4784         msg->msg_rxni = ni;
4785         lnet_ni_addref_locked(ni, cpt);
4786         /* Multi-Rail: Primary NID of source. */
4787         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
4788
4789         /*
4790          * mark the status of this lpni as UP since we received a message
4791          * from it. The ping response reports back the ns_status which is
4792          * marked on the remote as up or down and we cache it here.
4793          */
4794         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4795
4796         lnet_msg_commit(msg, cpt);
4797
4798         /* message delay simulation */
4799         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4800                      lnet_delay_rule_match_locked(hdr, msg))) {
4801                 lnet_net_unlock(cpt);
4802                 return 0;
4803         }
4804
4805         if (!for_me) {
4806                 rc = lnet_parse_forward_locked(ni, msg);
4807                 lnet_net_unlock(cpt);
4808
4809                 if (rc < 0)
4810                         goto free_drop;
4811
4812                 if (rc == LNET_CREDIT_OK) {
4813                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4814                                      0, payload_length, payload_length);
4815                 }
4816                 return 0;
4817         }
4818
4819         lnet_net_unlock(cpt);
4820
4821         rc = lnet_parse_local(ni, msg);
4822         if (rc != 0)
4823                 goto free_drop;
4824         return 0;
4825
4826  free_drop:
4827         LASSERT(msg->msg_md == NULL);
4828         lnet_finalize(msg, rc);
4829
4830  drop:
4831         lnet_drop_message(ni, cpt, private, payload_length, type);
4832         return 0;
4833 }
4834 EXPORT_SYMBOL(lnet_parse);
4835
4836 void
4837 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4838 {
4839         struct lnet_msg *msg;
4840
4841         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4842                                                msg_list)) != NULL) {
4843                 struct lnet_processid id = {};
4844
4845                 list_del(&msg->msg_list);
4846
4847                 id.nid = msg->msg_hdr.src_nid;
4848                 id.pid = msg->msg_hdr.src_pid;
4849
4850                 LASSERT(msg->msg_md == NULL);
4851                 LASSERT(msg->msg_rx_delayed);
4852                 LASSERT(msg->msg_rxpeer != NULL);
4853                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4854
4855                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4856                       " offset %d length %d: %s\n",
4857                       libcfs_idstr(&id),
4858                       msg->msg_hdr.msg.put.ptl_index,
4859                       msg->msg_hdr.msg.put.match_bits,
4860                       msg->msg_hdr.msg.put.offset,
4861                       msg->msg_hdr.payload_length, reason);
4862
4863                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4864                  * called lnet_drop_message(), so I just hang onto msg as well
4865                  * until that's done */
4866
4867                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4868                                   msg->msg_private, msg->msg_len,
4869                                   msg->msg_type);
4870
4871                 msg->msg_no_resend = true;
4872                 /*
4873                  * NB: the message will not generate an event because it has no
4874                  * attached MD, but we should still pass an error code so that
4875                  * lnet_msg_decommit() can skip counter operations and other checks.
4876                  */
4877                 lnet_finalize(msg, -ENOENT);
4878         }
4879 }
4880
4881 void
4882 lnet_recv_delayed_msg_list(struct list_head *head)
4883 {
4884         struct lnet_msg *msg;
4885
4886         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4887                                                msg_list)) != NULL) {
4888                 struct lnet_processid id;
4889
4890                 list_del(&msg->msg_list);
4891
4892                 /* md won't disappear under me, since each msg
4893                  * holds a ref on it */
4894
4895                 id.nid = msg->msg_hdr.src_nid;
4896                 id.pid = msg->msg_hdr.src_pid;
4897
4898                 LASSERT(msg->msg_rx_delayed);
4899                 LASSERT(msg->msg_md != NULL);
4900                 LASSERT(msg->msg_rxpeer != NULL);
4901                 LASSERT(msg->msg_rxni != NULL);
4902                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4903
4904                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4905                        "match %llu offset %d length %d.\n",
4906                         libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index,
4907                         msg->msg_hdr.msg.put.match_bits,
4908                         msg->msg_hdr.msg.put.offset,
4909                         msg->msg_hdr.payload_length);
4910
4911                 lnet_recv_put(msg->msg_rxni, msg);
4912         }
4913 }
4914
4915 static void
4916 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4917                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4918 {
4919         s64 timeout_ns;
4920         struct lnet_rsp_tracker *local_rspt;
4921
4922         /*
4923          * The MD has a refcount taken by the message, so it's not going
4924          * away.  The MD can still be looked up, however, so access to
4925          * md_rspt_ptr must be protected by taking the res_lock.
4926          * The rspt itself can be accessed without protection until it
4927          * is added to the list.
4928          */
4929
4930         lnet_res_lock(cpt);
4931         local_rspt = md->md_rspt_ptr;
4932         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4933         if (local_rspt != NULL) {
4934                 /*
4935                  * we already have an rspt attached to the md, so we'll
4936                  * update the deadline on that one.
4937                  */
4938                 lnet_rspt_free(rspt, cpt);
4939         } else {
4940                 /* new md */
4941                 rspt->rspt_mdh = mdh;
4942                 rspt->rspt_cpt = cpt;
4943                 /* store the rspt so we can access it when we get the REPLY */
4944                 md->md_rspt_ptr = rspt;
4945                 local_rspt = rspt;
4946         }
4947         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4948
4949         /*
4950          * Add to the list of tracked responses.  It's added to the tail
4951          * of the list so that older entries expire first.
4952          */
4953         lnet_net_lock(cpt);
4954         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4955         lnet_net_unlock(cpt);
4956         lnet_res_unlock(cpt);
4957 }
4958
4959 /**
4960  * Initiate an asynchronous PUT operation.
4961  *
4962  * There are several events associated with a PUT: completion of the send on
4963  * the initiator node (LNET_EVENT_SEND), and when the send completes
4964  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4965  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4966  * used at the target node to indicate the completion of incoming data
4967  * delivery.
4968  *
4969  * The local events will be logged in the EQ associated with the MD pointed to
4970  * by \a mdh handle. Using a MD without an associated EQ results in these
4971  * events being discarded. In this case, the caller must have another
4972  * mechanism (e.g., a higher level protocol) for determining when it is safe
4973  * to modify the memory region associated with the MD.
4974  *
4975  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4976  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4977  *
4978  * \param self Indicates the NID of a local interface through which to send
4979  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4980  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4981  * must be "free floating" (See LNetMDBind()).
4982  * \param ack Controls whether an acknowledgment is requested.
4983  * Acknowledgments are only sent when they are requested by the initiating
4984  * process and the target MD enables them.
4985  * \param target A process identifier for the target process.
4986  * \param portal The index in the \a target's portal table.
4987  * \param match_bits The match bits to use for MD selection at the target
4988  * process.
4989  * \param offset The offset into the target MD (only used when the target
4990  * MD has the LNET_MD_MANAGE_REMOTE option set).
4991  * \param hdr_data 64 bits of user data that can be included in the message
4992  * header. This data is written to an event queue entry at the target if an
4993  * EQ is present on the matching MD.
4994  *
4995  * \retval  0      Success, and only in this case events will be generated
4996  * and logged to EQ (if it exists).
4997  * \retval -EIO    Simulated failure.
4998  * \retval -ENOMEM Memory allocation failure.
4999  * \retval -ENOENT Invalid MD object.
5000  *
5001  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
5002  */
5003 int
5004 LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
5005         struct lnet_processid *target, unsigned int portal,
5006         __u64 match_bits, unsigned int offset,
5007         __u64 hdr_data)
5008 {
5009         struct lnet_msg *msg;
5010         struct lnet_libmd *md;
5011         int cpt;
5012         int rc;
5013         struct lnet_rsp_tracker *rspt = NULL;
5014
5015         LASSERT(the_lnet.ln_refcount > 0);
5016
5017         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5018             fail_peer(&target->nid, 1)) {               /* shall we now? */
5019                 CERROR("Dropping PUT to %s: simulated failure\n",
5020                        libcfs_idstr(target));
5021                 return -EIO;
5022         }
5023
5024         msg = lnet_msg_alloc();
5025         if (msg == NULL) {
5026                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5027                        libcfs_idstr(target));
5028                 return -ENOMEM;
5029         }
5030         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5031
5032         cpt = lnet_cpt_of_cookie(mdh.cookie);
5033
5034         if (ack == LNET_ACK_REQ) {
5035                 rspt = lnet_rspt_alloc(cpt);
5036                 if (!rspt) {
5037                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5038                                 libcfs_idstr(target));
                             /* don't leak the msg allocated above */
                             lnet_msg_free(msg);
5039                         return -ENOMEM;
5040                 }
5041                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5042         }
5043
5044         lnet_res_lock(cpt);
5045
5046         md = lnet_handle2md(&mdh);
5047         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5048                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5049                        match_bits, portal, libcfs_idstr(target),
5050                        md == NULL ? -1 : md->md_threshold);
5051                 if (md != NULL && md->md_me != NULL)
5052                         CERROR("Source MD also attached to portal %d\n",
5053                                md->md_me->me_portal);
5054                 lnet_res_unlock(cpt);
5055
5056                 if (rspt)
5057                         lnet_rspt_free(rspt, cpt);
5058
5059                 lnet_msg_free(msg);
5060                 return -ENOENT;
5061         }
5062
5063         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5064
5065         lnet_msg_attach_md(msg, md, 0, 0);
5066
5067         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5068
5069         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5070         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5071         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5072         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5073
5074         /* NB handles only looked up by creator (no flips) */
5075         if (ack == LNET_ACK_REQ) {
5076                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5077                         the_lnet.ln_interface_cookie;
5078                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5079                         md->md_lh.lh_cookie;
5080         } else {
5081                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5082                         LNET_WIRE_HANDLE_COOKIE_NONE;
5083                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5084                         LNET_WIRE_HANDLE_COOKIE_NONE;
5085         }
5086
5087         lnet_res_unlock(cpt);
5088
5089         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5090
5091         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5092                                                    md->md_options))
5093                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5094         else if (rspt)
5095                 lnet_rspt_free(rspt, cpt);
5096
5097         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5098                                  CFS_FAIL_ONCE))
5099                 rc = -EIO;
5100         else
5101                 rc = lnet_send(self, msg, NULL);
5102
5103         if (rc != 0) {
5104                 CNETERR("Error sending PUT to %s: %d\n",
5105                         libcfs_idstr(target), rc);
5106                 msg->msg_no_resend = true;
5107                 lnet_finalize(msg, rc);
5108         }
5109
5110         /* completion will be signalled by an event */
5111         return 0;
5112 }
5113 EXPORT_SYMBOL(LNetPut);
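/*
 * Illustrative usage sketch only; it lives in a comment and is not part of
 * this file's logic.  It assumes the caller has already bound an MD (see
 * LNetMDBind()) and holds its handle in "mdh", and that "src_nid",
 * "peer_nid", "peer_pid", "MY_PORTAL" and "MY_MATCH_BITS" are
 * caller-supplied placeholders rather than symbols defined by LNet:
 *
 *	struct lnet_processid target = { };
 *	int rc;
 *
 *	target.nid = peer_nid;
 *	target.pid = peer_pid;
 *
 *	rc = LNetPut(&src_nid, mdh, LNET_ACK_REQ, &target, MY_PORTAL,
 *		     MY_MATCH_BITS, 0, 0);
 *	if (rc != 0)
 *		CERROR("PUT to %s failed: %d\n", libcfs_idstr(&target), rc);
 *
 * On success, completion is reported asynchronously via LNET_EVENT_SEND on
 * the MD's handler, followed by LNET_EVENT_ACK if an acknowledgment was
 * requested and the target MD permits one.
 */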
5114
5115 /*
5116  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5117  * returns a msg for the LND to pass to lnet_finalize() when the sink
5118  * data has been received.
5119  *
5120  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5121  * lnet_finalize() is called on it, so the LND must call this first
5122  */
5123 struct lnet_msg *
5124 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5125 {
5126         struct lnet_msg *msg = lnet_msg_alloc();
5127         struct lnet_libmd *getmd = getmsg->msg_md;
5128         struct lnet_processid *peer_id = &getmsg->msg_target;
5129         int cpt;
5130
5131         LASSERT(!getmsg->msg_target_is_router);
5132         LASSERT(!getmsg->msg_routing);
5133
5134         if (msg == NULL) {
5135                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5136                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5137                 goto drop;
5138         }
5139
5140         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5141         lnet_res_lock(cpt);
5142
5143         LASSERT(getmd->md_refcount > 0);
5144
5145         if (getmd->md_threshold == 0) {
5146                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5147                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5148                         getmd);
5149                 lnet_res_unlock(cpt);
5150                 goto drop;
5151         }
5152
5153         LASSERT(getmd->md_offset == 0);
5154
5155         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5156                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5157
5158         /* setup information for lnet_build_msg_event */
5159         msg->msg_initiator =
5160                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5161         msg->msg_from = peer_id->nid;
5162         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5163         msg->msg_hdr.src_nid = peer_id->nid;
5164         msg->msg_hdr.payload_length = getmd->md_length;
5165         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5166
5167         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5168         lnet_res_unlock(cpt);
5169
5170         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5171
5172         lnet_net_lock(cpt);
5173         lnet_msg_commit(msg, cpt);
5174         lnet_net_unlock(cpt);
5175
5176         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5177
5178         return msg;
5179
5180  drop:
5181         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5182
5183         lnet_net_lock(cpt);
5184         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5185         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5186         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5187                 getmd->md_length;
5188         lnet_net_unlock(cpt);
5189
5190         if (msg != NULL)
5191                 lnet_msg_free(msg);
5192
5193         return NULL;
5194 }
5195 EXPORT_SYMBOL(lnet_create_reply_msg);
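/*
 * Illustrative sketch of how an LND consumes the helper above for an
 * optimized GET (RDMA direct to the GET MD, no REPLY on the wire).  This is
 * comment-only; "ni", "getmsg" and "nob_received" are placeholders for
 * state the LND already tracks, and the exact point at which each step runs
 * depends on the LND's own completion handling:
 *
 *	struct lnet_msg *reply;
 *
 *	// Create the REPLY stand-in before finalizing the original GET,
 *	// because lnet_finalize() frees "getmsg".
 *	reply = lnet_create_reply_msg(ni, getmsg);
 *	lnet_finalize(getmsg, 0);
 *
 *	// ... later, once the RDMA has landed and the byte count is known:
 *	if (reply != NULL) {
 *		lnet_set_reply_msg_len(ni, reply, nob_received);
 *		lnet_finalize(reply, 0);
 *	}
 */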
5196
5197 void
5198 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5199                        unsigned int len)
5200 {
5201         /* Set the REPLY length, now that the RDMA which elides the REPLY
5202          * message has completed and I know it. */
5203         LASSERT(reply != NULL);
5204         LASSERT(reply->msg_type == LNET_MSG_GET);
5205         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5206
5207         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5208          * the end of my buffer, I might as well be dead. */
5209         LASSERT(len <= reply->msg_ev.mlength);
5210
5211         reply->msg_ev.mlength = len;
5212 }
5213 EXPORT_SYMBOL(lnet_set_reply_msg_len);
5214
5215 /**
5216  * Initiate an asynchronous GET operation.
5217  *
5218  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5219  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5220  * the target node in the REPLY has been written to local MD.
5221  *
5222  * On the target node, an LNET_EVENT_GET is logged when the GET request
5223  * arrives and is accepted into a MD.
5224  *
5225  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5226  * \param mdh A handle for the MD that describes the memory into which the
5227  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5228  *
5229  * \retval  0      Success, and only in this case events will be generated
5230  * and logged to EQ (if it exists) of the MD.
5231  * \retval -EIO    Simulated failure.
5232  * \retval -ENOMEM Memory allocation failure.
5233  * \retval -ENOENT Invalid MD object.
5234  */
5235 int
5236 LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh,
5237         struct lnet_processid *target, unsigned int portal,
5238         __u64 match_bits, unsigned int offset, bool recovery)
5239 {
5240         struct lnet_msg *msg;
5241         struct lnet_libmd *md;
5242         struct lnet_rsp_tracker *rspt;
5243         int cpt;
5244         int rc;
5245
5246         LASSERT(the_lnet.ln_refcount > 0);
5247
5248         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5249             fail_peer(&target->nid, 1))         /* shall we now? */
5250         {
5251                 CERROR("Dropping GET to %s: simulated failure\n",
5252                        libcfs_idstr(target));
5253                 return -EIO;
5254         }
5255
5256         msg = lnet_msg_alloc();
5257         if (!msg) {
5258                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5259                        libcfs_idstr(target));
5260                 return -ENOMEM;
5261         }
5262
5263         cpt = lnet_cpt_of_cookie(mdh.cookie);
5264
5265         rspt = lnet_rspt_alloc(cpt);
5266         if (!rspt) {
5267                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5268                        libcfs_idstr(target));
                     /* don't leak the msg allocated above */
                     lnet_msg_free(msg);
5269                 return -ENOMEM;
5270         }
5271         INIT_LIST_HEAD(&rspt->rspt_on_list);
5272
5273         msg->msg_recovery = recovery;
5274
5275         lnet_res_lock(cpt);
5276
5277         md = lnet_handle2md(&mdh);
5278         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5279                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5280                        match_bits, portal, libcfs_idstr(target),
5281                        md == NULL ? -1 : md->md_threshold);
5282                 if (md != NULL && md->md_me != NULL)
5283                         CERROR("REPLY MD also attached to portal %d\n",
5284                                md->md_me->me_portal);
5285
5286                 lnet_res_unlock(cpt);
5287
5288                 lnet_msg_free(msg);
5289                 lnet_rspt_free(rspt, cpt);
5290                 return -ENOENT;
5291         }
5292
5293         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5294
5295         lnet_msg_attach_md(msg, md, 0, 0);
5296
5297         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5298
5299         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5300         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5301         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5302         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5303
5304         /* NB handles only looked up by creator (no flips) */
5305         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5306                 the_lnet.ln_interface_cookie;
5307         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5308                 md->md_lh.lh_cookie;
5309
5310         lnet_res_unlock(cpt);
5311
5312         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5313
5314         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5315                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5316         else
5317                 lnet_rspt_free(rspt, cpt);
5318
5319         rc = lnet_send(self, msg, NULL);
5320         if (rc < 0) {
5321                 CNETERR("Error sending GET to %s: %d\n",
5322                         libcfs_idstr(target), rc);
5323                 msg->msg_no_resend = true;
5324                 lnet_finalize(msg, rc);
5325         }
5326
5327         /* completion will be signalled by an event */
5328         return 0;
5329 }
5330 EXPORT_SYMBOL(LNetGet);
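/*
 * Illustrative usage sketch only (not part of this file's logic).  As with
 * the LNetPut() sketch above, "src_nid", "mdh", "peer_nid", "peer_pid",
 * "MY_PORTAL" and "MY_MATCH_BITS" are caller-supplied placeholders and the
 * sink MD is assumed to have been bound beforehand:
 *
 *	struct lnet_processid target = { };
 *	int rc;
 *
 *	target.nid = peer_nid;
 *	target.pid = peer_pid;
 *
 *	rc = LNetGet(&src_nid, mdh, &target, MY_PORTAL, MY_MATCH_BITS, 0,
 *		     false);
 *	if (rc != 0)
 *		CERROR("GET from %s failed: %d\n", libcfs_idstr(&target), rc);
 *
 * LNET_EVENT_SEND is logged when the request is sent; LNET_EVENT_REPLY is
 * logged once the returned data has been written into the local MD.
 */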
5331
5332 /**
5333  * Calculate distance to node at \a dstnid.
5334  *
5335  * \param dstnid Target NID.
5336  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5337  * is saved here.
5338  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5339  * here.
5340  *
5341  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5342  * local_nid_dist_zero is set, which is the default.
5343  * \retval positives Distance to target NID, i.e. number of hops plus one.
5344  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5345  */
5346 int
5347 LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp)
5348 {
5349         struct lnet_ni *ni = NULL;
5350         struct lnet_remotenet *rnet;
5351         __u32 dstnet = LNET_NID_NET(dstnid);
5352         int hops;
5353         int cpt;
5354         __u32 order = 2;
5355         struct list_head *rn_list;
5356         struct lnet_ni *matched_dstnet = NULL;
5357
5358         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5359          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5360          * keep order 0 free for 0@lo and order 1 free for a local NID
5361          * match
5362          * WARNING: dstnid and srcnid might point to same place.
5363          * Don't set *srcnid until late.
5364          */
5365
5366         LASSERT(the_lnet.ln_refcount > 0);
5367
5368         cpt = lnet_net_lock_current();
5369
5370         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5371                 if (nid_same(&ni->ni_nid, dstnid)) {
5372                         if (orderp != NULL) {
5373                                 if (nid_is_lo0(dstnid))
5374                                         *orderp = 0;
5375                                 else
5376                                         *orderp = 1;
5377                         }
5378                         if (srcnid)
5379                                 *srcnid = *dstnid;
5380                         lnet_net_unlock(cpt);
5381
5382                         return local_nid_dist_zero ? 0 : 1;
5383                 }
5384
5385                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5386                         matched_dstnet = ni;
5387                         /* We matched the destination net, but we may have
5388                          * additional local NIs to inspect.
5389                          *
5390                          * We record the order as appropriate, but
5391                          * We record the order as appropriate, but it may
5392                          * be overwritten if we match a local NI above.
5393
5394                         if (orderp) {
5395                                 /* Check if ni was originally created in
5396                                  * current net namespace.
5397                                  * If not, assign order above 0xffff0000,
5398                                  * to make this ni not a priority.
5399                                  */
5400                                 if (current->nsproxy &&
5401                                     !net_eq(ni->ni_net_ns,
5402                                             current->nsproxy->net_ns))
5403                                         *orderp = order + 0xffff0000;
5404                                 else
5405                                         *orderp = order;
5406                         }
5407                 }
5408
5409                 order++;
5410         }
5411
5412         if (matched_dstnet) {
5413                 if (srcnid)
5414                         *srcnid = matched_dstnet->ni_nid;
5415                 lnet_net_unlock(cpt);
5416                 return 1;
5417         }
5418
5419         rn_list = lnet_net2rnethash(dstnet);
5420         list_for_each_entry(rnet, rn_list, lrn_list) {
5421                 if (rnet->lrn_net == dstnet) {
5422                         struct lnet_route *route;
5423                         struct lnet_route *shortest = NULL;
5424                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5425                         __u32 route_hops;
5426
5427                         LASSERT(!list_empty(&rnet->lrn_routes));
5428
5429                         list_for_each_entry(route, &rnet->lrn_routes,
5430                                             lr_list) {
5431                                 route_hops = route->lr_hops;
5432                                 if (route_hops == LNET_UNDEFINED_HOPS)
5433                                         route_hops = 1;
5434                                 if (shortest == NULL ||
5435                                     route_hops < shortest_hops) {
5436                                         shortest = route;
5437                                         shortest_hops = route_hops;
5438                                 }
5439                         }
5440
5441                         LASSERT(shortest != NULL);
5442                         hops = shortest_hops;
5443                         if (srcnid) {
5444                                 struct lnet_net *net;
5445                                 net = lnet_get_net_locked(shortest->lr_lnet);
5446                                 LASSERT(net);
5447                                 ni = lnet_get_next_ni_locked(net, NULL);
5448                                 *srcnid = ni->ni_nid;
5449                         }
5450                         if (orderp != NULL)
5451                                 *orderp = order;
5452                         lnet_net_unlock(cpt);
5453                         return hops + 1;
5454                 }
5455                 order++;
5456         }
5457
5458         lnet_net_unlock(cpt);
5459         return -EHOSTUNREACH;
5460 }
5461 EXPORT_SYMBOL(LNetDist);
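/*
 * Illustrative sketch of interpreting LNetDist() results for a placeholder
 * destination "dst" (comment only, not part of this file's logic):
 *
 *	struct lnet_nid src;
 *	__u32 order;
 *	int dist;
 *
 *	dist = LNetDist(&dst, &src, &order);
 *	if (dist < 0)
 *		CERROR("%s is unreachable: %d\n", libcfs_nidstr(&dst), dist);
 *	else if (dist == 0)
 *		CDEBUG(D_NET, "%s is a local NID\n", libcfs_nidstr(&dst));
 *	else
 *		CDEBUG(D_NET, "distance to %s is %d via %s (order %u)\n",
 *		       libcfs_nidstr(&dst), dist, libcfs_nidstr(&src), order);
 *
 * A distance of 1 means the destination is reached over a directly attached
 * network; larger values are the route hop count plus one, as documented
 * above.
 */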