/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <lnet/lnet_rdma.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

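/* Scratch state used while selecting the send pathway for a message:
 * the best local NI and peer NI found so far, the (gateway) peer and
 * peer net under consideration, and the CPTs and send-case bits that
 * drive the selection logic. */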
struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        struct lnet_nid sd_dst_nid;
        struct lnet_nid sd_src_nid;
        struct lnet_nid sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

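/* ACKs and REPLYs are responses: they travel back to the originator of
 * a PUT or GET rather than initiating a new exchange. */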
static inline bool
lnet_msg_is_response(struct lnet_msg *msg)
{
        return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
}

static inline bool
lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
{
        if (md_options & LNET_MD_NO_TRACK_RESPONSE)
                /* Explicitly disabled in MD options */
                return false;

        if (md_options & LNET_MD_TRACK_RESPONSE)
                /* Explicitly enabled in MD options */
                return true;

        if (lnet_response_tracking == 3)
                /* Enabled for all message types */
                return true;

        if (msg_type == LNET_MSG_PUT)
                return lnet_response_tracking == 2;

        if (msg_type == LNET_MSG_GET)
                return lnet_response_tracking == 1;

        return false;
}

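/* Map a stats type onto the matching counter set in \a stats. Returns
 * NULL (after logging a console error) for an unknown type. */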
static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

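/* Increment the per-message-type counter for \a msg_type in the
 * counter set selected by \a stats_type (send, receive or drop). */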
void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);

        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

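/* Sum the counters of all message types for the given stats type.
 * NB: the counters are read one at a time, so the total is only a
 * snapshot and may be slightly stale under concurrent updates. */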
__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);

        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

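/* Copy the send/receive/drop counter sets into the ioctl structure
 * handed back to userspace. */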
void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

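/* Configure message-loss fault injection for \a nid4: a non-zero
 * \a threshold adds a test peer entry that fails that many messages
 * (LNET_MD_THRESH_INF for unlimited), while a zero threshold removes
 * matching entries (or every entry, if a wildcard NID is given). */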
int
lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct lnet_nid nid;
        LIST_HEAD(cull);

        lnet_nid4_to_nid(nid4, &nid);
        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
                    nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while ((tp = list_first_entry_or_null(&cull,
                                              struct lnet_test_peer,
                                              tp_list)) != NULL) {
                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}

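/* Check whether fault injection says a message to/from \a nid should
 * fail. Returns 1 to drop the message and 0 to let it through,
 * decrementing the matching entry's threshold as a side effect. */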
static int
fail_peer(struct lnet_nid *nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);
        int fail = 0;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
                    nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while ((tp = list_first_entry_or_null(&cull,
                                              struct lnet_test_peer,
                                              tp_list)) != NULL) {
                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

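/* Copy \a nob bytes from the source kvec starting at \a soffset into
 * the destination kvec starting at \a doffset. Offsets may begin
 * inside any fragment; the caller must guarantee both vectors cover
 * the requested range. */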
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

unsigned int
lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->bv_len;

        return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

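/* As lnet_copy_iov2iov(), but with page-based bio_vec fragments on
 * both sides: pages are kmap()ed on demand and unmapped as each
 * fragment is consumed, so this must not be called in interrupt
 * context. */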
void
lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
                    unsigned int doffset,
                    unsigned int nsiov, struct bio_vec *siov,
                    unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;
        char *daddr = NULL;
        char *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(ndiov > 0);
        while (doffset >= diov->bv_len) {
                doffset -= diov->bv_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->bv_len) {
                soffset -= siov->bv_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->bv_len - doffset,
                                siov->bv_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->bv_page)) +
                                diov->bv_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->bv_page)) +
                                siov->bv_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be
                 * mapped kernel pages and the map/unmap will be no-ops. */

                memcpy(daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->bv_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->bv_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->bv_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->bv_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->bv_page);
        if (saddr != NULL)
                kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int this_nob;
        char *addr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->bv_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int this_nob;
        char *addr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
                  int src_niov, struct bio_vec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return 0;                       /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->bv_len) {         /* skip initial frags */
                offset -= src->bv_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->bv_len - offset;
                dst->bv_page = src->bv_page;
                dst->bv_offset = src->bv_offset + offset;

                if (len <= frag_len) {
                        dst->bv_len = len;
                        LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
                        return niov;
                }

                dst->bv_len = frag_len;
                LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);

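/* Hand an incoming message to the LND for receive. \a mlen bytes are
 * delivered into the MD's buffers at \a offset out of \a rlen bytes on
 * the wire; a NULL \a msg tells the LND to discard the payload. If the
 * LND fails the receive, the message is finalized here. */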
void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        struct bio_vec *kiov = NULL;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT(niov > 0);
                        LASSERT((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        msg->msg_kiov = md->md_kiov;
}

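/* Initialize the common fields of an outgoing message: type, target,
 * payload geometry and wire header. The source NID and (for routed
 * messages) the destination NID are filled in later by the pathway
 * selection code. */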
void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = *target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
        msg->msg_hdr.type           = type;
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid = target->nid;
        msg->msg_hdr.dest_pid = target->pid;
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = the_lnet.ln_pid;
        msg->msg_hdr.payload_length = len;
}

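/* Pass a fully-credited message to the LND for transmission. On
 * immediate failure the message is finalized with resends disabled. */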
void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(nid_is_lo0(&ni->ni_nid) ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                   &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
                       libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
                       libcfs_idstr(&msg->msg_target), rc);
                LASSERT(rc < 0); /* required by callers */
        }

        return rc;
}

/* Returns:
 *  -ETIMEDOUT if the message deadline has been exceeded
 *  -EHOSTUNREACH if the peer is down
 *  0 if this message should not be dropped
 */
static int
lnet_check_message_drop(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                        struct lnet_msg *msg)
{
        /* Drop message if we've exceeded the message deadline */
        if (ktime_after(ktime_get(), msg->msg_deadline))
                return -ETIMEDOUT;

        if (msg->msg_target.pid & LNET_PID_USERFLAG)
                return 0;

        if (!lnet_peer_aliveness_enabled(lpni))
                return 0;

        /* If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 0;

        /* try and send recovery messages regardless */
        if (msg->msg_recovery)
                return 0;

        /* always send any responses */
        if (lnet_msg_is_response(msg))
                return 0;

        /* always send non-routed messages */
        if (!msg->msg_routing)
                return 0;

        /* assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (ktime_get_seconds() <
            (lpni->lpni_last_alive +
             lpni->lpni_net->net_tunables.lct_peer_timeout))
                return 0;

        if (!lnet_is_peer_ni_alive(lpni))
                return -EHOSTUNREACH;

        return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() calls lnet_net_unlock() immediately after this, so it
 *        passes do_send as FALSE and skips the unlock/send/lock sequence.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni *lp = msg->msg_txpeer;
        struct lnet_ni *ni = msg->msg_txni;
        int cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
        int rc;

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* can't get here if we're sending to the loopback interface */
        if (the_lnet.ln_loni)
                LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));

        /* NB 'lp' is always the next hop */
        rc = lnet_check_message_drop(ni, lp, msg);
        if (rc) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                lnet_incr_stats(&msg->msg_txni->ni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_DROP);

                if (rc == -EHOSTUNREACH) {
                        CNETERR("Dropping message for %s: peer not alive\n",
                                libcfs_idstr(&msg->msg_target));
                        msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                } else {
                        CNETERR("Dropping message for %s: exceeded message deadline\n",
                                libcfs_idstr(&msg->msg_target));
                        msg->msg_health_status =
                                LNET_MSG_STATUS_NETWORK_TIMEOUT;
                }

                if (do_send)
                        lnet_finalize(msg, rc);

                lnet_net_lock(cpt);
                return rc;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
                        libcfs_idstr(&msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_idstr(&msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
            lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
                msg->msg_tx_delayed = 1;
                return LNET_CREDIT_WAIT;
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

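/* Pick the router buffer pool on this CPT whose buffers can hold the
 * message payload; the pools appear to be sized in increasing page
 * counts, up to LNET_MTU. */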
static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool *rbp;
        int cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse() calls lnet_net_unlock() immediately after this, so
         * it passes do_recv as FALSE and skips the unlock/recv/lock
         * sequence. Returns LNET_CREDIT_WAIT if the message blocked for a
         * credit or buffer, and LNET_CREDIT_OK if it was received or is OK
         * to receive. */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        spin_unlock(&lpni->lpni_lock);
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        /* lp_lock protects the lp_rtrq */
                        spin_lock(&lp->lp_lock);
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

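/* Return the NI and peer tx credits held by a completed message and,
 * if other messages were blocked waiting on those credits, launch the
 * first of them. Caller holds the net lock for the message's tx CPT. */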
void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *txpeer = msg->msg_txpeer;
        struct lnet_ni *txni = msg->msg_txni;
        struct lnet_msg *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_first_entry(&tq->tq_delayed,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len +
                                       sizeof(struct lnet_hdr_nid4);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_first_entry(&txpeer->lpni_txq,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_first_entry(&rbp->rbp_msgs,
                               struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

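/* Return the router buffer and router credits held by a routed
 * message. The freed buffer either services a queued message, is
 * returned to its pool, or is destroyed if routing was disabled or
 * the pool has been shrunk. */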
void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits. */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool. Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                spin_lock(&rxpeerni->lpni_lock);
                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;
                rxpeerni->lpni_rtrcredits++;
                spin_unlock(&rxpeerni->lpni_lock);

                lp = rxpeerni->lpni_peer_net->lpn_peer;
                spin_lock(&lp->lp_lock);

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);

                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_first_entry(&lp->lp_rtrq,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, struct lnet_nid *dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_ni *best_lpni,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
                INT_MIN;
        int best_lpni_healthv = (best_lpni) ?
                atomic_read(&best_lpni->lpni_healthv) : 0;
        bool best_lpni_is_preferred = false;
        bool lpni_is_preferred;
        int lpni_healthv;
        __u32 lpni_sel_prio;
        __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        lpni_is_preferred = lnet_peer_is_pref_nid_locked(
                                lpni, &best_ni->ni_nid);
                        CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
                               libcfs_nidstr(&best_ni->ni_nid),
                               lpni_is_preferred);
                } else {
                        lpni_is_preferred = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);
                lpni_sel_prio = lpni->lpni_sel_priority;

                if (best_lpni)
                        CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
                               libcfs_nidstr(&lpni->lpni_nid),
                               libcfs_nidstr(&best_lpni->lpni_nid),
                               lpni_healthv, best_lpni_healthv,
                               lpni_sel_prio, best_sel_prio,
                               lpni->lpni_txcredits, best_lpni_credits,
                               lpni->lpni_seq, best_lpni->lpni_seq);
                else
                        goto select_lpni;

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv)
                        continue;
                else if (lpni_healthv > best_lpni_healthv)
                        goto select_lpni;

                if (lpni_sel_prio > best_sel_prio)
                        continue;
                else if (lpni_sel_prio < best_sel_prio)
                        goto select_lpni;

                /* if this is a preferred peer, use it. Otherwise, ignore it */
                if (!best_lpni_is_preferred && lpni_is_preferred)
                        goto select_lpni;
                else if (best_lpni_is_preferred && !lpni_is_preferred)
                        continue;

                if (lpni->lpni_txcredits < best_lpni_credits)
                        /* We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                else if (lpni->lpni_txcredits > best_lpni_credits)
                        goto select_lpni;

                /* The best peer found so far and the current peer have
                 * the same number of available credits, so select
                 * between them using round robin.
                 */
                if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
                        continue;
select_lpni:
                best_lpni_is_preferred = lpni_is_preferred;
                best_lpni_healthv = lpni_healthv;
                best_sel_prio = lpni_sel_prio;
                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                             LNET_NID_NET(dst_nid);

                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                       libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nidstr(&best_lpni->lpni_nid));

        return best_lpni;
}

/*
 * Prerequisite: the best_ni should already be set in the sd.
 * Find the best lpni.
 * If a net id is provided, restrict lpni selection to that
 * particular net.
 * Otherwise find any reachable lpni. When dealing with an MR
 * gateway that has multiple lpnis we can use, we want to select
 * the best one from the list of reachable ones.
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni(struct lnet_ni *lni, struct lnet_nid *dst_nid,
                    struct lnet_peer *peer, u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /* find the best_lpni on any local network */
        if (net_id == LNET_NET_ANY) {
                struct lnet_peer_ni *best_lpni = NULL;
                struct lnet_peer_net *lpn;

                list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
                        /* no net specified find any reachable peer ni */
                        if (!lnet_islocalnet_locked(lpn->lpn_net_id))
                                continue;
                        best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
                                                        best_lpni, lpn);
                }

                return best_lpni;
        }
        /* restrict on the specified net */
        peer_net = lnet_peer_get_net_locked(peer, net_id);
        if (peer_net)
                return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);

        return NULL;
}

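/* Compare two gateway peer NIs by queued bytes, then by available tx
 * credits. Returns 1 if \a lpni1 is the better choice, -1 if \a lpni2
 * is, and 0 if they are equivalent. */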
1453 static int
1454 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1455 {
1456         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1457                 return 1;
1458
1459         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1460                 return -1;
1461
1462         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1463                 return 1;
1464
1465         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1466                 return -1;
1467
1468         return 0;
1469 }
1470
1471 /* Compare route priorities and hop counts */
1472 static int
1473 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1474 {
1475         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1476         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1477
1478         if (r1->lr_priority < r2->lr_priority)
1479                 return 1;
1480
1481         if (r1->lr_priority > r2->lr_priority)
1482                 return -1;
1483
1484         if (r1_hops < r2_hops)
1485                 return 1;
1486
1487         if (r1_hops > r2_hops)
1488                 return -1;
1489
1490         return 0;
1491 }
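
/* Both comparators above share a tri-state convention: return 1 if the
 * first argument is strictly better, -1 if the second is better, and 0
 * on a tie so the caller can fall through to the next criterion. A
 * hedged sketch of the idiom (r1/r2 and gw1/gw2 are hypothetical):
 */
#if 0
	rc = lnet_compare_routes(r1, r2);
	if (rc == 1)
		best = r1;	/* r1 wins on priority or hop count */
	else if (rc == -1)
		best = r2;	/* r2 wins outright */
	else			/* tie: fall through to the gateway NIs */
		best = lnet_compare_gw_lpnis(gw1, gw2) >= 0 ? r1 : r2;
#endif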
1492
1493 static struct lnet_route *
1494 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1495                        struct lnet_peer_ni *remote_lpni,
1496                        struct lnet_route **prev_route,
1497                        struct lnet_peer_ni **gwni)
1498 {
1499         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1500         struct lnet_route *best_route;
1501         struct lnet_route *last_route;
1502         struct lnet_route *route;
1503         int rc;
1504         bool best_rte_is_preferred = false;
1505         struct lnet_nid *gw_pnid;
1506
1507         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1508                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1509
1510         best_route = last_route = NULL;
1511         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1512                 if (!lnet_is_route_alive(route))
1513                         continue;
1514                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1515
1516                 /* these fields are read without locking, but that's harmless */
1517                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1518                         last_route = route;
1519
1520                 /* if the best route found is in the preferred list then
1521                  * tag it as preferred and use it later on. But if we
1522                  * didn't find any routes which are on the preferred list
1523                  * then just use the best route possible.
1524                  */
1525                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1526
1527                 if (!best_route || (rc && !best_rte_is_preferred)) {
1528                         /* Restrict the selection of the router NI on the
1529          * src_net provided. If src_net is LNET_NET_ANY,
1530                          * then select the best interface available.
1531                          */
1532                         lpni = lnet_find_best_lpni(NULL, NULL,
1533                                                    route->lr_gateway,
1534                                                    src_net);
1535                         if (!lpni) {
1536                                 CDEBUG(D_NET,
1537                                        "Gateway %s does not have a peer NI on net %s\n",
1538                                        libcfs_nidstr(gw_pnid),
1539                                        libcfs_net2str(src_net));
1540                                 continue;
1541                         }
1542                 }
1543
1544                 if (rc && !best_rte_is_preferred) {
1545                         /* This is the first preferred route we found,
1546                          * so it beats any route found previously
1547                          */
1548                         best_route = route;
1549                         if (!last_route)
1550                                 last_route = route;
1551                         best_gw_ni = lpni;
1552                         best_rte_is_preferred = true;
1553                         CDEBUG(D_NET, "preferred gw = %s\n",
1554                                libcfs_nidstr(gw_pnid));
1555                         continue;
1556                 } else if ((!rc) && best_rte_is_preferred)
1557                         /* The best route we found so far is in the preferred
1558                          * list, so it beats any non-preferred route
1559                          */
1560                         continue;
1561
1562                 if (!best_route) {
1563                         best_route = last_route = route;
1564                         best_gw_ni = lpni;
1565                         continue;
1566                 }
1567
1568                 rc = lnet_compare_routes(route, best_route);
1569                 if (rc == -1)
1570                         continue;
1571
1572                 /* Restrict the selection of the router NI on the
1573          * src_net provided. If src_net is LNET_NET_ANY,
1574                  * then select the best interface available.
1575                  */
1576                 lpni = lnet_find_best_lpni(NULL, NULL, route->lr_gateway,
1577                                            src_net);
1578                 if (!lpni) {
1579                         CDEBUG(D_NET,
1580                                "Gateway %s does not have a peer NI on net %s\n",
1581                                libcfs_nidstr(gw_pnid),
1582                                libcfs_net2str(src_net));
1583                         continue;
1584                 }
1585
1586                 if (rc == 1) {
1587                         best_route = route;
1588                         best_gw_ni = lpni;
1589                         continue;
1590                 }
1591
1592                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1593                 if (rc == -1)
1594                         continue;
1595
1596                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1597                         best_route = route;
1598                         best_gw_ni = lpni;
1599                         continue;
1600                 }
1601         }
1602
1603         *prev_route = last_route;
1604         *gwni = best_gw_ni;
1605
1606         return best_route;
1607 }
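
/* To summarize the loop above, candidate routes are ranked by:
 *   1. gateway on the peer's preferred-router list
 *   2. route priority (lower value wins)
 *   3. hop count (lower wins; LNET_UNDEFINED_HOPS counts as 1)
 *   4. gateway NI queue depth, then gateway NI tx credits
 *   5. round robin on lr_seq
 * *prev_route returns the most recently used route (largest lr_seq) so
 * lnet_handle_find_routed_path() can bump the winner's lr_seq once the
 * selection is committed.
 */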
1608
1609 static inline unsigned int
1610 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1611 {
1612         if (dev_idx == UINT_MAX)
1613                 return UINT_MAX;
1614
1615         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1616             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1617                 return UINT_MAX;
1618
1619         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1620 }
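
/* UINT_MAX acts as the "no preference" sentinel above: it is returned
 * when the MD does not reference device memory (dev_idx == UINT_MAX) or
 * when the LND provides no lnd_get_dev_prio() hook. Lower values mean
 * the NI is closer to the device holding the MD, and lnet_get_best_ni()
 * below prefers them.
 */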
1621
1622 static struct lnet_ni *
1623 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1624                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1625                  struct lnet_msg *msg, int md_cpt)
1626 {
1627         struct lnet_libmd *md = msg->msg_md;
1628         unsigned int offset = msg->msg_offset;
1629         unsigned int shortest_distance;
1630         struct lnet_ni *ni = NULL;
1631         int best_credits;
1632         int best_healthv;
1633         __u32 best_sel_prio;
1634         unsigned int best_dev_prio;
1635         int best_ni_fatal;
1636         unsigned int dev_idx = UINT_MAX;
1637         bool gpu = lnet_md_is_gpu(md);
1638
1639         if (gpu) {
1640                 struct page *page = lnet_get_first_page(md, offset);
1641
1642                 dev_idx = lnet_get_dev_idx(page);
1643         }
1644
1645         /*
1646          * If there is no peer_ni that we can send to on this network,
1647          * then there is no point in looking for a new best_ni here.
1648          */
1649         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1650                 return best_ni;
1651
1652         if (best_ni == NULL) {
1653                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1654                 shortest_distance = UINT_MAX;
1655                 best_dev_prio = UINT_MAX;
1656                 best_credits = INT_MIN;
1657                 best_healthv = 0;
1658                 best_ni_fatal = true;
1659         } else {
1660                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1661                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1662                                                      best_ni->ni_dev_cpt);
1663                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1664                 best_healthv = atomic_read(&best_ni->ni_healthv);
1665                 best_sel_prio = best_ni->ni_sel_priority;
1666                 best_ni_fatal = atomic_read(&best_ni->ni_fatal_error_on);
1667         }
1668
1669         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1670                 unsigned int distance;
1671                 int ni_credits;
1672                 int ni_healthv;
1673                 int ni_fatal;
1674                 __u32 ni_sel_prio;
1675                 unsigned int ni_dev_prio;
1676
1677                 ni_credits = atomic_read(&ni->ni_tx_credits);
1678                 ni_healthv = atomic_read(&ni->ni_healthv);
1679                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1680                 ni_sel_prio = ni->ni_sel_priority;
1681
1682                 /*
1683                  * calculate the distance from the CPT on which
1684                  * the message memory is allocated to the CPT of
1685                  * the NI's physical device
1686                  */
1687                 distance = cfs_cpt_distance(lnet_cpt_table(),
1688                                             md_cpt,
1689                                             ni->ni_dev_cpt);
1690
1691                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1692
1693                 /*
1694                  * All distances smaller than the NUMA range
1695                  * are treated equally.
1696                  */
1697                 if (!gpu && distance < lnet_numa_range)
1698                         distance = lnet_numa_range;
1699
1700                 /*
1701                  * Select on health, selection policy, direct dma prio,
1702                  * shorter distance, available credits, then round-robin.
1703                  */
1704                 if (best_ni)
1705                         CDEBUG(D_NET, "compare ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1706                                libcfs_nidstr(&ni->ni_nid),
1707                                ni_fatal ? "y" : "n", ni_credits, distance,
1708                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1709                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1710                                : "not selected",
1711                                best_ni_fatal ? "y" : "n", best_credits,
1712                                shortest_distance,
1713                                (best_ni) ? best_ni->ni_seq : 0,
1714                                best_sel_prio, best_dev_prio, best_healthv);
1715                 else
1716                         goto select_ni;
1717
1718                 if (ni_fatal && !best_ni_fatal)
1719                         continue;
1720                 else if (!ni_fatal && best_ni_fatal)
1721                         goto select_ni;
1722
1723                 if (ni_healthv < best_healthv)
1724                         continue;
1725                 else if (ni_healthv > best_healthv)
1726                         goto select_ni;
1727
1728                 if (ni_sel_prio > best_sel_prio)
1729                         continue;
1730                 else if (ni_sel_prio < best_sel_prio)
1731                         goto select_ni;
1732
1733                 if (ni_dev_prio > best_dev_prio)
1734                         continue;
1735                 else if (ni_dev_prio < best_dev_prio)
1736                         goto select_ni;
1737
1738                 if (distance > shortest_distance)
1739                         continue;
1740                 else if (distance < shortest_distance)
1741                         goto select_ni;
1742
1743                 if (ni_credits < best_credits)
1744                         continue;
1745                 else if (ni_credits > best_credits)
1746                         goto select_ni;
1747
1748                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1749                         continue;
1750
1751 select_ni:
1752                 best_sel_prio = ni_sel_prio;
1753                 best_dev_prio = ni_dev_prio;
1754                 shortest_distance = distance;
1755                 best_healthv = ni_healthv;
1756                 best_ni = ni;
1757                 best_credits = ni_credits;
1758                 best_ni_fatal = ni_fatal;
1759         }
1760
1761         CDEBUG(D_NET, "selected best_ni %s\n",
1762                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1763
1764         return best_ni;
1765 }
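
/* The comparison cascade above, from most to least significant:
 *   fatal-error flag, health, selection priority, device (GPU)
 *   priority, CPT distance, tx credits, then round robin on ni_seq.
 * Each criterion either rejects the candidate, selects it outright,
 * or ties and falls through to the next criterion.
 */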
1766
1767 static bool
1768 lnet_reserved_msg(struct lnet_msg *msg)
1769 {
1770         if (msg->msg_type == LNET_MSG_PUT) {
1771                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1772                         return true;
1773         } else if (msg->msg_type == LNET_MSG_GET) {
1774                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1775                         return true;
1776         }
1777         return false;
1778 }
1779
1780 /* Can the specified message trigger peer discovery?
1781  *
1782  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1783  * because such traffic is required to perform discovery. We therefore
1784  * exclude all GET and PUT on that portal. We also exclude all ACK and
1785  * REPLY traffic, but that is because the portal is not tracked in the
1786  * message structure for these message types. We could restrict this
1787  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1788  */
1789 static bool
1790 lnet_msg_discovery(struct lnet_msg *msg)
1791 {
1792         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1793 }
1794
1795 /* Is the specified message an LNet ping?
1796  */
1797 static bool
1798 lnet_msg_is_ping(struct lnet_msg *msg)
1799 {
1800         if (msg->msg_type == LNET_MSG_GET &&
1801             msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1802                 return true;
1803
1804         return false;
1805 }
1806
1807 #define SRC_SPEC        0x0001
1808 #define SRC_ANY         0x0002
1809 #define LOCAL_DST       0x0004
1810 #define REMOTE_DST      0x0008
1811 #define MR_DST          0x0010
1812 #define NMR_DST         0x0020
1813 #define SND_RESP        0x0040
1814
1815 /* The following two defines are used for return codes */
1816 #define REPEAT_SEND     0x1000
1817 #define PASS_THROUGH    0x2000
1818
1819 /* The different cases lnet_select pathway needs to handle */
1820 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1821 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1822 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1823 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1824 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1825 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1826 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1827 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
1828
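/* A hedged sketch (not compiled) of how these bits are meant to combine;
 * sd_send_case is actually assembled in lnet_select_pathway() later in
 * this file, and "sd" and "peer" are hypothetical locals here:
 */
#if 0
	if (!LNET_NID_IS_ANY(&sd->sd_src_nid))
		sd->sd_send_case |= SRC_SPEC;
	else
		sd->sd_send_case |= SRC_ANY;

	if (lnet_islocalnet_locked(LNET_NID_NET(&sd->sd_dst_nid)))
		sd->sd_send_case |= LOCAL_DST;
	else
		sd->sd_send_case |= REMOTE_DST;

	if (lnet_peer_is_multi_rail(peer))
		sd->sd_send_case |= MR_DST;
	else
		sd->sd_send_case |= NMR_DST;

	/* e.g. SRC_SPEC | LOCAL_DST | MR_DST == SRC_SPEC_LOCAL_MR_DST */
#endif
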
1829 static int
1830 lnet_handle_lo_send(struct lnet_send_data *sd)
1831 {
1832         struct lnet_msg *msg = sd->sd_msg;
1833         int cpt = sd->sd_cpt;
1834
1835         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1836                 return -ESHUTDOWN;
1837
1838         /* No send credit hassles with LOLND */
1839         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1840         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1841         if (!msg->msg_routing)
1842                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1843         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1844         lnet_msg_commit(msg, cpt);
1845         msg->msg_txni = the_lnet.ln_loni;
1846
1847         return LNET_CREDIT_OK;
1848 }
1849
1850 static int
1851 lnet_handle_send(struct lnet_send_data *sd)
1852 {
1853         struct lnet_ni *best_ni = sd->sd_best_ni;
1854         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1855         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1856         struct lnet_msg *msg = sd->sd_msg;
1857         int cpt2;
1858         __u32 send_case = sd->sd_send_case;
1859         int rc;
1860         __u32 routing = send_case & REMOTE_DST;
1861         struct lnet_rsp_tracker *rspt;
1862
1863         /* Increment sequence number of the selected peer, peer net,
1864          * local ni and local net so that we pick the next ones
1865          * in Round Robin.
1866          */
1867         best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1868         best_lpni->lpni_peer_net->lpn_seq =
1869                 best_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1870         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1871         the_lnet.ln_net_seq++;
1872         best_ni->ni_net->net_seq = the_lnet.ln_net_seq;
1873         best_ni->ni_seq = best_ni->ni_net->net_seq;
1874
1875         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1876                libcfs_nidstr(&best_ni->ni_nid),
1877                best_ni->ni_seq, best_ni->ni_net->net_seq,
1878                atomic_read(&best_ni->ni_tx_credits),
1879                best_ni->ni_sel_priority,
1880                libcfs_nidstr(&best_lpni->lpni_nid),
1881                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1882                best_lpni->lpni_txcredits,
1883                best_lpni->lpni_sel_priority);
1884
1885         /*
1886          * grab a reference on the peer_ni so it sticks around even if
1887          * we need to drop and relock the lnet_net_lock below.
1888          */
1889         lnet_peer_ni_addref_locked(best_lpni);
1890
1891         /*
1892          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1893          * message. This ensures that we get a CPT that is correct for
1894          * the NI when the NI has been restricted to a subset of all CPTs.
1895          * If the selected CPT differs from the one currently locked, we
1896          * must unlock and relock the lnet_net_lock(), and then check whether
1897          * the configuration has changed. We don't have a hold on the best_ni
1898          * yet, and it may have vanished.
1899          */
1900         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1901         if (sd->sd_cpt != cpt2) {
1902                 __u32 seq = lnet_get_dlc_seq_locked();
1903                 lnet_net_unlock(sd->sd_cpt);
1904                 sd->sd_cpt = cpt2;
1905                 lnet_net_lock(sd->sd_cpt);
1906                 if (seq != lnet_get_dlc_seq_locked()) {
1907                         lnet_peer_ni_decref_locked(best_lpni);
1908                         return REPEAT_SEND;
1909                 }
1910         }
1911
1912         /*
1913          * store the best_lpni in the message right away to avoid having
1914          * to do the same operation under different conditions
1915          */
1916         msg->msg_txpeer = best_lpni;
1917         msg->msg_txni = best_ni;
1918
1919         /*
1920          * grab a reference for the best_ni since now it's in use in this
1921          * send. The reference will be dropped in lnet_finalize()
1922          */
1923         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1924
1925         /*
1926          * Always set the target.nid to the best peer picked. The NID
1927          * will be one of the peer NIDs selected, the NID originally set
1928          * in the target, or the NID of a router if this message is to
1929          * be routed.
1930          */
1931         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1932
1933         /*
1934          * lnet_msg_commit assigns the correct cpt to the message, which
1935          * is used to decrement the correct refcount on the ni when it's
1936          * time to return the credits
1937          */
1938         lnet_msg_commit(msg, sd->sd_cpt);
1939
1940         /*
1941          * If we are routing the message then we keep the src_nid that was
1942          * set by the originator. If we are not routing then we are the
1943          * originator and set it here.
1944          */
1945         if (!msg->msg_routing)
1946                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1947
1948         if (routing) {
1949                 msg->msg_target_is_router = 1;
1950                 msg->msg_target.pid = LNET_PID_LUSTRE;
1951                 /*
1952                  * since we're routing we want to ensure that the
1953                  * msg_hdr.dest_nid is set to the final destination. When
1954                  * the router receives this message it knows how to route
1955                  * it.
1956                  *
1957                  * final_dst_lpni is set at the beginning of the
1958                  * lnet_select_pathway() function and is never changed.
1959                  * It's safe to use it here.
1960                  */
1961                 final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1962                 final_dst_lpni->lpni_peer_net->lpn_seq =
1963                         final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1964                 final_dst_lpni->lpni_seq =
1965                         final_dst_lpni->lpni_peer_net->lpn_seq;
1966                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1967         } else {
1968                 /*
1969                  * if we're not routing set the dest_nid to the best peer
1970                  * ni NID that we picked earlier in the algorithm.
1971                  */
1972                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1973         }
1974
1975         /*
1976          * if we have a response tracker block, update it with the
1977          * next hop NID
1978          */
1979         if (msg->msg_md) {
1980                 rspt = msg->msg_md->md_rspt_ptr;
1981                 if (rspt) {
1982                         rspt->rspt_next_hop_nid =
1983                                 msg->msg_txpeer->lpni_nid;
1984                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1985                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1986                 }
1987         }
1988
1989         rc = lnet_post_send_locked(msg, 0);
1990
1991         if (!rc)
1992                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1993                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1994                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1995                        libcfs_nidstr(&sd->sd_src_nid),
1996                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1997                        libcfs_nidstr(&sd->sd_dst_nid),
1998                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1999                        libcfs_nidstr(&sd->sd_rtr_nid),
2000                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
2001
2002         return rc;
2003 }
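
/* Note on the REPEAT_SEND return above: dropping lnet_net_lock() to
 * switch CPTs opens a window in which the configuration, and the
 * best_ni we have not yet pinned, can change underneath us. The DLC
 * sequence number is the cheap change detector; on a mismatch the
 * caller must restart the selection rather than trust stale pointers.
 */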
2004
2005 static inline void
2006 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
2007                          struct lnet_msg *msg)
2008 {
2009         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
2010             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
2011                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
2012                        libcfs_nidstr(&lni->ni_nid),
2013                        libcfs_nidstr(&lpni->lpni_nid));
2014                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
2015         }
2016 }
2017
2018 /*
2019  * Source Specified
2020  * Local Destination
2021  * non-mr peer
2022  *
2023  * use the source and destination NIDs as the pathway
2024  */
2025 static int
2026 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2027 {
2028         /* the destination lpni is set before we get here. */
2029
2030         /* find local NI */
2031         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2032         if (!sd->sd_best_ni) {
2033                 CERROR("Can't send to %s: src %s is not a local nid\n",
2034                        libcfs_nidstr(&sd->sd_dst_nid),
2035                        libcfs_nidstr(&sd->sd_src_nid));
2036                 return -EINVAL;
2037         }
2038
2039         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2040
2041         return lnet_handle_send(sd);
2042 }
2043
2044 /*
2045  * Source Specified
2046  * Local Destination
2047  * MR Peer
2048  *
2049  * Don't run the selection algorithm on the peer NIs. By specifying the
2050  * local NID, we're also saying that we should always use the destination NID
2051  * provided. This handles the case where we should be using the same
2052  * destination NID for all the messages which belong to the same RPC
2053  * request.
2054  */
2055 static int
2056 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2057 {
2058         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2059         if (!sd->sd_best_ni) {
2060                 CERROR("Can't send to %s: src %s is not a local nid\n",
2061                        libcfs_nidstr(&sd->sd_dst_nid),
2062                        libcfs_nidstr(&sd->sd_src_nid));
2063                 return -EINVAL;
2064         }
2065
2066         if (sd->sd_best_lpni &&
2067             nid_same(&sd->sd_best_lpni->lpni_nid,
2068                       &the_lnet.ln_loni->ni_nid))
2069                 return lnet_handle_lo_send(sd);
2070         else if (sd->sd_best_lpni)
2071                 return lnet_handle_send(sd);
2072
2073         CERROR("can't send to %s. no NI on %s\n",
2074                libcfs_nidstr(&sd->sd_dst_nid),
2075                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2076
2077         return -EHOSTUNREACH;
2078 }
2079
2080 static struct lnet_ni *
2081 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2082                               struct lnet_peer *peer,
2083                               struct lnet_peer_net *peer_net,
2084                               struct lnet_msg *msg,
2085                               int cpt)
2086 {
2087         struct lnet_net *local_net;
2088         struct lnet_ni *best_ni;
2089
2090         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2091         if (!local_net)
2092                 return NULL;
2093
2094         /*
2095          * Iterate through the NIs in this local Net and select
2096          * the NI to send from. lnet_get_best_ni() ranks candidates
2097          * by health, selection policy priority, device (GPU)
2098          * priority, NUMA distance, available credits and, finally,
2099          * Round Robin.
2100          */
2102         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2103                                    peer, peer_net, msg, cpt);
2104
2105         return best_ni;
2106 }
2107
2108 static int
2109 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2110                              int cpt)
2111 {
2112         struct lnet_peer *peer;
2113         struct lnet_peer_ni *new_lpni;
2114         int rc;
2115
2116         lnet_peer_ni_addref_locked(lpni);
2117
2118         peer = lpni->lpni_peer_net->lpn_peer;
2119
2120         if (lnet_peer_gw_discovery(peer)) {
2121                 lnet_peer_ni_decref_locked(lpni);
2122                 return 0;
2123         }
2124
2125         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2126                 lnet_peer_ni_decref_locked(lpni);
2127                 return 0;
2128         }
2129
2130         rc = lnet_discover_peer_locked(lpni, cpt, false);
2131         if (rc) {
2132                 lnet_peer_ni_decref_locked(lpni);
2133                 return rc;
2134         }
2135
2136         new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid);
2137         if (!new_lpni) {
2138                 lnet_peer_ni_decref_locked(lpni);
2139                 return -ENOENT;
2140         }
2141
2142         peer = new_lpni->lpni_peer_net->lpn_peer;
2143         spin_lock(&peer->lp_lock);
2144         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2145                 /* The peer NI did not change and the peer is up to date.
2146                  * Nothing more to do.
2147                  */
2148                 spin_unlock(&peer->lp_lock);
2149                 lnet_peer_ni_decref_locked(lpni);
2150                 lnet_peer_ni_decref_locked(new_lpni);
2151                 return 0;
2152         }
2153         spin_unlock(&peer->lp_lock);
2154
2155         /* Either the peer NI changed during discovery, or the peer isn't up
2156          * to date. In both cases we want to queue the message on the
2157          * (possibly new) peer's pending queue and queue the peer for discovery
2158          */
2159         msg->msg_sending = 0;
2160         msg->msg_txpeer = NULL;
2161         lnet_net_unlock(cpt);
2162         lnet_peer_queue_message(peer, msg);
2163         lnet_net_lock(cpt);
2164
2165         lnet_peer_ni_decref_locked(lpni);
2166         lnet_peer_ni_decref_locked(new_lpni);
2167
2168         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2169                msg, libcfs_nidstr(&peer->lp_primary_nid));
2170
2171         return LNET_DC_WAIT;
2172 }
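
/* Return contract for lnet_initiate_peer_discovery(): 0 means the
 * message can be sent now (discovery not applicable, or the peer is
 * already up to date), a negative errno fails the send, and
 * LNET_DC_WAIT means the message was queued on the peer and will be
 * sent once discovery completes.
 */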
2173
2174 static int
2175 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2176                              struct lnet_nid *dst_nid,
2177                              struct lnet_peer_ni **gw_lpni,
2178                              struct lnet_peer **gw_peer)
2179 {
2180         int rc = 0;
2181         struct lnet_peer *gw;
2182         struct lnet_peer *lp;
2183         struct lnet_peer_net *lpn;
2184         struct lnet_peer_net *best_lpn = NULL;
2185         struct lnet_remotenet *rnet, *best_rnet = NULL;
2186         struct lnet_route *best_route = NULL;
2187         struct lnet_route *last_route = NULL;
2188         struct lnet_peer_ni *lpni = NULL;
2189         struct lnet_peer_ni *gwni = NULL;
2190         bool route_found = false;
2191         bool gwni_decref = false;
2192         struct lnet_nid *src_nid =
2193                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2194                 ? &sd->sd_src_nid
2195                 : &sd->sd_best_ni->ni_nid;
2196         int best_lpn_healthv = 0;
2197         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2198
2199         CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n",
2200                LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified",
2201                libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid),
2202                libcfs_nidstr(&sd->sd_dst_nid));
2203
2204         /* If a router nid was specified then we are replying to a GET or
2205          * sending an ACK. In this case we use the gateway associated with the
2206          * specified router nid.
2207          */
2208         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2209                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2210                 if (gwni) {
2211                         gwni_decref = true;
2212                         gw = gwni->lpni_peer_net->lpn_peer;
2213                         if (gw->lp_rtr_refcount)
2214                                 route_found = true;
2215                 } else {
2216                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2217                               libcfs_nidstr(&sd->sd_rtr_nid));
2218                 }
2219         }
2220
2221         if (!route_found) {
2222                 if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) {
2223                         /* If I'm routing this message then I need to find the
2224                          * next hop based on the destination NID
2225                          *
2226                          * We also find next hop based on the destination NID
2227                          * if the source NI was specified
2228                          */
2229                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2230                         if (!best_rnet) {
2231                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2232                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2233                                                 "any local NI" :
2234                                                 libcfs_nidstr(src_nid),
2235                                        libcfs_nidstr(&sd->sd_dst_nid));
2236                                 rc = -EHOSTUNREACH;
2237                                 goto out;
2238                         }
2239                         CDEBUG(D_NET, "best_rnet %s\n",
2240                                libcfs_net2str(best_rnet->lrn_net));
2241                 } else {
2242                         /* we've already looked up the initial lpni using
2243                          * dst_nid
2244                          */
2245                         lpni = sd->sd_best_lpni;
2246                         /* the peer tree must be in existence */
2247                         LASSERT(lpni && lpni->lpni_peer_net &&
2248                                 lpni->lpni_peer_net->lpn_peer);
2249                         lp = lpni->lpni_peer_net->lpn_peer;
2250
2251                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2252                                 /* is this remote network reachable?  */
2253                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2254                                 if (!rnet)
2255                                         continue;
2256
2257                                 if (!best_lpn)
2258                                         goto use_lpn;
2259                                 else
2260                                         CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n",
2261                                                libcfs_net2str(lpn->lpn_net_id),
2262                                                libcfs_net2str(best_lpn->lpn_net_id),
2263                                                lpn->lpn_healthv,
2264                                                best_lpn->lpn_healthv,
2265                                                lpn->lpn_sel_priority,
2266                                                best_lpn->lpn_sel_priority,
2267                                                lpn->lpn_seq,
2268                                                best_lpn->lpn_seq);
2269
2270                                 /* select the preferred peer net */
2271                                 if (best_lpn_healthv > lpn->lpn_healthv)
2272                                         continue;
2273                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2274                                         goto use_lpn;
2275
2276                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2277                                         continue;
2278                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2279                                         goto use_lpn;
2280
2281                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2282                                         continue;
2283 use_lpn:
2284                                 best_lpn_healthv = lpn->lpn_healthv;
2285                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2286                                 best_lpn = lpn;
2287                                 best_rnet = rnet;
2288                         }
2289
2290                         if (!best_lpn) {
2291                                 CERROR("peer %s has no available nets\n",
2292                                        libcfs_nidstr(&sd->sd_dst_nid));
2293                 rc = -EHOSTUNREACH;
2294                                 goto out;
2295                         }
2296
2297                         CDEBUG(D_NET, "selected best_lpn %s\n",
2298                                libcfs_net2str(best_lpn->lpn_net_id));
2299
2300                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2301                                                                &sd->sd_dst_nid,
2302                                                                lp,
2303                                                                best_lpn->lpn_net_id);
2304                         if (!sd->sd_best_lpni) {
2305                                 CERROR("peer %s is unreachable\n",
2306                                        libcfs_nidstr(&sd->sd_dst_nid));
2307                                 rc = -EHOSTUNREACH;
2308                                 goto out;
2309                         }
2310
2311                         /* We're attempting to round robin over the remote peer
2312                          * NIs, so update the final destination we selected
2313                          */
2314                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2315                 }
2316
2317                 /*
2318                  * find the best route. Restrict the selection on the net of the
2319                  * local NI if we've already picked the local NI to send from.
2320                  * Otherwise, let's pick any route we can find and then find
2321                  * a local NI we can reach the route's gateway on. Any route we
2322                  * select will be reachable by virtue of the restriction we have
2323                  * when adding a route.
2324                  */
2325                 best_route = lnet_find_route_locked(best_rnet,
2326                                                     LNET_NID_NET(src_nid),
2327                                                     sd->sd_best_lpni,
2328                                                     &last_route, &gwni);
2329
2330                 if (!best_route) {
2331                         CERROR("no route to %s from %s\n",
2332                                libcfs_nidstr(dst_nid),
2333                                libcfs_nidstr(src_nid));
2334                         rc = -EHOSTUNREACH;
2335                         goto out;
2336                 }
2337
2338                 if (!gwni) {
2339                         CERROR("Internal Error. Route expected to %s from %s\n",
2340                                libcfs_nidstr(dst_nid),
2341                                libcfs_nidstr(src_nid));
2342                         rc = -EFAULT;
2343                         goto out;
2344                 }
2345
2346                 gw = best_route->lr_gateway;
2347                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2348         }
2349
2350         /*
2351          * If the router checker is not active then discover the gateway here.
2352          * This ensures we are able to take advantage of multi-rail routing, but
2353          * if the router checker is active then we do not unnecessarily delay
2354          * messages while the gateway is being checked by the dedicated monitor
2355          * thread.
2356          *
2357          * NB: We're only checking the alive_router_check_interval here, rather
2358          * than calling lnet_router_checker_active(), because the other
2359          * conditions that are checked by that function are either
2360          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2361          * is not empty)
2362          */
2363         if (alive_router_check_interval <= 0) {
2364                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2365                 if (rc)
2366                         goto out;
2367         }
2368
2369         if (!sd->sd_best_ni) {
2370                 lpn = gwni->lpni_peer_net;
2371                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2372                                                                sd->sd_msg,
2373                                                                sd->sd_md_cpt);
2374                 if (!sd->sd_best_ni) {
2375                         CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2376                                libcfs_net2str(lpn->lpn_net_id),
2377                                libcfs_nidstr(&sd->sd_src_nid));
2378                         rc = -EFAULT;
2379                         goto out;
2380                 }
2381         }
2382
2383         *gw_lpni = gwni;
2384         *gw_peer = gw;
2385
2386         /*
2387          * increment the sequence number since now we're sure we're
2388          * going to use this route
2389          */
2390         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2391                 LASSERT(best_route && last_route);
2392                 best_route->lr_seq = last_route->lr_seq + 1;
2393         }
2394
2395 out:
2396         if (gwni_decref && gwni)
2397                 lnet_peer_ni_decref_locked(gwni);
2398
2399         return rc;
2400 }
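
/* In short, lnet_handle_find_routed_path() resolves three things in
 * order: (1) the gateway, taken from sd_rtr_nid when replying or looked
 * up in the route tables otherwise; (2) the final destination peer NI,
 * when we are the originator and may round robin over the peer's remote
 * nets; and (3) a local NI on the gateway's net, if one was not already
 * chosen. Gateway discovery is only initiated here when the router
 * checker is disabled.
 */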
2401
2402 /*
2403  * Handle two cases:
2404  *
2405  * Case 1:
2406  *  Source specified
2407  *  Remote destination
2408  *  Non-MR destination
2409  *
2410  * Case 2:
2411  *  Source specified
2412  *  Remote destination
2413  *  MR destination
2414  *
2415  * The handling of these two cases is similar. Even though the destination
2416  * can be MR or non-MR, we'll deal directly with the router.
2417  */
2418 static int
2419 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2420 {
2421         int rc;
2422         struct lnet_peer_ni *gw_lpni = NULL;
2423         struct lnet_peer *gw_peer = NULL;
2424
2425         /* find local NI */
2426         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2427         if (!sd->sd_best_ni) {
2428                 CERROR("Can't send to %s: src %s is not a local nid\n",
2429                        libcfs_nidstr(&sd->sd_dst_nid),
2430                        libcfs_nidstr(&sd->sd_src_nid));
2431                 return -EINVAL;
2432         }
2433
2434         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2435                                           &gw_lpni, &gw_peer);
2436         if (rc)
2437                 return rc;
2438
2439         if (sd->sd_send_case & NMR_DST)
2440                 /*
2441                  * since the final destination is non-MR let's set its preferred
2442                  * NID before we send
2443                  */
2444                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2445                                          sd->sd_msg);
2446
2447         /*
2448          * We're going to send to the gw found so let's set its
2449          * info
2450          */
2451         sd->sd_peer = gw_peer;
2452         sd->sd_best_lpni = gw_lpni;
2453
2454         return lnet_handle_send(sd);
2455 }
2456
2457 static struct lnet_ni *
2458 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2459                                struct lnet_msg *msg, bool discovery)
2460 {
2461         struct lnet_peer_net *lpn = NULL;
2462         struct lnet_peer_net *best_lpn = NULL;
2463         struct lnet_net *net = NULL;
2464         struct lnet_net *best_net = NULL;
2465         struct lnet_ni *best_ni = NULL;
2466         int best_lpn_healthv = 0;
2467         int best_net_healthv = 0;
2468         int net_healthv;
2469         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2470         __u32 lpn_sel_prio;
2471         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2472         __u32 net_sel_prio;
2473
2474         /* If lp_disc_net_id is set, this peer is a router undergoing
2475          * discovery, and this message is an LNet ping, then this may be a
2476          * discovery message and we need to select an NI on the peer net
2477          * specified by lp_disc_net_id
2478          */
2479         if (peer->lp_disc_net_id &&
2480             (peer->lp_state & LNET_PEER_RTR_DISCOVERY) &&
2481             lnet_msg_is_ping(msg)) {
2482                 best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id);
2483                 if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id))
2484                         goto select_best_ni;
2485         }
2486
2487         /*
2488          * The peer can have multiple interfaces, some of them can be on
2489          * the local network and others on a routed network. We should
2490          * prefer the local network. However if the local network is not
2491          * available then we need to try the routed network
2492          */
2493
2494         /* go through all the peer nets and find the best_ni */
2495         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2496                 /*
2497                  * The peer's list of nets can contain non-local nets. We
2498                  * want to only examine the local ones.
2499                  */
2500                 net = lnet_get_net_locked(lpn->lpn_net_id);
2501                 if (!net)
2502                         continue;
2503
2504                 lpn_sel_prio = lpn->lpn_sel_priority;
2505                 net_healthv = lnet_get_net_healthv_locked(net);
2506                 net_sel_prio = net->net_sel_priority;
2507
2508                 if (!best_lpn || !best_net)
2509                         goto select_lpn;
2510                 else
2511                         CDEBUG(D_NET,
2512                                "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n",
2513                                libcfs_net2str(lpn->lpn_net_id),
2514                                libcfs_net2str(best_lpn->lpn_net_id),
2515                                lpn->lpn_healthv,
2516                                best_lpn_healthv,
2517                                lpn_sel_prio,
2518                                best_lpn_sel_prio,
2519                                net_healthv,
2520                                best_net_healthv,
2521                                net_sel_prio,
2522                                best_net_sel_prio,
2523                                lpn->lpn_seq,
2524                                best_lpn->lpn_seq,
2525                                net->net_seq,
2526                                best_net->net_seq);
2527
2528                 /* always select the lpn with the best health */
2529                 if (best_lpn_healthv > lpn->lpn_healthv)
2530                         continue;
2531                 else if (best_lpn_healthv < lpn->lpn_healthv)
2532                         goto select_lpn;
2533
2534                 /* select the preferred peer and local nets */
2535                 if (best_lpn_sel_prio < lpn_sel_prio)
2536                         continue;
2537                 else if (best_lpn_sel_prio > lpn_sel_prio)
2538                         goto select_lpn;
2539
2540                 if (best_net_healthv > net_healthv)
2541                         continue;
2542                 else if (best_net_healthv < net_healthv)
2543                         goto select_lpn;
2544
2545                 if (best_net_sel_prio < net_sel_prio)
2546                         continue;
2547                 else if (best_net_sel_prio > net_sel_prio)
2548                         goto select_lpn;
2549
2550                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2551                         continue;
2552                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2553                         goto select_lpn;
2554
2555                 /* round robin over the local networks */
2556                 if (best_net->net_seq <= net->net_seq)
2557                         continue;
2558
2559 select_lpn:
2560                 best_net_healthv = net_healthv;
2561                 best_net_sel_prio = net_sel_prio;
2562                 best_lpn_healthv = lpn->lpn_healthv;
2563                 best_lpn_sel_prio = lpn_sel_prio;
2564                 best_lpn = lpn;
2565                 best_net = net;
2566         }
2567
2568         if (best_lpn) {
2569                 /* Select the best NI on the same net as best_lpn chosen
2570                  * above
2571                  */
2572 select_best_ni:
2573                 CDEBUG(D_NET, "selected best_lpn %s\n",
2574                        libcfs_net2str(best_lpn->lpn_net_id));
2575                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2576                                                         msg, md_cpt);
2577         }
2578
2579         return best_ni;
2580 }
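
/* The peer-net/local-net ranking above, most significant first: peer
 * net health, peer net selection priority, local net health, local net
 * selection priority, then round robin on the peer net and local net
 * sequence numbers. The router-discovery shortcut at the top bypasses
 * the ranking entirely for pings aimed at lp_disc_net_id.
 */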
2581
2582 static struct lnet_ni *
2583 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2584 {
2585         struct lnet_ni *best_ni = NULL;
2586         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2587         struct lnet_peer_ni *lpni_entry;
2588
2589         /*
2590          * We must use a consistent source address when sending to a
2591          * non-MR peer. However, a non-MR peer can have multiple NIDs
2592          * on multiple networks, and we may even need to talk to this
2593          * peer on multiple networks -- certain types of
2594          * load-balancing configuration do this.
2595          *
2596          * So we need to pick the NI the peer prefers for this
2597          * particular network.
2598          */
2599         LASSERT(peer_net);
2600         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2601                             lpni_peer_nis) {
2602                 if (lpni_entry->lpni_pref_nnids == 0)
2603                         continue;
2604                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2605                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2606                                                 cpt);
2607                 break;
2608         }
2609
2610         return best_ni;
2611 }
2612
2613 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2614 static int
2615 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2616 {
2617         struct lnet_ni *best_ni = NULL;
2618
2619         /*
2620          * We must use a consistent source address when sending to a
2621          * non-MR peer. However, a non-MR peer can have multiple NIDs
2622          * on multiple networks, and we may even need to talk to this
2623          * peer on multiple networks -- certain types of
2624          * load-balancing configuration do this.
2625          *
2626          * So we need to pick the NI the peer prefers for this
2627          * particular network.
2628          *
2629          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2630          * traffic doesn't care which source NI is used, and we don't actually
2631          * want to restrict local recovery pings to a single source NI.
2632          */
2633         if (!lnet_reserved_msg(sd->sd_msg))
2634                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2635                                                                sd->sd_cpt);
2636
2637         if (!best_ni)
2638                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2639                                                 sd->sd_best_lpni->lpni_peer_net,
2640                                                 sd->sd_msg,
2641                                                 sd->sd_md_cpt);
2642
2643         /* If there is no best_ni we don't have a route */
2644         if (!best_ni) {
2645                 CERROR("no path to %s from net %s\n",
2646                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2647                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2648                 return -EHOSTUNREACH;
2649         }
2650
2651         sd->sd_best_ni = best_ni;
2652
2653         /* Set preferred NI if necessary. */
2654         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2655
2656         return 0;
2657 }
2658
2660 /*
2661  * Source not specified
2662  * Local destination
2663  * Non-MR Peer
2664  *
2665  * always use the same source NID for NMR peers
2666  * If we've talked to that peer before then we already have a preferred
2667  * source NI associated with it. Otherwise, we select a preferred local NI
2668  * and store it in the peer
2669  */
2670 static int
2671 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2672 {
2673         int rc = 0;
2674
2675         /* sd->sd_best_lpni is already set to the final destination */
2676
2677         /*
2678          * At this point we should've created the peer ni and peer. If we
2679          * can't find them, then something went wrong. Instead of asserting,
2680          * output a relevant message and fail the send.
2681          */
2682         if (!sd->sd_best_lpni) {
2683                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2684                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2685                        libcfs_nidstr(&sd->sd_dst_nid));
2686                 return -EFAULT;
2687         }
2688
2689         if (sd->sd_msg->msg_routing) {
2690                 /* If I'm forwarding this message then I can choose any NI
2691                  * on the destination peer net
2692                  */
2693                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2694                                                                sd->sd_peer,
2695                                                                sd->sd_best_lpni->lpni_peer_net,
2696                                                                sd->sd_msg,
2697                                                                sd->sd_md_cpt);
2698                 if (!sd->sd_best_ni) {
2699                         CERROR("Unable to forward message to %s. No local NI available\n",
2700                                libcfs_nidstr(&sd->sd_dst_nid));
2701                         rc = -EHOSTUNREACH;
2702                 }
2703         } else
2704                 rc = lnet_select_preferred_best_ni(sd);
2705
2706         if (!rc)
2707                 rc = lnet_handle_send(sd);
2708
2709         return rc;
2710 }
2711
2712 static int
2713 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2714 {
2715         /*
2716          * NOTE we've already handled the remote peer case. So we only
2717          * need to worry about the local case here.
2718          *
2719          * if we're sending a response, ACK or reply, we need to send it
2720          * to the destination NID given to us. At this point we already
2721          * have the peer_ni we're supposed to send to, so just find the
2722          * best_ni on the peer net and use that. Since we're sending to an
2723          * MR peer, we can just run the selection algorithm on our
2724          * local NIs and pick the best one.
2725          */
2726         if (sd->sd_send_case & SND_RESP) {
2727                 sd->sd_best_ni =
2728                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2729                                                 sd->sd_best_lpni->lpni_peer_net,
2730                                                 sd->sd_msg,
2731                                                 sd->sd_md_cpt);
2732
2733                 if (!sd->sd_best_ni) {
2734                         /*
2735                          * We're not going to deal with not able to send
2736                          * a response to the provided final destination
2737                          */
2738                         CERROR("Can't send response to %s. No local NI available\n",
2739                                 libcfs_nidstr(&sd->sd_dst_nid));
2740                         return -EHOSTUNREACH;
2741                 }
2742
2743                 return lnet_handle_send(sd);
2744         }
2745
2746         /*
2747          * If we get here that means we're sending a fresh request, PUT or
2748          * GET, so we need to run our standard selection algorithm.
2749          * First find the best local interface that's on any of the peer's
2750          * networks.
2751          */
2752         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2753                                         sd->sd_md_cpt,
2754                                         sd->sd_msg,
2755                                         lnet_msg_discovery(sd->sd_msg));
2756         if (sd->sd_best_ni) {
2757                 sd->sd_best_lpni =
2758                   lnet_find_best_lpni(sd->sd_best_ni, &sd->sd_dst_nid,
2759                                       sd->sd_peer,
2760                                       sd->sd_best_ni->ni_net->net_id);
2761
2762                 /*
2763                  * if we're successful in selecting a peer_ni on the local
2764                  * network, then send to it. Otherwise fall through and
2765                          * see whether we can reach it over another routed
2766                          * network.
2767                  */
2768                 if (sd->sd_best_lpni &&
2769                     nid_same(&sd->sd_best_lpni->lpni_nid,
2770                              &the_lnet.ln_loni->ni_nid)) {
2771                         /*
2772                          * in case we initially started with a routed
2773                          * destination, let's reset to local
2774                          */
2775                         sd->sd_send_case &= ~REMOTE_DST;
2776                         sd->sd_send_case |= LOCAL_DST;
2777                         return lnet_handle_lo_send(sd);
2778                 } else if (sd->sd_best_lpni) {
2779                         /*
2780                          * in case we initially started with a routed
2781                          * destination, let's reset to local
2782                          */
2783                         sd->sd_send_case &= ~REMOTE_DST;
2784                         sd->sd_send_case |= LOCAL_DST;
2785                         return lnet_handle_send(sd);
2786                 }
2787
2788                 CERROR("Internal Error. Expected to have a best_lpni: "
2789                        "%s -> %s\n",
2790                        libcfs_nidstr(&sd->sd_src_nid),
2791                        libcfs_nidstr(&sd->sd_dst_nid));
2792
2793                 return -EFAULT;
2794         }
2795
2796         /*
2797          * Peer doesn't have a local network. Let's see if there is
2798          * a remote network we can reach it on.
2799          */
2800         return PASS_THROUGH;
2801 }
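/* Editor's illustrative sketch, not part of lib-move.c: the function above
 * uses a tri-state return. 0 means the send was fully handled, a negative
 * errno means a hard failure, and the positive PASS_THROUGH sentinel tells
 * the caller to fall back to the routed path. Helper names below are
 * hypothetical:
 */
#if 0
	rc = try_local_path(sd);		/* hypothetical helper */
	if (rc != PASS_THROUGH)
		return rc;			/* handled, or a real error */
	rc = try_routed_path(sd);		/* hypothetical fallback */
#endif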
2802
2803 /*
2804  * Case 1:
2805  *      Source NID not specified
2806  *      Local destination
2807  *      MR peer
2808  *
2809  * Case 2:
2810  *      Source NID not specified
2811  *      Remote destination
2812  *      MR peer
2813  *
2814  * In both of these cases if we're sending a response, ACK or REPLY, then
2815  * we need to send to the destination NID provided.
2816  *
2817  * In the remote case let's deal with MR routers.
2818  *
2819  */
2820
2821 static int
2822 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2823 {
2824         int rc = 0;
2825         struct lnet_peer *gw_peer = NULL;
2826         struct lnet_peer_ni *gw_lpni = NULL;
2827
2828         /*
2829          * handle sending a response to a remote peer here so we don't
2830          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2831          */
2832         if (sd->sd_send_case & REMOTE_DST &&
2833             sd->sd_send_case & SND_RESP) {
2834                 struct lnet_peer_ni *gw;
2835                 struct lnet_peer *gw_peer;
2836
2837                 rc = lnet_handle_find_routed_path(
2838                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2839                 if (rc < 0) {
2840                         CERROR("Can't send response to %s. No route available\n",
2841                                libcfs_nidstr(&sd->sd_dst_nid));
2842                         return -EHOSTUNREACH;
2843                 } else if (rc > 0) {
2844                         return rc;
2845                 }
2846
2847                 sd->sd_best_lpni = gw;
2848                 sd->sd_peer = gw_peer;
2849
2850                 return lnet_handle_send(sd);
2851         }
2852
2853         /*
2854          * Even though the NID for the peer might not be on a local network,
2855          * since the peer is MR there could be other interfaces on the
2856          * local network. In that case we'd still like to prefer the local
2857          * network over the routed network. If we're unable to do that
2858          * then we select the best router among the different routed networks,
2859          * and if the router is MR then we can deal with it as such.
2860          */
2861         rc = lnet_handle_any_mr_dsta(sd);
2862         if (rc != PASS_THROUGH)
2863                 return rc;
2864
2865         /*
2866          * Now that we must route to the destination, we must consider the
2867          * MR case, where the destination has multiple interfaces, some of
2868          * which we can route to and others we do not. For this reason we
2869          * need to select the destination which we can route to and if
2870          * there are multiple, we need to round robin.
2871          */
2872         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2873                                           &gw_lpni, &gw_peer);
2874         if (rc)
2875                 return rc;
2876
2877         sd->sd_send_case &= ~LOCAL_DST;
2878         sd->sd_send_case |= REMOTE_DST;
2879
2880         sd->sd_peer = gw_peer;
2881         sd->sd_best_lpni = gw_lpni;
2882
2883         return lnet_handle_send(sd);
2884 }
2885
2886 /*
2887  * Source not specified
2888  * Remote destination
2889  * Non-MR peer
2890  *
2891  * Must send to the specified peer NID using the same source NID that
2892  * we've used before. If this is the first time we talk to that peer,
2893  * then find the source NI and assign it as preferred to that peer.
2894  */
2895 static int
2896 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2897 {
2898         int rc;
2899         struct lnet_peer_ni *gw_lpni = NULL;
2900         struct lnet_peer *gw_peer = NULL;
2901
2902         /*
2903          * Let's see if we have a preferred NI to talk to this NMR peer
2904          */
2905         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2906                                                               sd->sd_cpt);
2907
2908         /*
2909          * find the router; doing so will also select the best NI if we
2910          * haven't found one already.
2911          */
2912         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2913                                           &gw_peer);
2914         if (rc)
2915                 return rc;
2916
2917         /*
2918          * set the best_ni we've chosen as the preferred one for
2919          * this peer
2920          */
2921         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2922
2923         /* we'll be sending to the gw */
2924         sd->sd_best_lpni = gw_lpni;
2925         sd->sd_peer = gw_peer;
2926
2927         return lnet_handle_send(sd);
2928 }
2929
2930 static int
2931 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2932 {
2933         /*
2934          * turn off the SND_RESP bit.
2935          * It will be checked in the case handling
2936          */
2937         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2938
2939         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2940                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2941                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2942                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2943                 libcfs_nidstr(&sd->sd_dst_nid),
2944                 (send_case & LOCAL_DST) ? "local" : "routed");
2945
2946         switch (send_case) {
2947         /*
2948          * For all cases where the source is specified, we should always
2949          * use the destination NID, whether it's an MR destination or not,
2950          * since we're continuing a series of related messages for the
2951          * same RPC
2952          */
2953         case SRC_SPEC_LOCAL_NMR_DST:
2954                 return lnet_handle_spec_local_nmr_dst(sd);
2955         case SRC_SPEC_LOCAL_MR_DST:
2956                 return lnet_handle_spec_local_mr_dst(sd);
2957         case SRC_SPEC_ROUTER_NMR_DST:
2958         case SRC_SPEC_ROUTER_MR_DST:
2959                 return lnet_handle_spec_router_dst(sd);
2960         case SRC_ANY_LOCAL_NMR_DST:
2961                 return lnet_handle_any_local_nmr_dst(sd);
2962         case SRC_ANY_LOCAL_MR_DST:
2963         case SRC_ANY_ROUTER_MR_DST:
2964                 return lnet_handle_any_mr_dst(sd);
2965         case SRC_ANY_ROUTER_NMR_DST:
2966                 return lnet_handle_any_router_nmr_dst(sd);
2967         default:
2968                 CERROR("Unknown send case\n");
2969                 return -1;
2970         }
2971 }
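/* Editor's illustrative sketch, not part of lib-move.c: the cases switched
 * on above combine one bit from each axis (source, destination locality,
 * MR-ness), built up in lnet_select_pathway() below. Assuming
 * (hypothetically) that the composite labels are defined in lib-lnet.h as
 * ORs of the individual bits, the classification reduces to the following,
 * where the boolean variables are hypothetical:
 */
#if 0
	__u32 send_case = 0;

	send_case |= src_specified ? SRC_SPEC : SRC_ANY;
	send_case |= dst_on_local_net ? LOCAL_DST : REMOTE_DST;
	send_case |= peer_is_mr ? MR_DST : NMR_DST;
	/* e.g. SRC_ANY_LOCAL_MR_DST == (SRC_ANY | LOCAL_DST | MR_DST) */
#endif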
2972
2973 static int
2974 lnet_select_pathway(struct lnet_nid *src_nid,
2975                     struct lnet_nid *dst_nid,
2976                     struct lnet_msg *msg,
2977                     struct lnet_nid *rtr_nid)
2978 {
2979         struct lnet_peer_ni *lpni;
2980         struct lnet_peer *peer;
2981         struct lnet_send_data send_data;
2982         int cpt, rc;
2983         int md_cpt;
2984         __u32 send_case = 0;
2985         bool final_hop;
2986         bool mr_forwarding_allowed;
2987
2988         memset(&send_data, 0, sizeof(send_data));
2989
2990         /*
2991          * get an initial CPT to use for locking. The idea here is not to
2992          * serialize the calls to select_pathway, so that as many
2993          * operations can run concurrently as possible. To do that we use
2994          * the CPT where this call is being executed. Later on when we
2995          * determine the CPT to use in lnet_message_commit, we switch the
2996          * lock and check if there was any configuration change. If there
2997          * was none, we proceed; if there was, we restart the operation.
2998          */
2999         cpt = lnet_net_lock_current();
3000
3001         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
3002         if (md_cpt == CFS_CPT_ANY)
3003                 md_cpt = cpt;
3004
3005 again:
3006
3007         /*
3008          * If we're being asked to send to the loopback interface, there
3009          * is no need to go through any selection. We can just shortcut
3010          * the entire process and send over lolnd
3011          */
3012         send_data.sd_msg = msg;
3013         send_data.sd_cpt = cpt;
3014         if (nid_is_lo0(dst_nid)) {
3015                 rc = lnet_handle_lo_send(&send_data);
3016                 lnet_net_unlock(cpt);
3017                 return rc;
3018         }
3019
3020         /*
3021          * find an existing peer_ni, or create one and mark it as having been
3022          * created due to network traffic. This call will create the
3023          * peer->peer_net->peer_ni tree.
3024          */
3025         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
3026         if (IS_ERR(lpni)) {
3027                 lnet_net_unlock(cpt);
3028                 return PTR_ERR(lpni);
3029         }
3030
3031         /*
3032          * Cache the original src_nid and rtr_nid. If we need to resend the
3033          * message then we'll need to know whether the src_nid was originally
3034          * specified for this message. If it was originally specified,
3035          * then we need to keep using the same src_nid since it's
3036          * continuing the same sequence of messages. Similarly, rtr_nid will
3037          * affect our choice of next hop.
3038          */
3039         if (src_nid)
3040                 msg->msg_src_nid_param = *src_nid;
3041         else
3042                 msg->msg_src_nid_param = LNET_ANY_NID;
3043         if (rtr_nid)
3044                 msg->msg_rtr_nid_param = *rtr_nid;
3045         else
3046                 msg->msg_rtr_nid_param = LNET_ANY_NID;
3047
3048         /*
3049          * If necessary, perform discovery on the peer that owns this peer_ni.
3050          * Note, this can result in the ownership of this peer_ni changing
3051          * to another peer object.
3052          */
3053         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
3054         if (rc) {
3055                 lnet_peer_ni_decref_locked(lpni);
3056                 lnet_net_unlock(cpt);
3057                 return rc;
3058         }
3059
3060         peer = lpni->lpni_peer_net->lpn_peer;
3061
3062         /*
3063          * Identify the different send cases
3064          */
3065         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3066                 send_case |= SRC_ANY;
3067                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3068                         send_case |= LOCAL_DST;
3069                 else
3070                         send_case |= REMOTE_DST;
3071         } else {
3072                 send_case |= SRC_SPEC;
3073                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3074                         send_case |= LOCAL_DST;
3075                 else
3076                         send_case |= REMOTE_DST;
3077         }
3078
3079         final_hop = false;
3080         if (msg->msg_routing && (send_case & LOCAL_DST))
3081                 final_hop = true;
3082
3083         /* Determine whether to allow MR forwarding for this message.
3084          * NB: MR forwarding is allowed if the message originator and the
3085          * destination are both MR capable, and the destination lpni that was
3086          * originally chosen by the originator is unhealthy or down.
3087          * We check the MR capability of the destination further below
3088          */
3089         mr_forwarding_allowed = false;
3090         if (final_hop) {
3091                 struct lnet_peer *src_lp;
3092                 struct lnet_peer_ni *src_lpni;
3093
3094                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3095                                                      NULL, cpt);
3096                 /* We don't fail the send if we hit any errors here. We'll just
3097                  * try to send it via non-multi-rail criteria
3098                  */
3099                 if (!IS_ERR(src_lpni)) {
3100                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3101                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3102                         lnet_peer_ni_decref_locked(src_lpni);
3103                         if (lnet_peer_is_multi_rail(src_lp) &&
3104                             !lnet_is_peer_ni_alive(lpni))
3105                                 mr_forwarding_allowed = true;
3106
3107                 }
3108                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3109                        mr_forwarding_allowed ? "allowed" : "not allowed");
3110         }
3111
3112         /*
3113          * Deal with the peer as NMR in the following cases:
3114          * 1. the peer is NMR
3115          * 2. We're trying to recover a specific peer NI
3116          * 3. I'm a router sending to the final destination and MR forwarding is
3117          *    not allowed for this message (as determined above).
3118          *    In this case the source of the message would've
3119          *    already selected the final destination so my job
3120          *    is to honor the selection.
3121          */
3122         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3123             (final_hop && !mr_forwarding_allowed))
3124                 send_case |= NMR_DST;
3125         else
3126                 send_case |= MR_DST;
3127
3128         if (lnet_msg_is_response(msg))
3129                 send_case |= SND_RESP;
3130
3131         /* assign parameters to the send_data */
3132         if (rtr_nid)
3133                 send_data.sd_rtr_nid = *rtr_nid;
3134         else
3135                 send_data.sd_rtr_nid = LNET_ANY_NID;
3136         if (src_nid)
3137                 send_data.sd_src_nid = *src_nid;
3138         else
3139                 send_data.sd_src_nid = LNET_ANY_NID;
3140         send_data.sd_dst_nid = *dst_nid;
3141         send_data.sd_best_lpni = lpni;
3142         /*
3143          * keep a pointer to the final destination in case we're going to
3144          * route, so we'll need to access it later
3145          */
3146         send_data.sd_final_dst_lpni = lpni;
3147         send_data.sd_peer = peer;
3148         send_data.sd_md_cpt = md_cpt;
3149         send_data.sd_send_case = send_case;
3150
3151         rc = lnet_handle_send_case_locked(&send_data);
3152
3153         /*
3154          * Update the local cpt since send_data.sd_cpt might've been
3155          * updated as a result of calling lnet_handle_send_case_locked().
3156          */
3157         cpt = send_data.sd_cpt;
3158         lnet_peer_ni_decref_locked(lpni);
3159
3160         if (rc == REPEAT_SEND)
3161                 goto again;
3162
3163         lnet_net_unlock(cpt);
3164
3165         return rc;
3166 }
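/* Editor's illustrative sketch, not part of lib-move.c: the "again:" loop
 * above is an optimistic-retry pattern. Selection runs under whichever CPT
 * lock we started on; if a callee had to switch CPTs and the configuration
 * may have changed, it returns REPEAT_SEND and the whole selection restarts
 * instead of serializing on a global lock. Minimal shape, with a
 * hypothetical callee name:
 */
#if 0
	int cpt = lnet_net_lock_current();
again:
	rc = select_and_send_locked(&send_data);	/* hypothetical */
	cpt = send_data.sd_cpt;		/* callee may have switched CPTs */
	if (rc == REPEAT_SEND)
		goto again;
	lnet_net_unlock(cpt);
#endif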
3167
3168 int
3169 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3170           struct lnet_nid *rtr_nid)
3171 {
3172         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3173         int rc;
3174
3175         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3176         LASSERT(msg->msg_txpeer == NULL);
3177         LASSERT(msg->msg_txni == NULL);
3178         LASSERT(!msg->msg_sending);
3179         LASSERT(!msg->msg_target_is_router);
3180         LASSERT(!msg->msg_receiving);
3181
3182         msg->msg_sending = 1;
3183
3184         LASSERT(!msg->msg_tx_committed);
3185
3186         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3187         if (rc < 0) {
3188                 if (rc == -EHOSTUNREACH)
3189                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3190                 else
3191                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3192                 return rc;
3193         }
3194
3195         if (rc == LNET_CREDIT_OK)
3196                 lnet_ni_send(msg->msg_txni, msg);
3197
3198         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3199         return 0;
3200 }
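/* Editor's illustrative sketch, not part of lib-move.c: caller-side
 * contract of lnet_send(), mirroring the resend path later in this file.
 * On a negative return the message was never committed for transmission
 * and msg_health_status has already been set, so the caller finalizes the
 * message itself:
 */
#if 0
	rc = lnet_send(&msg->msg_src_nid_param, msg,
		       &msg->msg_rtr_nid_param);
	if (rc < 0) {
		msg->msg_no_resend = true;
		lnet_finalize(msg, rc);
	}
#endif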
3201
3202 enum lnet_mt_event_type {
3203         MT_TYPE_LOCAL_NI = 0,
3204         MT_TYPE_PEER_NI
3205 };
3206
3207 struct lnet_mt_event_info {
3208         enum lnet_mt_event_type mt_type;
3209         struct lnet_nid mt_nid;
3210 };
3211
3212 /* called with res_lock held */
3213 void
3214 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3215 {
3216         struct lnet_rsp_tracker *rspt;
3217
3218         /*
3219          * msg has a refcount on the MD so the MD is not going away.
3220          * The rspt queue for the cpt is protected by
3221          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3222          */
3223         if (!md->md_rspt_ptr)
3224                 return;
3225
3226         rspt = md->md_rspt_ptr;
3227
3228         /* debug code */
3229         LASSERT(rspt->rspt_cpt == cpt);
3230
3231         md->md_rspt_ptr = NULL;
3232
3233         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3234                 /*
3235                  * The monitor thread has invalidated this handle because the
3236                  * response timed out, but it failed to lookup the MD. That
3237                  * means this response tracker is on the zombie list. We can
3238                  * safely remove it under the resource lock (held by caller) and
3239                  * free the response tracker block.
3240                  */
3241                 list_del(&rspt->rspt_on_list);
3242                 lnet_rspt_free(rspt, cpt);
3243         } else {
3244                 /*
3245                  * invalidate the handle to indicate that a response has been
3246                  * received, which will then lead the monitor thread to clean up
3247                  * the rspt block.
3248                  */
3249                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3250         }
3251 }
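/* Editor's illustrative sketch, not part of lib-move.c: the MD handle
 * above doubles as a sign-off token between the completion path and the
 * monitor thread. Generic shape of the idiom, with hypothetical names:
 * whichever party arrives last (and finds the token already invalidated)
 * frees the shared record.
 */
#if 0
	spin_lock(&rec_lock);
	if (token_is_invalid(&rec->token)) {	/* peer signed off first */
		list_del(&rec->link);
		free_record(rec);
	} else {				/* sign off; peer frees */
		invalidate_token(&rec->token);
	}
	spin_unlock(&rec_lock);
#endif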
3252
3253 void
3254 lnet_clean_zombie_rstqs(void)
3255 {
3256         struct lnet_rsp_tracker *rspt, *tmp;
3257         int i;
3258
3259         cfs_cpt_for_each(i, lnet_cpt_table()) {
3260                 list_for_each_entry_safe(rspt, tmp,
3261                                          the_lnet.ln_mt_zombie_rstqs[i],
3262                                          rspt_on_list) {
3263                         list_del(&rspt->rspt_on_list);
3264                         lnet_rspt_free(rspt, i);
3265                 }
3266         }
3267
3268         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3269 }
3270
3271 static void
3272 lnet_finalize_expired_responses(void)
3273 {
3274         struct lnet_libmd *md;
3275         struct lnet_rsp_tracker *rspt, *tmp;
3276         ktime_t now;
3277         int i;
3278
3279         if (the_lnet.ln_mt_rstq == NULL)
3280                 return;
3281
3282         cfs_cpt_for_each(i, lnet_cpt_table()) {
3283                 LIST_HEAD(local_queue);
3284
3285                 lnet_net_lock(i);
3286                 if (!the_lnet.ln_mt_rstq[i]) {
3287                         lnet_net_unlock(i);
3288                         continue;
3289                 }
3290                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3291                 lnet_net_unlock(i);
3292
3293                 now = ktime_get();
3294
3295                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3296                         /*
3297                          * The rspt mdh will be invalidated when a response
3298                          * is received or whenever we want to discard the
3299                          * block. The monitor thread will walk the queue
3300                          * and clean up any rspts with an invalid mdh.
3301                          * It only walks the queue until the first
3302                          * unexpired rspt block, which means that some
3303                          * rspt blocks which received their corresponding
3304                          * responses will linger in the queue until they
3305                          * are eventually cleaned up.
3306                          */
3307                         lnet_res_lock(i);
3308                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3309                                 lnet_res_unlock(i);
3310                                 list_del(&rspt->rspt_on_list);
3311                                 lnet_rspt_free(rspt, i);
3312                                 continue;
3313                         }
3314
3315                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3316                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3317                                 struct lnet_peer_ni *lpni;
3318                                 struct lnet_nid nid;
3319
3320                                 md = lnet_handle2md(&rspt->rspt_mdh);
3321                                 if (!md) {
3322                                         /* MD has been queued for unlink, but
3323                                          * rspt hasn't been detached (Note we've
3324                                          * checked above that the rspt_mdh is
3325                                          * valid). Since we cannot lookup the MD
3326                                          * we're unable to detach the rspt
3327                                          * ourselves. Thus, move the rspt to the
3328                                          * zombie list where we'll wait for
3329                                          * either:
3330                                          *   1. The remaining operations on the
3331                                          *   MD to complete. In this case the
3332                                          *   final operation will result in
3333                                          *   lnet_msg_detach_md()->
3334                                          *   lnet_detach_rsp_tracker() where
3335                                          *   we will clean up this response
3336                                          *   tracker.
3337                                          *   2. LNet to shutdown. In this case
3338                                          *   we'll wait until after all LND Nets
3339                                          *   have shutdown and then we can
3340                                          *   safely free any remaining response
3341                                          *   tracker blocks on the zombie list.
3342                                          * Note: We need to hold the resource
3343                                          * lock when adding to the zombie list
3344                                          * because we may have concurrent access
3345                                          * with lnet_detach_rsp_tracker().
3346                                          */
3347                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3348                                         list_move(&rspt->rspt_on_list,
3349                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3350                                         lnet_res_unlock(i);
3351                                         continue;
3352                                 }
3353                                 LASSERT(md->md_rspt_ptr == rspt);
3354                                 md->md_rspt_ptr = NULL;
3355                                 lnet_res_unlock(i);
3356
3357                                 LNetMDUnlink(rspt->rspt_mdh);
3358
3359                                 nid = rspt->rspt_next_hop_nid;
3360
3361                                 list_del(&rspt->rspt_on_list);
3362                                 lnet_rspt_free(rspt, i);
3363
3364                                 /* If we're shutting down we just want to clean
3365                                  * up the rspt blocks
3366                                  */
3367                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3368                                         continue;
3369
3370                                 lnet_net_lock(i);
3371                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3372                                 lnet_net_unlock(i);
3373
3374                                 CDEBUG(D_NET,
3375                                        "Response timeout: md = %p: nid = %s\n",
3376                                        md, libcfs_nidstr(&nid));
3377
3378                                 /*
3379                                  * If there is a timeout on the response
3380                          * from the next hop, decrement its health
3381                          * value so that we don't use it.
3382                                  */
3383                                 lnet_net_lock(0);
3384                                 lpni = lnet_peer_ni_find_locked(&nid);
3385                                 if (lpni) {
3386                                         lnet_handle_remote_failure_locked(lpni);
3387                                         lnet_peer_ni_decref_locked(lpni);
3388                                 }
3389                                 lnet_net_unlock(0);
3390                         } else {
3391                                 lnet_res_unlock(i);
3392                                 break;
3393                         }
3394                 }
3395
3396                 if (!list_empty(&local_queue)) {
3397                         lnet_net_lock(i);
3398                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3399                         lnet_net_unlock(i);
3400                 }
3401         }
3402 }
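/* Editor's illustrative sketch, not part of lib-move.c: the splice-out /
 * process / splice-back pattern used above keeps the per-CPT lock hold
 * time O(1). The shared queue is emptied onto a stack-local list,
 * processed without blocking producers, and any survivors are re-spliced
 * at the end:
 */
#if 0
	LIST_HEAD(local_queue);

	lnet_net_lock(i);
	list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
	lnet_net_unlock(i);

	/* ... examine and prune local_queue without the lock ... */

	if (!list_empty(&local_queue)) {
		lnet_net_lock(i);
		list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
		lnet_net_unlock(i);
	}
#endif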
3403
3404 static void
3405 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3406 {
3407         struct lnet_msg *msg;
3408
3409         while (!list_empty(resendq)) {
3410                 struct lnet_peer_ni *lpni;
3411
3412                 msg = list_first_entry(resendq, struct lnet_msg,
3413                                        msg_list);
3414
3415                 list_del_init(&msg->msg_list);
3416
3417                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3418                 if (!lpni) {
3419                         lnet_net_unlock(cpt);
3420                         CERROR("Expected that a peer is already created for %s\n",
3421                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3422                         msg->msg_no_resend = true;
3423                         lnet_finalize(msg, -EFAULT);
3424                         lnet_net_lock(cpt);
3425                 } else {
3426                         int rc;
3427
3428                         lnet_peer_ni_decref_locked(lpni);
3429
3430                         lnet_net_unlock(cpt);
3431                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3432                                libcfs_nidstr(&msg->msg_src_nid_param),
3433                                libcfs_idstr(&msg->msg_target),
3434                                lnet_msgtyp2str(msg->msg_type),
3435                                msg->msg_recovery,
3436                                msg->msg_retry_count);
3437                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3438                                        &msg->msg_rtr_nid_param);
3439                         if (rc) {
3440                                 CERROR("Error sending %s to %s: %d\n",
3441                                        lnet_msgtyp2str(msg->msg_type),
3442                                        libcfs_idstr(&msg->msg_target), rc);
3443                                 msg->msg_no_resend = true;
3444                                 lnet_finalize(msg, rc);
3445                         }
3446                         lnet_net_lock(cpt);
3447                         if (!rc)
3448                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3449                 }
3450         }
3451 }
3452
3453 static void
3454 lnet_resend_pending_msgs(void)
3455 {
3456         int i;
3457
3458         cfs_cpt_for_each(i, lnet_cpt_table()) {
3459                 lnet_net_lock(i);
3460                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3461                 lnet_net_unlock(i);
3462         }
3463 }
3464
3465 /* called with cpt and ni_lock held */
3466 static void
3467 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3468 {
3469         struct lnet_handle_md recovery_mdh;
3470
3471         LNetInvalidateMDHandle(&recovery_mdh);
3472
3473         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3474             force) {
3475                 recovery_mdh = ni->ni_ping_mdh;
3476                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3477         }
3478         lnet_ni_unlock(ni);
3479         lnet_net_unlock(cpt);
3480         if (!LNetMDHandleIsInvalid(recovery_mdh))
3481                 LNetMDUnlink(recovery_mdh);
3482         lnet_net_lock(cpt);
3483         lnet_ni_lock(ni);
3484 }
3485
3486 /* Returns the total number of local NIs in recovery.
3487  * Records up to @arrsz of the associated NIDs in the @nidarr array
3488  */
3489 static int
3490 lnet_recover_local_nis(struct lnet_nid *nidarr, unsigned int arrsz)
3491 {
3492         struct lnet_mt_event_info *ev_info;
3493         LIST_HEAD(processed_list);
3494         LIST_HEAD(local_queue);
3495         struct lnet_handle_md mdh;
3496         struct lnet_ni *tmp;
3497         struct lnet_ni *ni;
3498         struct lnet_nid nid;
3499         int healthv;
3500         int rc;
3501         time64_t now;
3502         unsigned int nnis = 0;
3503
3504         /*
3505          * splice the recovery queue onto a local queue. We will iterate
3506          * through the local queue and update it as needed. Once we're
3507          * done with the traversal, we'll splice the local queue back on
3508          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3509          * will be traversed in the next iteration.
3510          */
3511         lnet_net_lock(0);
3512         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3513                          &local_queue);
3514         lnet_net_unlock(0);
3515
3516         now = ktime_get_seconds();
3517
3518         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3519                 /*
3520                  * if an NI is being deleted or it is now healthy, there
3521                  * is no need to keep it around in the recovery queue.
3522                  * The monitor thread is the only thread responsible for
3523                  * removing the NI from the recovery queue.
3524                  * Multiple threads can be adding NIs to the recovery
3525                  * queue.
3526                  */
3527                 healthv = atomic_read(&ni->ni_healthv);
3528
3529                 lnet_net_lock(0);
3530                 lnet_ni_lock(ni);
3531                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3532                     healthv == LNET_MAX_HEALTH_VALUE) {
3533                         list_del_init(&ni->ni_recovery);
3534                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3535                         lnet_ni_unlock(ni);
3536                         lnet_ni_decref_locked(ni, 0);
3537                         lnet_net_unlock(0);
3538                         continue;
3539                 }
3540
3541                 if (nnis < arrsz)
3542                         nidarr[nnis] = ni->ni_nid;
3543                 nnis++;
3544
3545                 /*
3546                  * if the local NI failed recovery we must unlink the md.
3547                  * But we want to keep the local_ni on the recovery queue
3548                  * so we can continue the attempts to recover it.
3549                  */
3550                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3551                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3552                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3553                 }
3554
3555
3556                 lnet_ni_unlock(ni);
3557
3558                 if (now < ni->ni_next_ping) {
3559                         lnet_net_unlock(0);
3560                         continue;
3561                 }
3562
3563                 lnet_net_unlock(0);
3564
3565                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3566                        libcfs_nidstr(&ni->ni_nid));
3567
3568                 lnet_ni_lock(ni);
3569                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3570                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3571                         lnet_ni_unlock(ni);
3572
3573                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3574                         if (!ev_info) {
3575                                 CERROR("out of memory. Can't recover %s\n",
3576                                        libcfs_nidstr(&ni->ni_nid));
3577                                 lnet_ni_lock(ni);
3578                                 ni->ni_recovery_state &=
3579                                   ~LNET_NI_RECOVERY_PENDING;
3580                                 lnet_ni_unlock(ni);
3581                                 continue;
3582                         }
3583
3584                         mdh = ni->ni_ping_mdh;
3585                         /*
3586                          * Invalidate the ni mdh in case it's deleted.
3587                          * We'll unlink the mdh in this case below.
3588                          */
3589                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3590                         nid = ni->ni_nid;
3591
3592                         /*
3593                          * remove the NI from the local queue and drop the
3594                          * reference count to it while we're recovering
3595                          * it. The reason is that the NI could be
3596                          * deleted, and if we don't drop our reference,
3597                          * the deletion code will enter a loop waiting
3598                          * for the reference count to be released while
3599                          * holding the
3600                          * ln_mutex_lock(). When we look up the peer to
3601                          * send to in lnet_select_pathway() we will try to
3602                          * lock the ln_mutex_lock() as well, leading to
3603                          * a deadlock. By dropping the refcount and
3604                          * removing it from the list, we allow for the NI
3605                          * to be removed, then we use the cached NID to
3606                          * look it up again. If it's gone, then we just
3607                          * continue examining the rest of the queue.
3608                          */
3609                         lnet_net_lock(0);
3610                         list_del_init(&ni->ni_recovery);
3611                         lnet_ni_decref_locked(ni, 0);
3612                         lnet_net_unlock(0);
3613
3614                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3615                         ev_info->mt_nid = nid;
3616                         rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
3617                                             ev_info, the_lnet.ln_mt_handler,
3618                                             true);
3619                         /* lookup the nid again */
3620                         lnet_net_lock(0);
3621                         ni = lnet_nid_to_ni_locked(&nid, 0);
3622                         if (!ni) {
3623                                 /*
3624                                  * the NI has been deleted when we dropped
3625                                  * the ref count
3626                                  */
3627                                 lnet_net_unlock(0);
3628                                 LNetMDUnlink(mdh);
3629                                 continue;
3630                         }
3631                         ni->ni_ping_count++;
3632
3633                         ni->ni_ping_mdh = mdh;
3634                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3635                                                         now);
3636
3637                         if (rc) {
3638                                 lnet_ni_lock(ni);
3639                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3640                                 lnet_ni_unlock(ni);
3641                         }
3642                         lnet_net_unlock(0);
3643                 } else
3644                         lnet_ni_unlock(ni);
3645         }
3646
3647         /*
3648          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3649          * reexamined in the next iteration.
3650          */
3651         list_splice_init(&processed_list, &local_queue);
3652         lnet_net_lock(0);
3653         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3654         lnet_net_unlock(0);
3655
3656         return nnis;
3657 }
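/* Editor's illustrative sketch, not part of lib-move.c: the
 * drop-ref-then-relookup dance described in the long comment above,
 * distilled. The lookup key is cached before the reference is dropped,
 * and the object is re-looked-up by key afterwards, accepting that it may
 * be gone:
 */
#if 0
	nid = ni->ni_nid;			/* cache the lookup key */
	lnet_net_lock(0);
	list_del_init(&ni->ni_recovery);
	lnet_ni_decref_locked(ni, 0);		/* deletion may now proceed */
	lnet_net_unlock(0);

	rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
			    ev_info, the_lnet.ln_mt_handler, true);

	lnet_net_lock(0);
	ni = lnet_nid_to_ni_locked(&nid, 0);
	if (!ni) {				/* deleted while unpinned */
		lnet_net_unlock(0);
		LNetMDUnlink(mdh);
		continue;			/* as in the loop above */
	}
#endif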
3658
3659 static int
3660 lnet_resendqs_create(void)
3661 {
3662         struct list_head **resendqs;
3663         resendqs = lnet_create_array_of_queues();
3664
3665         if (!resendqs)
3666                 return -ENOMEM;
3667
3668         lnet_net_lock(LNET_LOCK_EX);
3669         the_lnet.ln_mt_resendqs = resendqs;
3670         lnet_net_unlock(LNET_LOCK_EX);
3671
3672         return 0;
3673 }
3674
3675 static void
3676 lnet_clean_local_ni_recoveryq(void)
3677 {
3678         struct lnet_ni *ni;
3679
3680         /* This is only called when the monitor thread has stopped */
3681         lnet_net_lock(0);
3682
3683         while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
3684                                               struct lnet_ni,
3685                                               ni_recovery)) != NULL) {
3686                 list_del_init(&ni->ni_recovery);
3687                 lnet_ni_lock(ni);
3688                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3689                 lnet_ni_unlock(ni);
3690                 lnet_ni_decref_locked(ni, 0);
3691         }
3692
3693         lnet_net_unlock(0);
3694 }
3695
3696 static void
3697 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3698                                      bool force)
3699 {
3700         struct lnet_handle_md recovery_mdh;
3701
3702         LNetInvalidateMDHandle(&recovery_mdh);
3703
3704         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3705                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3706                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3707         }
3708         spin_unlock(&lpni->lpni_lock);
3709         lnet_net_unlock(cpt);
3710         if (!LNetMDHandleIsInvalid(recovery_mdh))
3711                 LNetMDUnlink(recovery_mdh);
3712         lnet_net_lock(cpt);
3713         spin_lock(&lpni->lpni_lock);
3714 }
3715
3716 static void
3717 lnet_clean_peer_ni_recoveryq(void)
3718 {
3719         struct lnet_peer_ni *lpni, *tmp;
3720
3721         lnet_net_lock(LNET_LOCK_EX);
3722
3723         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3724                                  lpni_recovery) {
3725                 list_del_init(&lpni->lpni_recovery);
3726                 spin_lock(&lpni->lpni_lock);
3727                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3728                 spin_unlock(&lpni->lpni_lock);
3729                 lnet_peer_ni_decref_locked(lpni);
3730         }
3731
3732         lnet_net_unlock(LNET_LOCK_EX);
3733 }
3734
3735 static void
3736 lnet_clean_resendqs(void)
3737 {
3738         struct lnet_msg *msg, *tmp;
3739         LIST_HEAD(msgs);
3740         int i;
3741
3742         cfs_cpt_for_each(i, lnet_cpt_table()) {
3743                 lnet_net_lock(i);
3744                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3745                 lnet_net_unlock(i);
3746                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3747                         list_del_init(&msg->msg_list);
3748                         msg->msg_no_resend = true;
3749                         lnet_finalize(msg, -ESHUTDOWN);
3750                 }
3751         }
3752
3753         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3754 }
3755
3756 /* Returns the total number of peer NIs in recovery.
3757  * Records up to @arrsz of the associated NIDs in the @nidarr array
3758  */
3759 static unsigned int
3760 lnet_recover_peer_nis(struct lnet_nid *nidarr, unsigned int arrsz)
3761 {
3762         struct lnet_mt_event_info *ev_info;
3763         LIST_HEAD(processed_list);
3764         LIST_HEAD(local_queue);
3765         struct lnet_handle_md mdh;
3766         struct lnet_peer_ni *lpni;
3767         struct lnet_peer_ni *tmp;
3768         struct lnet_nid nid;
3769         int healthv;
3770         int rc;
3771         time64_t now;
3772         unsigned int nlpnis = 0;
3773
3774         /*
3775          * Always use cpt 0 for locking across all interactions with
3776          * ln_mt_peerNIRecovq
3777          */
3778         lnet_net_lock(0);
3779         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3780                          &local_queue);
3781         lnet_net_unlock(0);
3782
3783         now = ktime_get_seconds();
3784
3785         list_for_each_entry_safe(lpni, tmp, &local_queue,
3786                                  lpni_recovery) {
3787                 /*
3788                  * The same protection strategy is used here as in the
3789                  * local recovery case.
3790                  */
3791                 lnet_net_lock(0);
3792                 healthv = atomic_read(&lpni->lpni_healthv);
3793                 spin_lock(&lpni->lpni_lock);
3794                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3795                     healthv == LNET_MAX_HEALTH_VALUE) {
3796                         list_del_init(&lpni->lpni_recovery);
3797                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3798                         spin_unlock(&lpni->lpni_lock);
3799                         lnet_peer_ni_decref_locked(lpni);
3800                         lnet_net_unlock(0);
3801                         continue;
3802                 }
3803
3804                 if (nlpnis < arrsz)
3805                         nidarr[nlpnis] = lpni->lpni_nid;
3806                 nlpnis++;
3807
3808                 /*
3809                  * If the peer NI has failed recovery we must unlink the
3810                  * md. But we want to keep the peer ni on the recovery
3811                  * queue so we can try to continue recovering it.
3812                  */
3813                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3814                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3815                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3816                 }
3817
3818                 spin_unlock(&lpni->lpni_lock);
3819
3820                 if (now < lpni->lpni_next_ping) {
3821                         lnet_net_unlock(0);
3822                         continue;
3823                 }
3824
3825                 lnet_net_unlock(0);
3826
3827                 /*
3828                  * NOTE: we're racing with peer deletion from user space.
3829                  * It's possible that a peer is deleted after we check its
3830                  * state. In this case the recovery can create a new peer.
3831                  */
3832                 spin_lock(&lpni->lpni_lock);
3833                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3834                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3835                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3836                         spin_unlock(&lpni->lpni_lock);
3837
3838                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3839                         if (!ev_info) {
3840                                 CERROR("out of memory. Can't recover %s\n",
3841                                        libcfs_nidstr(&lpni->lpni_nid));
3842                                 spin_lock(&lpni->lpni_lock);
3843                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3844                                 spin_unlock(&lpni->lpni_lock);
3845                                 continue;
3846                         }
3847
3848                         /* look at the comments in lnet_recover_local_nis() */
3849                         mdh = lpni->lpni_recovery_ping_mdh;
3850                         nid = lpni->lpni_nid;
3851                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3852                         lnet_net_lock(0);
3853                         list_del_init(&lpni->lpni_recovery);
3854                         lnet_peer_ni_decref_locked(lpni);
3855                         lnet_net_unlock(0);
3856
3857                         ev_info->mt_type = MT_TYPE_PEER_NI;
3858                         ev_info->mt_nid = nid;
3859                         rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
3860                                             ev_info, the_lnet.ln_mt_handler,
3861                                             true);
3862                         lnet_net_lock(0);
3863                         /*
3864                          * lnet_peer_ni_find_locked() grabs a refcount for
3865                          * us. No need to take it explicitly.
3866                          */
3867                         lpni = lnet_peer_ni_find_locked(&nid);
3868                         if (!lpni) {
3869                                 lnet_net_unlock(0);
3870                                 LNetMDUnlink(mdh);
3871                                 continue;
3872                         }
3873
3874                         lpni->lpni_ping_count++;
3875
3876                         lpni->lpni_recovery_ping_mdh = mdh;
3877
3878                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3879                                                              &processed_list,
3880                                                              now);
3881                         if (rc) {
3882                                 spin_lock(&lpni->lpni_lock);
3883                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3884                                 spin_unlock(&lpni->lpni_lock);
3885                         }
3886
3887                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3888                         lnet_peer_ni_decref_locked(lpni);
3889                         lnet_net_unlock(0);
3890                 } else {
3891                         spin_unlock(&lpni->lpni_lock);
3892                 }
3893         }
3894
3895         list_splice_init(&processed_list, &local_queue);
3896         lnet_net_lock(0);
3897         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3898         lnet_net_unlock(0);
3899
3900         return nlpnis;
3901 }
3902
3903 #define LNET_MAX_NNIDS 20
3904 /* @nids is an array of nids that are in recovery. It has a max size of
3905  * LNET_MAX_NNIDS.
3906  * @nnids is the total number of nids that are in recovery. It can be
3907  * larger than LNET_MAX_NNIDS.
3908  * @local tells us whether these are local or peer NIs in recovery.
3909  */
3910 static void
3911 lnet_print_recovery_list(struct lnet_nid *nids, unsigned int nnids,
3912                          bool local)
3913 {
3914         static bool printed;
3915         char *buf = NULL;
3916         char *tmp;
3917         int i;
3918         unsigned int arrsz;
3919         unsigned int bufsz;
3920
3921         if (!nnids)
3922                 return;
3923
3924         arrsz = nnids < LNET_MAX_NNIDS ? nnids : LNET_MAX_NNIDS;
3925
3926         /* Printing arrsz NIDs, each has max size LNET_NIDSTR_SIZE, a comma
3927          * and space for each nid after the first (2 * (arrsz - 1)),
3928          * + 1 for terminating null byte
3929          */
3930         bufsz = (arrsz * LNET_NIDSTR_SIZE) + (2 * (arrsz - 1)) + 1;
3931         LIBCFS_ALLOC(buf, bufsz);
3932         if (!buf) {
3933                 LCONSOLE(D_INFO, "%u %s NIs in recovery\n",
3934                          nnids, local ? "local" : "peer");
3935                 return;
3936         }
3937
3938         tmp = buf;
3939         tmp += sprintf(tmp, "%s", libcfs_nidstr(&nids[0]));
3940         for (i = 1; i < arrsz; i++)
3941                 tmp += sprintf(tmp, ", %s", libcfs_nidstr(&nids[i]));
3942
3943         /* LCONSOLE() used to avoid rate limiting when we have both local
3944          * and peer NIs in recovery
3945          */
3946         LCONSOLE(D_INFO, "%u %s NIs in recovery (showing %u): %s\n",
3947                  nnids, local ? "local" : "peer", arrsz, buf);
3948
3949         LIBCFS_FREE(buf, bufsz);
3950
3951         if (!printed && nnids > LNET_MAX_NNIDS) {
3952                 LCONSOLE(D_INFO, "See full list with 'lnetctl debug recovery -(p|l)'\n");
3953                 printed = true;
3954         }
3955 }
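/* Editor's worked example, not part of lib-move.c: the sizing above
 * reserves one NID string per entry, one ", " separator per entry after
 * the first, and a terminating NUL. With arrsz == 3 and a hypothetical
 * LNET_NIDSTR_SIZE of 64:
 */
#if 0
	bufsz = (3 * 64) + (2 * (3 - 1)) + 1;	/* == 197 bytes */
#endif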
3956
3957 static void
3958 lnet_health_update_console(struct lnet_nid *lnids, unsigned int nnis,
3959                            struct lnet_nid *rnids, unsigned int nlpnis,
3960                            time64_t now)
3961 {
3962         static time64_t next_ni_update;
3963         static time64_t next_lpni_update;
3964         static time64_t next_msg_update;
3965         static unsigned int num_ni_updates;
3966         static unsigned int num_lpni_updates;
3967         static unsigned int num_msg_updates = 1;
3968         int late_count;
3969
3970         if (now >= next_ni_update) {
3971                 if (nnis) {
3972                         lnet_print_recovery_list(lnids, nnis, true);
3973                         if (num_ni_updates < 5)
3974                                 num_ni_updates++;
3975                         next_ni_update = now + (60 * num_ni_updates);
3976                 } else {
3977                         next_ni_update = 0;
3978                         num_ni_updates = 0;
3979                 }
3980         }
3981
3982
3983         if (now >= next_lpni_update) {
3984                 if (nlpnis) {
3985                         lnet_print_recovery_list(rnids, nlpnis, false);
3986                         if (num_lpni_updates < 5)
3987                                 num_lpni_updates++;
3988                         next_lpni_update = now + (60 * num_lpni_updates);
3989                 } else {
3990                         next_lpni_update = 0;
3991                         num_lpni_updates = 0;
3992                 }
3993         }
3994
3995         /* Let late_count accumulate for 60 seconds */
3996         if (unlikely(!next_msg_update))
3997                 next_msg_update = now + 60;
3998
3999         if (now >= next_msg_update) {
4000                 late_count = atomic_read(&the_lnet.ln_late_msg_count);
4001
4002                 if (late_count) {
4003                         s64 avg = atomic64_xchg(&the_lnet.ln_late_msg_nsecs, 0) /
4004                                   atomic_xchg(&the_lnet.ln_late_msg_count, 0);
4005
4006                         if (avg > NSEC_PER_SEC) {
4007                                 unsigned int avg_msec;
4008
4009                                 avg_msec = do_div(avg, NSEC_PER_SEC) /
4010                                                 NSEC_PER_MSEC;
4011                                 LCONSOLE_INFO("%u messages in past %us over their deadline by avg %lld.%03us\n",
4012                                               late_count,
4013                                               (60 * num_msg_updates), avg,
4014                                               avg_msec);
4015
4016                                 if (num_msg_updates < 5)
4017                                         num_msg_updates++;
4018                                 next_msg_update = now + (60 * num_msg_updates);
4019                         }
4020                 } else {
4021                         next_msg_update = now + 60;
4022                         num_msg_updates = 1;
4023                 }
4024         }
4025 }
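/* Editor's worked example, not part of lib-move.c: the console updates
 * above back off linearly while NIs remain in recovery, with the update
 * counters capped at 5. Successive reports therefore arrive after 60,
 * 120, 180 and 240 seconds, then every 300 seconds:
 */
#if 0
	/* num_ni_updates:  1    2    3    4    5 (capped)	*/
	/* next interval : 60  120  180  240  300 seconds	*/
	next_ni_update = now + (60 * num_ni_updates);
#endif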
4026
4027 static int
4028 lnet_monitor_thread(void *arg)
4029 {
4030         time64_t rsp_timeout = 0;
4031         time64_t now;
4032         unsigned int nnis;
4033         unsigned int nlpnis;
4034         struct lnet_nid local_nids[LNET_MAX_NNIDS];
4035         struct lnet_nid peer_nids[LNET_MAX_NNIDS];
4036
4037         wait_for_completion(&the_lnet.ln_started);
4038
4039         /*
4040          * The monitor thread takes care of the following:
4041          *  1. Checks the aliveness of routers
4042          *  2. Checks the resend queue for messages and resends
4043          *     them.
4044          *  3. Checks if there are any NIs on the local recovery queue and
4045          *     pings them
4046          *  4. Checks if there are any NIs on the remote recovery queue
4047          *     and pings them.
4048          *  5. Updates the ping buffer if requested by LNDs upon interface
4049          *     state change
4050          */
4051         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
4052                 now = ktime_get_real_seconds();
4053
4054                 if (lnet_router_checker_active())
4055                         lnet_check_routers();
4056
4057                 lnet_resend_pending_msgs();
4058
4059                 if (now >= rsp_timeout) {
4060                         lnet_finalize_expired_responses();
4061                         rsp_timeout = now + (lnet_transaction_timeout / 2);
4062                 }
4063
4064                 nnis = lnet_recover_local_nis(local_nids, LNET_MAX_NNIDS);
4065                 nlpnis = lnet_recover_peer_nis(peer_nids, LNET_MAX_NNIDS);
4066                 lnet_health_update_console(local_nids, nnis, peer_nids, nlpnis,
4067                                            now);
4068
4069                 lnet_queue_ping_buffer_update();
4070
4071                 /*
4072                  * TODO do we need to check if we should sleep without
4073                  * timeout?  Technically, an active system will always
4074                  * have messages in flight so this check will always
4075                  * evaluate to false. And on an idle system do we care
4076          * if we wake up every 1 second? That said, we have seen
4077          * complaints that an idle thread waking up every second
4078          * is unnecessary.
4079                  */
4080                 wait_for_completion_interruptible_timeout(
4081                         &the_lnet.ln_mt_wait_complete,
4082                         cfs_time_seconds(1));
4083                 /* Must re-init the completion before testing anything,
4084                  * including ln_mt_state.
4085                  */
4086                 reinit_completion(&the_lnet.ln_mt_wait_complete);
4087         }
4088
4089         /* Shutting down */
4090         lnet_net_lock(LNET_LOCK_EX);
4091         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4092         lnet_net_unlock(LNET_LOCK_EX);
4093
4094         /* signal that the monitor thread is exiting */
4095         up(&the_lnet.ln_mt_signal);
4096
4097         return 0;
4098 }
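
/*
 * Shutdown handshake between lnet_monitor_thr_stop() and the loop
 * above, sketched for reference (both sides live in this file):
 *
 *	stopper				monitor thread
 *	-------				--------------
 *	ln_mt_state = STOPPING
 *	complete(&ln_mt_wait_complete)	wakes from its timed wait,
 *					reinit_completion(),
 *					sees state != RUNNING and
 *					leaves the loop,
 *					ln_mt_state = SHUTDOWN,
 *					up(&ln_mt_signal)
 *	down(&ln_mt_signal)
 *
 * Re-initializing the completion before re-testing ln_mt_state ensures
 * a complete() issued after the wakeup is not lost.
 */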
4099
4100 /*
4101  * lnet_send_ping
4102  * Sends a ping.
4103  * Returns 0 on success
4104  * Returns > 0 if LNetMDBind() or an earlier step fails
4105  * Returns < 0 if LNetGet fails
4106  */
4107 int
4108 lnet_send_ping(struct lnet_nid *dest_nid,
4109                struct lnet_handle_md *mdh, int bytes,
4110                void *user_data, lnet_handler_t handler, bool recovery)
4111 {
4112         struct lnet_md md = { NULL };
4113         struct lnet_processid id;
4114         struct lnet_ping_buffer *pbuf;
4115         int rc;
4116
4117         if (LNET_NID_IS_ANY(dest_nid)) {
4118                 rc = -EHOSTUNREACH;
4119                 goto fail_error;
4120         }
4121
4122         pbuf = lnet_ping_buffer_alloc(bytes, GFP_NOFS);
4123         if (!pbuf) {
4124                 rc = ENOMEM;
4125                 goto fail_error;
4126         }
4127
4128         /* initialize md content */
4129         md.start     = &pbuf->pb_info;
4130         md.length    = bytes;
4131         md.threshold = 2; /* GET/REPLY */
4132         md.max_size  = 0;
4133         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
4134         md.user_ptr  = user_data;
4135         md.handler   = handler;
4136
4137         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
4138         if (rc) {
4139                 lnet_ping_buffer_decref(pbuf);
4140                 CERROR("Can't bind MD: %d\n", rc);
4141                 rc = -rc; /* change the rc to positive */
4142                 goto fail_error;
4143         }
4144         id.pid = LNET_PID_LUSTRE;
4145         id.nid = *dest_nid;
4146
4147         rc = LNetGet(NULL, *mdh, &id,
4148                      LNET_RESERVED_PORTAL,
4149                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
4150
4151         if (rc)
4152                 goto fail_unlink_md;
4153
4154         return 0;
4155
4156 fail_unlink_md:
4157         LNetMDUnlink(*mdh);
4158         LNetInvalidateMDHandle(mdh);
4159 fail_error:
4160         return rc;
4161 }
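
/*
 * A minimal caller sketch for lnet_send_ping(); 'peer_nid',
 * 'ping_bytes', 'my_cookie' and 'my_ping_handler' are hypothetical,
 * not taken from the actual recovery path:
 *
 *	struct lnet_handle_md mdh;
 *	int rc;
 *
 *	LNetInvalidateMDHandle(&mdh);
 *	rc = lnet_send_ping(&peer_nid, &mdh, ping_bytes, my_cookie,
 *			    my_ping_handler, false);
 *
 * rc > 0 means LNetMDBind() or an earlier step failed and mdh was
 * never bound; rc < 0 means LNetGet() failed and mdh has already been
 * unlinked and invalidated; on success the outcome is delivered
 * through 'my_ping_handler'.
 */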
4162
4163 static void
4164 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
4165                            int status, bool send, bool unlink_event)
4166 {
4167         struct lnet_nid *nid = &ev_info->mt_nid;
4168
4169         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
4170                 struct lnet_ni *ni;
4171
4172                 lnet_net_lock(0);
4173                 ni = lnet_nid_to_ni_locked(nid, 0);
4174                 if (!ni) {
4175                         lnet_net_unlock(0);
4176                         return;
4177                 }
4178                 lnet_ni_lock(ni);
4179                 if (!send || status != 0)
4180                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
4181                 if (status)
4182                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
4183                 lnet_ni_unlock(ni);
4184                 lnet_net_unlock(0);
4185
4186                 if (status != 0) {
4187                         CDEBUG(D_NET, "local NI (%s) recovery failed with %d\n",
4188                                libcfs_nidstr(nid), status);
4189                         return;
4190                 }
4191                 /*
4192                  * The NI's healthv must be incremented here because
4193                  * the lnet_finalize() path has no access to this NI,
4194                  * and getting access there would mean carrying
4195                  * forward too much information.
4196                  * In the peer case it is incremented naturally.
4197                  */
4198                 if (!unlink_event)
4199                         lnet_inc_healthv(&ni->ni_healthv,
4200                                          lnet_health_sensitivity);
4201         } else {
4202                 struct lnet_peer_ni *lpni;
4203                 int cpt;
4204
4205                 cpt = lnet_net_lock_current();
4206                 lpni = lnet_peer_ni_find_locked(nid);
4207                 if (!lpni) {
4208                         lnet_net_unlock(cpt);
4209                         return;
4210                 }
4211                 spin_lock(&lpni->lpni_lock);
4212                 if (!send || status != 0)
4213                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
4214                 if (status)
4215                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
4216                 spin_unlock(&lpni->lpni_lock);
4217                 lnet_peer_ni_decref_locked(lpni);
4218                 lnet_net_unlock(cpt);
4219
4220                 if (status != 0)
4221                         CDEBUG(D_NET, "peer NI (%s) recovery failed with %d\n",
4222                                libcfs_nidstr(nid), status);
4223         }
4224 }
4225
4226 void
4227 lnet_mt_event_handler(struct lnet_event *event)
4228 {
4229         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4230         struct lnet_ping_buffer *pbuf;
4231
4232         /* TODO: remove assert */
4233         LASSERT(event->type == LNET_EVENT_REPLY ||
4234                 event->type == LNET_EVENT_SEND ||
4235                 event->type == LNET_EVENT_UNLINK);
4236
4237         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4238                event->status);
4239
4240         switch (event->type) {
4241         case LNET_EVENT_UNLINK:
4242                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4243                        libcfs_nidstr(&ev_info->mt_nid));
4244                 fallthrough;
4245         case LNET_EVENT_REPLY:
4246                 lnet_handle_recovery_reply(ev_info, event->status, false,
4247                                            event->type == LNET_EVENT_UNLINK);
4248                 break;
4249         case LNET_EVENT_SEND:
4250                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4251                                libcfs_nidstr(&ev_info->mt_nid),
4252                                (event->status) ? "unsuccessfully" :
4253                                "successfully", event->status);
4254                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4255                 break;
4256         default:
4257                 CERROR("Unexpected event: %d\n", event->type);
4258                 break;
4259         }
4260         if (event->unlinked) {
4261                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4262                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4263                 lnet_ping_buffer_decref(pbuf);
4264         }
4265 }
4266
4267 static int
4268 lnet_rsp_tracker_create(void)
4269 {
4270         struct list_head **rstqs;
4271
4272         rstqs = lnet_create_array_of_queues();
4273         if (!rstqs)
4274                 return -ENOMEM;
4275
4276         the_lnet.ln_mt_rstq = rstqs;
4277
4278         return 0;
4279 }
4280
4281 static void
4282 lnet_rsp_tracker_clean(void)
4283 {
4284         lnet_finalize_expired_responses();
4285
4286         cfs_percpt_free(the_lnet.ln_mt_rstq);
4287         the_lnet.ln_mt_rstq = NULL;
4288 }
4289
4290 int lnet_monitor_thr_start(void)
4291 {
4292         int rc = 0;
4293         struct task_struct *task;
4294
4295         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4296                 return -EALREADY;
4297
4298         rc = lnet_resendqs_create();
4299         if (rc)
4300                 return rc;
4301
4302         rc = lnet_rsp_tracker_create();
4303         if (rc)
4304                 goto clean_queues;
4305
4306         the_lnet.ln_pb_update_wq = alloc_workqueue("lnetpb_wq",
4307                                                    WQ_UNBOUND,
4308                                                    1);
4309         if (!the_lnet.ln_pb_update_wq) {
4310                 rc = -ENOMEM;
4311                 CERROR("Failed to allocate LNet ping buffer workqueue\n");
4312                 goto clean_queues;
4313         }
4314         atomic_set(&the_lnet.ln_pb_update_ready, 1);
4315
4316         sema_init(&the_lnet.ln_mt_signal, 0);
4317
4318         lnet_net_lock(LNET_LOCK_EX);
4319         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4320         lnet_net_unlock(LNET_LOCK_EX);
4321         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4322         if (IS_ERR(task)) {
4323                 rc = PTR_ERR(task);
4324                 CERROR("Can't start monitor thread: %d\n", rc);
4325                 goto clean_thread;
4326         }
4327
4328         return 0;
4329
4330 clean_thread:
4331         lnet_net_lock(LNET_LOCK_EX);
4332         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4333         lnet_net_unlock(LNET_LOCK_EX);
4334         /* block until event callback signals exit */
4335         down(&the_lnet.ln_mt_signal);
4336         /* clean up */
4337         lnet_net_lock(LNET_LOCK_EX);
4338         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4339         lnet_net_unlock(LNET_LOCK_EX);
4340         lnet_rsp_tracker_clean();
4341         lnet_clean_local_ni_recoveryq();
4342         lnet_clean_peer_ni_recoveryq();
4343         lnet_clean_resendqs();
4344         the_lnet.ln_mt_handler = NULL;
4345         return rc;
4346 clean_queues:
4347         lnet_rsp_tracker_clean();
4348         lnet_clean_local_ni_recoveryq();
4349         lnet_clean_peer_ni_recoveryq();
4350         lnet_clean_resendqs();
4351         return rc;
4352 }
4353
4354 void lnet_monitor_thr_stop(void)
4355 {
4356         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4357                 return;
4358
4359         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4360
4361         /* clean up the ping buffer update workqueue before telling
4362          * the monitor thread to shut down to avoid getting stuck
4363          * on pending messages
4364          */
4365         mutex_unlock(&the_lnet.ln_api_mutex);
4366         flush_workqueue(the_lnet.ln_pb_update_wq);
4367         destroy_workqueue(the_lnet.ln_pb_update_wq);
4368         atomic_set(&the_lnet.ln_pb_update_ready, 0);
4369         the_lnet.ln_pb_update_wq = NULL;
4370         mutex_lock(&the_lnet.ln_api_mutex);
4371
4372         lnet_net_lock(LNET_LOCK_EX);
4373         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4374         lnet_net_unlock(LNET_LOCK_EX);
4375
4376         /* tell the monitor thread that we're shutting down */
4377         complete(&the_lnet.ln_mt_wait_complete);
4378
4379         /* block until monitor thread signals that it's done */
4380         mutex_unlock(&the_lnet.ln_api_mutex);
4381         down(&the_lnet.ln_mt_signal);
4382         mutex_lock(&the_lnet.ln_api_mutex);
4383         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4384
4385         /* perform cleanup tasks */
4386         lnet_rsp_tracker_clean();
4387         lnet_clean_local_ni_recoveryq();
4388         lnet_clean_peer_ni_recoveryq();
4389         lnet_clean_resendqs();
4390 }
4391
4392 void
4393 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4394                   __u32 msg_type)
4395 {
4396         lnet_net_lock(cpt);
4397         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4398         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4399         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4400         lnet_net_unlock(cpt);
4401
4402         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4403 }
4404
4405 static void
4406 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4407 {
4408         struct lnet_hdr *hdr = &msg->msg_hdr;
4409
4410         if (msg->msg_wanted != 0)
4411                 lnet_setpayloadbuffer(msg);
4412
4413         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4414
4415         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4416          * it back into the ACK during lnet_finalize() */
4417         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4418                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4419
4420         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4421                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4422 }
4423
4424 static int
4425 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4426 {
4427         struct lnet_hdr         *hdr = &msg->msg_hdr;
4428         struct lnet_match_info  info;
4429         int                     rc;
4430         bool                    ready_delay;
4431
4432         /* Convert put fields to host byte order */
4433         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4434         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4435         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4436
4437         /* Primary peer NID. */
4438         info.mi_id.nid = msg->msg_initiator;
4439         info.mi_id.pid  = hdr->src_pid;
4440         info.mi_opc     = LNET_MD_OP_PUT;
4441         info.mi_portal  = hdr->msg.put.ptl_index;
4442         info.mi_rlength = hdr->payload_length;
4443         info.mi_roffset = hdr->msg.put.offset;
4444         info.mi_mbits   = hdr->msg.put.match_bits;
4445         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4446
4447         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4448         ready_delay = msg->msg_rx_ready_delay;
4449
4450  again:
4451         rc = lnet_ptl_match_md(&info, msg);
4452         switch (rc) {
4453         default:
4454                 LBUG();
4455
4456         case LNET_MATCHMD_OK:
4457                 lnet_recv_put(ni, msg);
4458                 return 0;
4459
4460         case LNET_MATCHMD_NONE:
4461                 if (ready_delay)
4462                         /* no eager_recv or has already called it, should
4463                          * have been attached on delayed list */
4464                         return 0;
4465
4466                 rc = lnet_ni_eager_recv(ni, msg);
4467                 if (rc == 0) {
4468                         ready_delay = true;
4469                         goto again;
4470                 }
4471                 fallthrough;
4472
4473         case LNET_MATCHMD_DROP:
4474                 CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
4475                         libcfs_idstr(&info.mi_id), info.mi_portal,
4476                         info.mi_mbits, info.mi_roffset,
4477                         info.mi_rlength, rc);
4478
4479                 return -ENOENT; /* -ve: OK but no match */
4480         }
4481 }
4482
4483 static int
4484 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4485 {
4486         struct lnet_match_info info;
4487         struct lnet_hdr *hdr = &msg->msg_hdr;
4488         struct lnet_processid source_id;
4489         struct lnet_handle_wire reply_wmd;
4490         int rc;
4491
4492         /* Convert get fields to host byte order */
4493         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4494         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4495         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4496         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4497
4498         source_id.nid = hdr->src_nid;
4499         source_id.pid = hdr->src_pid;
4500         /* Primary peer NID */
4501         info.mi_id.nid  = msg->msg_initiator;
4502         info.mi_id.pid  = hdr->src_pid;
4503         info.mi_opc     = LNET_MD_OP_GET;
4504         info.mi_portal  = hdr->msg.get.ptl_index;
4505         info.mi_rlength = hdr->msg.get.sink_length;
4506         info.mi_roffset = hdr->msg.get.src_offset;
4507         info.mi_mbits   = hdr->msg.get.match_bits;
4508         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4509
4510         rc = lnet_ptl_match_md(&info, msg);
4511         if (rc == LNET_MATCHMD_DROP) {
4512                 CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
4513                         libcfs_idstr(&info.mi_id), info.mi_portal,
4514                         info.mi_mbits, info.mi_roffset,
4515                         info.mi_rlength);
4516                 return -ENOENT; /* -ve: OK but no match */
4517         }
4518
4519         LASSERT(rc == LNET_MATCHMD_OK);
4520
4521         lnet_build_msg_event(msg, LNET_EVENT_GET);
4522
4523         reply_wmd = hdr->msg.get.return_wmd;
4524
4525         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4526                        msg->msg_offset, msg->msg_wanted);
4527
4528         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4529
4530         if (rdma_get) {
4531                 /* The LND completes the REPLY from its recv procedure */
4532                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4533                              msg->msg_offset, msg->msg_len, msg->msg_len);
4534                 return 0;
4535         }
4536
4537         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4538         msg->msg_receiving = 0;
4539
4540         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4541         if (rc < 0) {
4542                 /* didn't get as far as lnet_ni_send() */
4543                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4544                        libcfs_nidstr(&ni->ni_nid),
4545                        libcfs_idstr(&info.mi_id), rc);
4546
4547                 lnet_finalize(msg, rc);
4548         }
4549
4550         return 0;
4551 }
4552
4553 static int
4554 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4555 {
4556         void *private = msg->msg_private;
4557         struct lnet_hdr *hdr = &msg->msg_hdr;
4558         struct lnet_processid src = {};
4559         struct lnet_libmd *md;
4560         unsigned int rlength;
4561         unsigned int mlength;
4562         int cpt;
4563
4564         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4565         lnet_res_lock(cpt);
4566
4567         src.nid = hdr->src_nid;
4568         src.pid = hdr->src_pid;
4569
4570         /* NB handles only looked up by creator (no flips) */
4571         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4572         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4573                 CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
4574                         libcfs_nidstr(&ni->ni_nid),
4575                         libcfs_idstr(&src),
4576                         (md == NULL) ? "invalid" : "inactive",
4577                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4578                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4579                 if (md != NULL && md->md_me != NULL)
4580                         CERROR("REPLY MD also attached to portal %d\n",
4581                                md->md_me->me_portal);
4582
4583                 lnet_res_unlock(cpt);
4584                 return -ENOENT; /* -ve: OK but no match */
4585         }
4586
4587         LASSERT(md->md_offset == 0);
4588
4589         rlength = hdr->payload_length;
4590         mlength = min(rlength, md->md_length);
4591
4592         if (mlength < rlength &&
4593             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4594                 CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
4595                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4596                         rlength,
4597                         hdr->msg.reply.dst_wmd.wh_object_cookie,
4598                         mlength);
4599                 lnet_res_unlock(cpt);
4600                 return -ENOENT; /* -ve: OK but no match */
4601         }
4602
4603         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4604                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4605                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4606
4607         lnet_msg_attach_md(msg, md, 0, mlength);
4608
4609         if (mlength != 0)
4610                 lnet_setpayloadbuffer(msg);
4611
4612         lnet_res_unlock(cpt);
4613
4614         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4615
4616         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4617         return 0;
4618 }
4619
4620 static int
4621 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4622 {
4623         struct lnet_hdr *hdr = &msg->msg_hdr;
4624         struct lnet_processid src = {};
4625         struct lnet_libmd *md;
4626         int cpt;
4627
4628         src.nid = hdr->src_nid;
4629         src.pid = hdr->src_pid;
4630
4631         /* Convert ack fields to host byte order */
4632         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4633         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4634
4635         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4636         lnet_res_lock(cpt);
4637
4638         /* NB handles only looked up by creator (no flips) */
4639         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4640         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4641                 /* Don't moan; this is expected */
4642                 CDEBUG(D_NET,
4643                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4644                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4645                        (md == NULL) ? "invalid" : "inactive",
4646                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4647                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4648                 if (md != NULL && md->md_me != NULL)
4649                         CERROR("Source MD also attached to portal %d\n",
4650                                md->md_me->me_portal);
4651
4652                 lnet_res_unlock(cpt);
4653                 return -ENOENT;                  /* -ve! */
4654         }
4655
4656         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4657                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4658                hdr->msg.ack.dst_wmd.wh_object_cookie);
4659
4660         lnet_msg_attach_md(msg, md, 0, 0);
4661
4662         lnet_res_unlock(cpt);
4663
4664         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4665
4666         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4667         return 0;
4668 }
4669
4670 /**
4671  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4672  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4673  * \retval -ve                  error code
4674  */
4675 int
4676 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4677 {
4678         int     rc = 0;
4679
4680         if (!the_lnet.ln_routing)
4681                 return -ECANCELED;
4682
4683         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4684             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4685                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4686                         msg->msg_rx_ready_delay = 1;
4687                 } else {
4688                         lnet_net_unlock(msg->msg_rx_cpt);
4689                         rc = lnet_ni_eager_recv(ni, msg);
4690                         lnet_net_lock(msg->msg_rx_cpt);
4691                 }
4692         }
4693
4694         if (rc == 0)
4695                 rc = lnet_post_routed_recv_locked(msg, 0);
4696         return rc;
4697 }
4698
4699 int
4700 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4701 {
4702         int     rc;
4703
4704         switch (msg->msg_type) {
4705         case LNET_MSG_ACK:
4706                 rc = lnet_parse_ack(ni, msg);
4707                 break;
4708         case LNET_MSG_PUT:
4709                 rc = lnet_parse_put(ni, msg);
4710                 break;
4711         case LNET_MSG_GET:
4712                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4713                 break;
4714         case LNET_MSG_REPLY:
4715                 rc = lnet_parse_reply(ni, msg);
4716                 break;
4717         default: /* prevent an unused label if !kernel */
4718                 LASSERT(0);
4719                 return -EPROTO;
4720         }
4721
4722         LASSERT(rc == 0 || rc == -ENOENT);
4723         return rc;
4724 }
4725
4726 char *
4727 lnet_msgtyp2str(int type)
4728 {
4729         switch (type) {
4730         case LNET_MSG_ACK:
4731                 return "ACK";
4732         case LNET_MSG_PUT:
4733                 return "PUT";
4734         case LNET_MSG_GET:
4735                 return "GET";
4736         case LNET_MSG_REPLY:
4737                 return "REPLY";
4738         case LNET_MSG_HELLO:
4739                 return "HELLO";
4740         default:
4741                 return "<UNKNOWN>";
4742         }
4743 }
4744 EXPORT_SYMBOL(lnet_msgtyp2str);
4745
4746 int
4747 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4748            struct lnet_nid *from_nid, void *private, int rdma_req)
4749 {
4750         struct lnet_peer_ni *lpni;
4751         struct lnet_msg *msg;
4752         __u32 payload_length;
4753         lnet_pid_t dest_pid;
4754         struct lnet_nid dest_nid;
4755         struct lnet_nid src_nid;
4756         bool push = false;
4757         int for_me;
4758         __u32 type;
4759         int rc = 0;
4760         int cpt;
4761         time64_t now = ktime_get_seconds();
4762
4763         LASSERT(!in_interrupt());
4764
4765         type = hdr->type;
4766         src_nid = hdr->src_nid;
4767         dest_nid = hdr->dest_nid;
4768         dest_pid = hdr->dest_pid;
4769         payload_length = hdr->payload_length;
4770
4771         for_me = nid_same(&ni->ni_nid, &dest_nid);
4772         cpt = lnet_nid2cpt(from_nid, ni);
4773
4774         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4775                 libcfs_nidstr(&dest_nid),
4776                 libcfs_nidstr(&ni->ni_nid),
4777                 libcfs_nidstr(&src_nid),
4778                 lnet_msgtyp2str(type),
4779                 (for_me) ? "for me" : "routed");
4780
4781         switch (type) {
4782         case LNET_MSG_ACK:
4783         case LNET_MSG_GET:
4784                 if (payload_length > 0) {
4785                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4786                                libcfs_nidstr(from_nid),
4787                                libcfs_nidstr(&src_nid),
4788                                lnet_msgtyp2str(type), payload_length);
4789                         return -EPROTO;
4790                 }
4791                 break;
4792
4793         case LNET_MSG_PUT:
4794         case LNET_MSG_REPLY:
4795                 if (payload_length >
4796                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4797                         CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
4798                                libcfs_nidstr(from_nid),
4799                                libcfs_nidstr(&src_nid),
4800                                lnet_msgtyp2str(type),
4801                                payload_length,
4802                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4803
4804                         return -EPROTO;
4805                 }
4806                 break;
4807
4808         default:
4809                 CERROR("%s, src %s: Bad message type 0x%x\n",
4810                        libcfs_nidstr(from_nid),
4811                        libcfs_nidstr(&src_nid), type);
4812                 return -EPROTO;
4813         }
4814
4815         /* Only update net_last_alive for incoming GETs on the reserved portal
4816          * (i.e. incoming lnet/discovery pings).
4817          * This avoids situations where the router's own traffic results in NI
4818          * status changes
4819          */
4820         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4821             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4822             !lnet_islocalnid(&src_nid) &&
4823             ni->ni_net->net_last_alive != now) {
4824                 lnet_ni_lock(ni);
4825                 spin_lock(&ni->ni_net->net_lock);
4826                 ni->ni_net->net_last_alive = now;
4827                 spin_unlock(&ni->ni_net->net_lock);
4828                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4829                 lnet_ni_unlock(ni);
4830         }
4831
4832         if (push)
4833                 lnet_push_update_to_peers(1);
4834
4835         /* Regard a bad destination NID as a protocol error.  Senders should
4836          * know what they're doing; if they don't they're misconfigured, buggy
4837          * or malicious so we chop them off at the knees :) */
4838
4839         if (!for_me) {
4840                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4841                         /* should have gone direct */
4842                         CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
4843                                libcfs_nidstr(from_nid),
4844                                libcfs_nidstr(&src_nid),
4845                                libcfs_nidstr(&dest_nid));
4846
4847                         return -EPROTO;
4848                 }
4849
4850                 if (lnet_islocalnid(&dest_nid)) {
4851                         /* dest is another local NI; sender should have used
4852                          * this node's NID on its own network */
4853                         CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
4854                                libcfs_nidstr(from_nid),
4855                                libcfs_nidstr(&src_nid),
4856                                libcfs_nidstr(&dest_nid));
4857
4858                         return -EPROTO;
4859                 }
4860
4861                 if (rdma_req && type == LNET_MSG_GET) {
4862                         CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
4863                                libcfs_nidstr(from_nid),
4864                                libcfs_nidstr(&src_nid),
4865                                libcfs_nidstr(&dest_nid));
4866
4867                         return -EPROTO;
4868                 }
4869
4870                 if (!the_lnet.ln_routing) {
4871                         CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
4872                                libcfs_nidstr(from_nid),
4873                                libcfs_nidstr(&src_nid),
4874                                libcfs_nidstr(&dest_nid));
4875
4876                         goto drop;
4877                 }
4878         }
4879
4880         /* Message looks OK; we're not going to return an error, so we MUST
4881          * call back lnd_recv() come what may... */
4882
4883         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4884             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4885                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4886                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4887                        lnet_msgtyp2str(type));
4888                 goto drop;
4889         }
4890
4891         if (!list_empty(&the_lnet.ln_drop_rules) &&
4892             lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) {
4893                 CDEBUG(D_NET,
4894                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4895                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4896                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4897                 goto drop;
4898         }
4899
4900         msg = lnet_msg_alloc();
4901         if (msg == NULL) {
4902                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4903                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4904                        lnet_msgtyp2str(type));
4905                 goto drop;
4906         }
4907
4908         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4909          * pointers NULL etc */
4910
4911         msg->msg_type = type;
4912         msg->msg_private = private;
4913         msg->msg_receiving = 1;
4914         msg->msg_rdma_get = rdma_req;
4915         msg->msg_len = msg->msg_wanted = payload_length;
4916         msg->msg_offset = 0;
4917         msg->msg_hdr = *hdr;
4918         /* for building message event */
4919         msg->msg_from = *from_nid;
4920         if (!for_me) {
4921                 msg->msg_target.pid = dest_pid;
4922                 msg->msg_target.nid = dest_nid;
4923                 msg->msg_routing = 1;
4924         }
4925
4926         lnet_net_lock(cpt);
4927         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4928         if (IS_ERR(lpni)) {
4929                 lnet_net_unlock(cpt);
4930                 rc = PTR_ERR(lpni);
4931                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4932                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4933                        lnet_msgtyp2str(type), rc);
4934                 lnet_msg_free(msg);
4935                 if (rc == -ESHUTDOWN)
4936                         /* We are shutting down.  Don't do anything more */
4937                         return rc;
4938                 goto drop;
4939         }
4940
4941         /* If this message was forwarded to us from a router then we may need
4942          * to update router aliveness or check for an asymmetrical route
4943          * (or both)
4944          */
4945         if (((lnet_drop_asym_route && for_me) ||
4946              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4947             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4948                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4949                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4950                 struct lnet_route *route;
4951                 bool found = false;
4952
4953                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4954                         if (route->lr_net == src_net_id) {
4955                                 found = true;
4956                                 /* If we're transitioning the gateway from
4957                                  * dead -> alive, and discovery is disabled
4958                                  * locally or on the gateway, then we need to
4959                                  * update the cached route aliveness for each
4960                                  * route to the src_nid's net.
4961                                  *
4962                                  * Otherwise, we're only checking for a
4963                                  * symmetrical route and can break out of
4964                                  * the loop.
4965                                  */
4966                                 if (!gw->lp_alive &&
4967                                     lnet_is_discovery_disabled(gw))
4968                                         lnet_set_route_aliveness(route, true);
4969                                 else
4970                                         break;
4971                         }
4972                 }
4973                 if (lnet_drop_asym_route && for_me && !found) {
4974                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4975                         lnet_peer_ni_decref_locked(lpni);
4976                         lnet_net_unlock(cpt);
4977                         /* we would not use from_nid to route a message to
4978                          * src_nid
4979                          * => asymmetric routing detected but forbidden
4980                          */
4981                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4982                                libcfs_nidstr(from_nid),
4983                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4984                         lnet_msg_free(msg);
4985                         goto drop;
4986                 }
4987                 if (!gw->lp_alive) {
4988                         struct lnet_peer_net *lpn;
4989                         struct lnet_peer_ni *lpni2;
4990
4991                         gw->lp_alive = true;
4992                         /* Mark all remote NIs on src_nid's net UP */
4993                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4994                         if (lpn)
4995                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4996                                                     lpni_peer_nis)
4997                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4998                 }
4999         }
5000
5001         lpni->lpni_last_alive = now;
5002
5003         msg->msg_rxpeer = lpni;
5004         msg->msg_rxni = ni;
5005         lnet_ni_addref_locked(ni, cpt);
5006         /* Multi-Rail: Primary NID of source. */
5007         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
5008
5009         /*
5010          * mark the status of this lpni as UP since we received a message
5011          * from it. The ping response reports back the ns_status which is
5012          * marked on the remote as up or down and we cache it here.
5013          */
5014         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
5015
5016         lnet_msg_commit(msg, cpt);
5017
5018         /* message delay simulation */
5019         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
5020                      lnet_delay_rule_match_locked(hdr, msg))) {
5021                 lnet_net_unlock(cpt);
5022                 return 0;
5023         }
5024
5025         if (!for_me) {
5026                 rc = lnet_parse_forward_locked(ni, msg);
5027                 lnet_net_unlock(cpt);
5028
5029                 if (rc < 0)
5030                         goto free_drop;
5031
5032                 if (rc == LNET_CREDIT_OK) {
5033                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
5034                                      0, payload_length, payload_length);
5035                 }
5036                 return 0;
5037         }
5038
5039         lnet_net_unlock(cpt);
5040
5041         rc = lnet_parse_local(ni, msg);
5042         if (rc != 0)
5043                 goto free_drop;
5044         return 0;
5045
5046  free_drop:
5047         LASSERT(msg->msg_md == NULL);
5048         lnet_finalize(msg, rc);
5049
5050  drop:
5051         lnet_drop_message(ni, cpt, private, payload_length, type);
5052         return 0;
5053 }
5054 EXPORT_SYMBOL(lnet_parse);
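
/*
 * Receive-side contract with an LND, sketched with hypothetical
 * variable names:
 *
 *	rc = lnet_parse(ni, &hdr, &from_nid, lnd_private, 0);
 *
 * A negative return means a protocol error: LNet never took over the
 * receive and the LND must discard the incoming message itself. A
 * zero return means LNet owns the receive and is guaranteed to call
 * back into lnd_recv(), possibly only to discard the payload. The
 * last argument is non-zero only for an optimized GET, where the LND
 * RDMAs the reply data directly (see lnet_parse_get() above).
 */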
5055
5056 void
5057 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
5058 {
5059         struct lnet_msg *msg;
5060
5061         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
5062                                                msg_list)) != NULL) {
5063                 struct lnet_processid id = {};
5064
5065                 list_del(&msg->msg_list);
5066
5067                 id.nid = msg->msg_hdr.src_nid;
5068                 id.pid = msg->msg_hdr.src_pid;
5069
5070                 LASSERT(msg->msg_md == NULL);
5071                 LASSERT(msg->msg_rx_delayed);
5072                 LASSERT(msg->msg_rxpeer != NULL);
5073                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
5074
5075                 CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
5076                       libcfs_idstr(&id),
5077                       msg->msg_hdr.msg.put.ptl_index,
5078                       msg->msg_hdr.msg.put.match_bits,
5079                       msg->msg_hdr.msg.put.offset,
5080                       msg->msg_hdr.payload_length,
5081                       reason);
5082
5083                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
5084                  * called lnet_drop_message(), so I just hang onto msg as well
5085                  * until that's done */
5086
5087                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
5088                                   msg->msg_private, msg->msg_len,
5089                                   msg->msg_type);
5090
5091                 msg->msg_no_resend = true;
5092                 /*
5093                  * NB: message will not generate event because w/o attached MD,
5094                  * but we still should give error code so lnet_msg_decommit()
5095                  * can skip counters operations and other checks.
5096                  */
5097                 lnet_finalize(msg, -ENOENT);
5098         }
5099 }
5100
5101 void
5102 lnet_recv_delayed_msg_list(struct list_head *head)
5103 {
5104         struct lnet_msg *msg;
5105
5106         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
5107                                                msg_list)) != NULL) {
5108                 struct lnet_processid id;
5109
5110                 list_del(&msg->msg_list);
5111
5112                 /* md won't disappear under me, since each msg
5113                  * holds a ref on it */
5114
5115                 id.nid = msg->msg_hdr.src_nid;
5116                 id.pid = msg->msg_hdr.src_pid;
5117
5118                 LASSERT(msg->msg_rx_delayed);
5119                 LASSERT(msg->msg_md != NULL);
5120                 LASSERT(msg->msg_rxpeer != NULL);
5121                 LASSERT(msg->msg_rxni != NULL);
5122                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
5123
5124                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
5125                        libcfs_idstr(&id),
5126                        msg->msg_hdr.msg.put.ptl_index,
5127                        msg->msg_hdr.msg.put.match_bits,
5128                        msg->msg_hdr.msg.put.offset,
5129                        msg->msg_hdr.payload_length);
5130
5131                 lnet_recv_put(msg->msg_rxni, msg);
5132         }
5133 }
5134
5135 static void
5136 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
5137                         struct lnet_libmd *md, struct lnet_handle_md mdh)
5138 {
5139         s64 timeout_ns;
5140         struct lnet_rsp_tracker *local_rspt;
5141
5142         /*
5143          * MD has a refcount taken by message so it's not going away.
5144          * The MD however can be looked up. We need to secure the access
5145          * to the md_rspt_ptr by taking the res_lock.
5146          * The rspt can be accessed without protection up to when it gets
5147          * added to the list.
5148          */
5149
5150         lnet_res_lock(cpt);
5151         local_rspt = md->md_rspt_ptr;
5152         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
5153         if (local_rspt != NULL) {
5154                 /*
5155                  * we already have an rspt attached to the md, so we'll
5156                  * update the deadline on that one.
5157                  */
5158                 lnet_rspt_free(rspt, cpt);
5159         } else {
5160                 /* new md */
5161                 rspt->rspt_mdh = mdh;
5162                 rspt->rspt_cpt = cpt;
5163                 /* store the rspt so we can access it when we get the REPLY */
5164                 md->md_rspt_ptr = rspt;
5165                 local_rspt = rspt;
5166         }
5167         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
5168
5169         /*
5170          * add to the list of tracked responses. It's added to tail of the
5171          * list in order to expire all the older entries first.
5172          */
5173         lnet_net_lock(cpt);
5174         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
5175         lnet_net_unlock(cpt);
5176         lnet_res_unlock(cpt);
5177 }
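
/*
 * Since trackers are appended at the tail with a fresh deadline, each
 * ln_mt_rstq[cpt] list stays ordered oldest-deadline-first, so an
 * expiry pass only needs to walk from the head until it finds an
 * entry that has not yet expired. A sketch of that invariant (not the
 * actual lnet_finalize_expired_responses() code):
 *
 *	list_for_each_entry_safe(rspt, tmp, the_lnet.ln_mt_rstq[cpt],
 *				 rspt_on_list) {
 *		if (ktime_compare(now, rspt->rspt_deadline) < 0)
 *			break;
 *		... expire rspt ...
 *	}
 */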
5178
5179 /**
5180  * Initiate an asynchronous PUT operation.
5181  *
5182  * There are several events associated with a PUT: completion of the send on
5183  * the initiator node (LNET_EVENT_SEND), and when the send completes
5184  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
5185  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
5186  * used at the target node to indicate the completion of incoming data
5187  * delivery.
5188  *
5189  * The local events will be logged in the EQ associated with the MD pointed
5190  * to by the \a mdh handle. Using an MD without an associated EQ results in
5191  * these events being discarded. In this case, the caller must have another
5192  * mechanism (e.g., a higher level protocol) for determining when it is safe
5193  * to modify the memory region associated with the MD.
5194  *
5195  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
5196  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
5197  *
5198  * \param self Indicates the NID of a local interface through which to send
5199  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
5200  * \param mdh A handle for the MD that describes the memory to be sent. The MD
5201  * must be "free floating" (See LNetMDBind()).
5202  * \param ack Controls whether an acknowledgment is requested.
5203  * Acknowledgments are only sent when they are requested by the initiating
5204  * process and the target MD enables them.
5205  * \param target A process identifier for the target process.
5206  * \param portal The index in the \a target's portal table.
5207  * \param match_bits The match bits to use for MD selection at the target
5208  * process.
5209  * \param offset The offset into the target MD (only used when the target
5210  * MD has the LNET_MD_MANAGE_REMOTE option set).
5211  * \param hdr_data 64 bits of user data that can be included in the message
5212  * header. This data is written to an event queue entry at the target if an
5213  * EQ is present on the matching MD.
5214  *
5215  * \retval  0      Success, and only in this case events will be generated
5216  * and logged to EQ (if it exists).
5217  * \retval -EIO    Simulated failure.
5218  * \retval -ENOMEM Memory allocation failure.
5219  * \retval -ENOENT Invalid MD object.
5220  *
5221  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
5222  */
5223 int
5224 LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
5225         struct lnet_processid *target, unsigned int portal,
5226         __u64 match_bits, unsigned int offset,
5227         __u64 hdr_data)
5228 {
5229         struct lnet_msg *msg;
5230         struct lnet_libmd *md;
5231         int cpt;
5232         int rc;
5233         struct lnet_rsp_tracker *rspt = NULL;
5234
5235         LASSERT(the_lnet.ln_refcount > 0);
5236
5237         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5238             fail_peer(&target->nid, 1)) {               /* shall we now? */
5239                 CERROR("Dropping PUT to %s: simulated failure\n",
5240                        libcfs_idstr(target));
5241                 return -EIO;
5242         }
5243
5244         msg = lnet_msg_alloc();
5245         if (msg == NULL) {
5246                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5247                        libcfs_idstr(target));
5248                 return -ENOMEM;
5249         }
5250         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5251
5252         cpt = lnet_cpt_of_cookie(mdh.cookie);
5253
5254         if (ack == LNET_ACK_REQ) {
5255                 rspt = lnet_rspt_alloc(cpt);
5256                 if (!rspt) {
5257                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5258                                 libcfs_idstr(target));
5259                         lnet_msg_free(msg);
5260                         return -ENOMEM;
5261                 }
5262                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5263         }
5264
5265         lnet_res_lock(cpt);
5266
5267         md = lnet_handle2md(&mdh);
5268         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5269                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5270                        match_bits, portal, libcfs_idstr(target),
5271                        md == NULL ? -1 : md->md_threshold);
5272                 if (md != NULL && md->md_me != NULL)
5273                         CERROR("Source MD also attached to portal %d\n",
5274                                md->md_me->me_portal);
5275                 lnet_res_unlock(cpt);
5276
5277                 if (rspt)
5278                         lnet_rspt_free(rspt, cpt);
5279
5280                 lnet_msg_free(msg);
5281                 return -ENOENT;
5282         }
5283
5284         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5285
5286         lnet_msg_attach_md(msg, md, 0, 0);
5287
5288         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5289
5290         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5291         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5292         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5293         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5294
5295         /* NB handles only looked up by creator (no flips) */
5296         if (ack == LNET_ACK_REQ) {
5297                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5298                         the_lnet.ln_interface_cookie;
5299                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5300                         md->md_lh.lh_cookie;
5301         } else {
5302                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5303                         LNET_WIRE_HANDLE_COOKIE_NONE;
5304                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5305                         LNET_WIRE_HANDLE_COOKIE_NONE;
5306         }
5307
5308         lnet_res_unlock(cpt);
5309
5310         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5311
5312         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5313                                                    md->md_options))
5314                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5315         else if (rspt)
5316                 lnet_rspt_free(rspt, cpt);
5317
5318         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5319                                  CFS_FAIL_ONCE))
5320                 rc = -EIO;
5321         else
5322                 rc = lnet_send(self, msg, NULL);
5323
5324         if (rc != 0) {
5325                 CNETERR("Error sending PUT to %s: %d\n",
5326                         libcfs_idstr(target), rc);
5327                 msg->msg_no_resend = true;
5328                 lnet_finalize(msg, rc);
5329         }
5330
5331         /* completion will be signalled by an event */
5332         return 0;
5333 }
5334 EXPORT_SYMBOL(LNetPut);
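
/*
 * A minimal LNetPut() usage sketch; 'buf', 'buflen', 'peer_nid',
 * 'my_handler', MY_PORTAL and MY_MATCH_BITS are hypothetical:
 *
 *	struct lnet_md md = {
 *		.start     = buf,
 *		.length    = buflen,
 *		.threshold = 2,
 *		.handler   = my_handler,
 *	};
 *	struct lnet_handle_md mdh;
 *	struct lnet_processid tgt = { .nid = peer_nid,
 *				      .pid = LNET_PID_LUSTRE };
 *	int rc;
 *
 *	rc = LNetMDBind(&md, LNET_UNLINK, &mdh);
 *	if (rc == 0)
 *		rc = LNetPut(NULL, mdh, LNET_ACK_REQ, &tgt, MY_PORTAL,
 *			     MY_MATCH_BITS, 0, 0);
 *
 * A threshold of 2 covers the SEND and ACK events. Passing NULL for
 * \a self lets LNet choose the outgoing NI, as lnet_send_ping() does
 * for LNetGet(). A zero return only means the PUT was queued; the
 * outcome is reported via LNET_EVENT_SEND and (if requested and
 * enabled) LNET_EVENT_ACK through 'my_handler'.
 */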
5335
5336 /*
5337  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5338  * returns a msg for the LND to pass to lnet_finalize() when the sink
5339  * data has been received.
5340  *
5341  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5342  * lnet_finalize() is called on it, so the LND must call this first
5343  */
5344 struct lnet_msg *
5345 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5346 {
5347         struct lnet_msg *msg = lnet_msg_alloc();
5348         struct lnet_libmd *getmd = getmsg->msg_md;
5349         struct lnet_processid *peer_id = &getmsg->msg_target;
5350         int cpt;
5351
5352         LASSERT(!getmsg->msg_target_is_router);
5353         LASSERT(!getmsg->msg_routing);
5354
5355         if (msg == NULL) {
5356                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5357                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5358                 goto drop;
5359         }
5360
5361         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5362         lnet_res_lock(cpt);
5363
5364         LASSERT(getmd->md_refcount > 0);
5365
5366         if (getmd->md_threshold == 0) {
5367                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5368                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5369                         getmd);
5370                 lnet_res_unlock(cpt);
5371                 goto drop;
5372         }
5373
5374         LASSERT(getmd->md_offset == 0);
5375
5376         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5377                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5378
5379         /* setup information for lnet_build_msg_event */
5380         msg->msg_initiator =
5381                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5382         msg->msg_from = peer_id->nid;
5383         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5384         msg->msg_hdr.src_nid = peer_id->nid;
5385         msg->msg_hdr.payload_length = getmd->md_length;
5386         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5387
5388         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5389         lnet_res_unlock(cpt);
5390
5391         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5392
5393         lnet_net_lock(cpt);
5394         lnet_msg_commit(msg, cpt);
5395         lnet_net_unlock(cpt);
5396
5397         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5398
5399         return msg;
5400
5401  drop:
5402         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5403
5404         lnet_net_lock(cpt);
5405         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5406         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5407         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5408                 getmd->md_length;
5409         lnet_net_unlock(cpt);
5410
5411         if (msg != NULL)
5412                 lnet_msg_free(msg);
5413
5414         return NULL;
5415 }
5416 EXPORT_SYMBOL(lnet_create_reply_msg);
5417
5418 void
5419 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5420                        unsigned int len)
5421 {
5422         /* Set the REPLY length, now the RDMA that elides the REPLY message has
5423          * completed and I know it. */
5424         LASSERT(reply != NULL);
5425         LASSERT(reply->msg_type == LNET_MSG_GET);
5426         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5427
5428         /* NB I trusted my peer to RDMA.  If it claims to have written
5429          * beyond the end of my buffer, I might as well be dead. */
5430         LASSERT(len <= reply->msg_ev.mlength);
5431
5432         reply->msg_ev.mlength = len;
5433 }
5434 EXPORT_SYMBOL(lnet_set_reply_msg_len);
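
/*
 * Outline of how an LND uses the two helpers above for an optimized
 * GET; the ordering is the part that matters and the names are
 * hypothetical:
 *
 *	reply = lnet_create_reply_msg(ni, getmsg);
 *	... RDMA the sink data to the peer, finalize 'getmsg' ...
 *	lnet_set_reply_msg_len(ni, reply, bytes_written);
 *	lnet_finalize(reply, status);
 *
 * lnet_create_reply_msg() must be called while 'getmsg' is still
 * alive, i.e. before lnet_finalize() is called on it, because that
 * call frees the original GET.
 */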
5435
5436 /**
5437  * Initiate an asynchronous GET operation.
5438  *
5439  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5440  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5441  * the target node in the REPLY has been written to local MD.
5442  *
5443  * On the target node, an LNET_EVENT_GET is logged when the GET request
5444  * arrives and is accepted into a MD.
5445  *
5446  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5447  * \param mdh A handle for the MD that describes the memory into which the
5448  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5449  *
5450  * \retval  0      Success, and only in this case events will be generated
5451  * and logged to EQ (if it exists) of the MD.
5452  * \retval -EIO    Simulated failure.
5453  * \retval -ENOMEM Memory allocation failure.
5454  * \retval -ENOENT Invalid MD object.
5455  */
5456 int
5457 LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh,
5458         struct lnet_processid *target, unsigned int portal,
5459         __u64 match_bits, unsigned int offset, bool recovery)
5460 {
5461         struct lnet_msg *msg;
5462         struct lnet_libmd *md;
5463         struct lnet_rsp_tracker *rspt;
5464         int cpt;
5465         int rc;
5466
5467         LASSERT(the_lnet.ln_refcount > 0);
5468
5469         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5470             fail_peer(&target->nid, 1)) {               /* shall we now? */
5471                 CERROR("Dropping GET to %s: simulated failure\n",
5472                        libcfs_idstr(target));
5473
5474                 return -EIO;
5475         }
5476
5477         msg = lnet_msg_alloc();
5478         if (!msg) {
5479                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5480                        libcfs_idstr(target));
5481                 return -ENOMEM;
5482         }
5483
5484         cpt = lnet_cpt_of_cookie(mdh.cookie);
5485
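        /* Allocate the response tracker up front; it is attached to the MD
         * below only if response tracking applies to this GET, and is freed
         * otherwise.
         */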
        rspt = lnet_rspt_alloc(cpt);
        if (!rspt) {
                CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
                       libcfs_idstr(target));
                lnet_msg_free(msg);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&rspt->rspt_on_list);

        msg->msg_recovery = recovery;

        lnet_res_lock(cpt);

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_idstr(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);

                lnet_msg_free(msg);
                lnet_rspt_free(rspt, cpt);
                return -ENOENT;
        }

        CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

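        /* Fill the GET request's matching info; these fields travel
         * little-endian on the wire.
         */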
        msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

        /* NB handles only looked up by creator (no flips) */
        msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
                the_lnet.ln_interface_cookie;
        msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
                md->md_lh.lh_cookie;

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

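        /* Track the expected REPLY only when tracking is enabled for this
         * MD and message type; otherwise the tracker is not needed.
         */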
        if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
                lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
        else
                lnet_rspt_free(rspt, cpt);

        rc = lnet_send(self, msg, NULL);
        if (rc < 0) {
                CNETERR("Error sending GET to %s: %d\n",
                        libcfs_idstr(target), rc);
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}
EXPORT_SYMBOL(LNetGet);

/**
 * Calculate distance to node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnid If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface and the reserved
 * module option local_nid_dist_zero is set (the default).
 * \retval positive Distance to the target NID, i.e. the number of hops
 * plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
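 *
 * Example (a minimal sketch; dstnid is assumed to hold a valid NID):
 *
 *        struct lnet_nid src;
 *        __u32 order;
 *        int dist = LNetDist(&dstnid, &src, &order);
 *
 *        if (dist < 0)
 *                CERROR("%s unreachable\n", libcfs_nidstr(&dstnid));
 *        else
 *                CDEBUG(D_NET, "distance %d via %s (order %u)\n",
 *                       dist, libcfs_nidstr(&src), order);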
 */
int
LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp)
{
        struct lnet_ni *ni = NULL;
        struct lnet_remotenet *rnet;
        __u32 dstnet = LNET_NID_NET(dstnid);
        int hops;
        int cpt;
        __u32 order = 2;
        struct list_head *rn_list;
        struct lnet_ni *matched_dstnet = NULL;

        /* If !local_nid_dist_zero, never return a distance of 0 (when
         * lustre sees a distance of 0, it substitutes 0@lo), so keep
         * order 0 free for 0@lo and order 1 free for a local NID match.
         *
         * WARNING: dstnid and srcnid might point to the same place, so
         * don't set *srcnid until late.
         */

        LASSERT(the_lnet.ln_refcount > 0);

        cpt = lnet_net_lock_current();

        while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
                if (nid_same(&ni->ni_nid, dstnid)) {
                        if (orderp != NULL) {
                                if (nid_is_lo0(dstnid))
                                        *orderp = 0;
                                else
                                        *orderp = 1;
                        }
                        if (srcnid)
                                *srcnid = *dstnid;
                        lnet_net_unlock(cpt);

                        return local_nid_dist_zero ? 0 : 1;
                }

                if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
                        matched_dstnet = ni;
                        /* We matched the destination net, but we may have
                         * additional local NIs to inspect.
                         *
                         * We record the order as appropriate, but it may be
                         * overwritten if we match a local NI above.
                         */

                        if (orderp) {
                                /* Check whether ni was originally created in
                                 * the current net namespace.  If not, assign
                                 * an order above 0xffff0000 so this NI is
                                 * not preferred.
                                 */
                                if (current->nsproxy &&
                                    !net_eq(ni->ni_net_ns,
                                            current->nsproxy->net_ns))
                                        *orderp = order + 0xffff0000;
                                else
                                        *orderp = order;
                        }
                }

                order++;
        }

        if (matched_dstnet) {
                if (srcnid)
                        *srcnid = matched_dstnet->ni_nid;
                lnet_net_unlock(cpt);
                return 1;
        }

        rn_list = lnet_net2rnethash(dstnet);
        list_for_each_entry(rnet, rn_list, lrn_list) {
                if (rnet->lrn_net == dstnet) {
                        struct lnet_route *route;
                        struct lnet_route *shortest = NULL;
                        __u32 shortest_hops = LNET_UNDEFINED_HOPS;
                        __u32 route_hops;

                        LASSERT(!list_empty(&rnet->lrn_routes));

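                        /* Pick the route with the fewest hops; a route with
                         * an undefined hop count is treated as single-hop.
                         */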
                        list_for_each_entry(route, &rnet->lrn_routes,
                                            lr_list) {
                                route_hops = route->lr_hops;
                                if (route_hops == LNET_UNDEFINED_HOPS)
                                        route_hops = 1;
                                if (shortest == NULL ||
                                    route_hops < shortest_hops) {
                                        shortest = route;
                                        shortest_hops = route_hops;
                                }
                        }

                        LASSERT(shortest != NULL);
                        hops = shortest_hops;
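                        /* Report the first NI on the shortest route's local
                         * net as the source NID.
                         */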
                        if (srcnid) {
                                struct lnet_net *net;
                                net = lnet_get_net_locked(shortest->lr_lnet);
                                LASSERT(net);
                                ni = lnet_get_next_ni_locked(net, NULL);
                                *srcnid = ni->ni_nid;
                        }
                        if (orderp != NULL)
                                *orderp = order;
                        lnet_net_unlock(cpt);
                        return hops + 1;
                }
                order++;
        }

        lnet_net_unlock(cpt);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);