Whamcloud - gitweb
LU-16106 lnet: allow direct messages regardless of peer NI status
[fs/lustre-release.git] / lnet / lnet / lib-move.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         struct lnet_nid sd_dst_nid;
59         struct lnet_nid sd_src_nid;
60         struct lnet_nid sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicity enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
192 int
193 lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         struct lnet_nid nid;
199         LIST_HEAD(cull);
200
201         lnet_nid4_to_nid(nid4, &nid);
202         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
203         if (threshold != 0) {
204                 /* Adding a new entry */
205                 LIBCFS_ALLOC(tp, sizeof(*tp));
206                 if (tp == NULL)
207                         return -ENOMEM;
208
209                 tp->tp_nid = nid;
210                 tp->tp_threshold = threshold;
211
212                 lnet_net_lock(0);
213                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
214                 lnet_net_unlock(0);
215                 return 0;
216         }
217
218         lnet_net_lock(0);
219
220         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
221                 tp = list_entry(el, struct lnet_test_peer, tp_list);
222
223                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
224                     LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
225                     nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
226                         list_move(&tp->tp_list, &cull);
227                 }
228         }
229
230         lnet_net_unlock(0);
231
232         while ((tp = list_first_entry_or_null(&cull,
233                                               struct lnet_test_peer,
234                                               tp_list)) != NULL) {
235                 list_del(&tp->tp_list);
236                 LIBCFS_FREE(tp, sizeof(*tp));
237         }
238         return 0;
239 }
240
241 static int
242 fail_peer(struct lnet_nid *nid, int outgoing)
243 {
244         struct lnet_test_peer *tp;
245         struct list_head *el;
246         struct list_head *next;
247         LIST_HEAD(cull);
248         int fail = 0;
249
250         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
251         lnet_net_lock(0);
252
253         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
254                 tp = list_entry(el, struct lnet_test_peer, tp_list);
255
256                 if (tp->tp_threshold == 0) {
257                         /* zombie entry */
258                         if (outgoing) {
259                                 /* only cull zombies on outgoing tests,
260                                  * since we may be at interrupt priority on
261                                  * incoming messages. */
262                                 list_move(&tp->tp_list, &cull);
263                         }
264                         continue;
265                 }
266
267                 if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
268                     nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
269                         fail = 1;
270
271                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
272                                 tp->tp_threshold--;
273                                 if (outgoing &&
274                                     tp->tp_threshold == 0) {
275                                         /* see above */
276                                         list_move(&tp->tp_list, &cull);
277                                 }
278                         }
279                         break;
280                 }
281         }
282
283         lnet_net_unlock(0);
284
285         while ((tp = list_first_entry_or_null(&cull,
286                                               struct lnet_test_peer,
287                                               tp_list)) != NULL) {
288                 list_del(&tp->tp_list);
289                 LIBCFS_FREE(tp, sizeof(*tp));
290         }
291
292         return fail;
293 }
294
295 unsigned int
296 lnet_iov_nob(unsigned int niov, struct kvec *iov)
297 {
298         unsigned int nob = 0;
299
300         LASSERT(niov == 0 || iov != NULL);
301         while (niov-- > 0)
302                 nob += (iov++)->iov_len;
303
304         return (nob);
305 }
306 EXPORT_SYMBOL(lnet_iov_nob);
307
308 void
309 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
310                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
311                   unsigned int nob)
312 {
313         /* NB diov, siov are READ-ONLY */
314         unsigned int this_nob;
315
316         if (nob == 0)
317                 return;
318
319         /* skip complete frags before 'doffset' */
320         LASSERT(ndiov > 0);
321         while (doffset >= diov->iov_len) {
322                 doffset -= diov->iov_len;
323                 diov++;
324                 ndiov--;
325                 LASSERT(ndiov > 0);
326         }
327
328         /* skip complete frags before 'soffset' */
329         LASSERT(nsiov > 0);
330         while (soffset >= siov->iov_len) {
331                 soffset -= siov->iov_len;
332                 siov++;
333                 nsiov--;
334                 LASSERT(nsiov > 0);
335         }
336
337         do {
338                 LASSERT(ndiov > 0);
339                 LASSERT(nsiov > 0);
340                 this_nob = min3((unsigned int)diov->iov_len - doffset,
341                                 (unsigned int)siov->iov_len - soffset,
342                                 nob);
343
344                 memcpy((char *)diov->iov_base + doffset,
345                        (char *)siov->iov_base + soffset, this_nob);
346                 nob -= this_nob;
347
348                 if (diov->iov_len > doffset + this_nob) {
349                         doffset += this_nob;
350                 } else {
351                         diov++;
352                         ndiov--;
353                         doffset = 0;
354                 }
355
356                 if (siov->iov_len > soffset + this_nob) {
357                         soffset += this_nob;
358                 } else {
359                         siov++;
360                         nsiov--;
361                         soffset = 0;
362                 }
363         } while (nob > 0);
364 }
365 EXPORT_SYMBOL(lnet_copy_iov2iov);
366
367 unsigned int
368 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
369 {
370         unsigned int  nob = 0;
371
372         LASSERT(niov == 0 || kiov != NULL);
373         while (niov-- > 0)
374                 nob += (kiov++)->bv_len;
375
376         return (nob);
377 }
378 EXPORT_SYMBOL(lnet_kiov_nob);
379
380 void
381 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
382                     unsigned int doffset,
383                     unsigned int nsiov, struct bio_vec *siov,
384                     unsigned int soffset,
385                     unsigned int nob)
386 {
387         /* NB diov, siov are READ-ONLY */
388         unsigned int    this_nob;
389         char           *daddr = NULL;
390         char           *saddr = NULL;
391
392         if (nob == 0)
393                 return;
394
395         LASSERT (!in_interrupt ());
396
397         LASSERT (ndiov > 0);
398         while (doffset >= diov->bv_len) {
399                 doffset -= diov->bv_len;
400                 diov++;
401                 ndiov--;
402                 LASSERT(ndiov > 0);
403         }
404
405         LASSERT(nsiov > 0);
406         while (soffset >= siov->bv_len) {
407                 soffset -= siov->bv_len;
408                 siov++;
409                 nsiov--;
410                 LASSERT(nsiov > 0);
411         }
412
413         do {
414                 LASSERT(ndiov > 0);
415                 LASSERT(nsiov > 0);
416                 this_nob = min3(diov->bv_len - doffset,
417                                 siov->bv_len - soffset,
418                                 nob);
419
420                 if (daddr == NULL)
421                         daddr = ((char *)kmap(diov->bv_page)) +
422                                 diov->bv_offset + doffset;
423                 if (saddr == NULL)
424                         saddr = ((char *)kmap(siov->bv_page)) +
425                                 siov->bv_offset + soffset;
426
427                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
428                  * However in practice at least one of the kiovs will be mapped
429                  * kernel pages and the map/unmap will be NOOPs */
430
431                 memcpy (daddr, saddr, this_nob);
432                 nob -= this_nob;
433
434                 if (diov->bv_len > doffset + this_nob) {
435                         daddr += this_nob;
436                         doffset += this_nob;
437                 } else {
438                         kunmap(diov->bv_page);
439                         daddr = NULL;
440                         diov++;
441                         ndiov--;
442                         doffset = 0;
443                 }
444
445                 if (siov->bv_len > soffset + this_nob) {
446                         saddr += this_nob;
447                         soffset += this_nob;
448                 } else {
449                         kunmap(siov->bv_page);
450                         saddr = NULL;
451                         siov++;
452                         nsiov--;
453                         soffset = 0;
454                 }
455         } while (nob > 0);
456
457         if (daddr != NULL)
458                 kunmap(diov->bv_page);
459         if (saddr != NULL)
460                 kunmap(siov->bv_page);
461 }
462 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
463
464 void
465 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
466                     unsigned int nkiov, struct bio_vec *kiov,
467                     unsigned int kiovoffset,
468                     unsigned int nob)
469 {
470         /* NB iov, kiov are READ-ONLY */
471         unsigned int    this_nob;
472         char           *addr = NULL;
473
474         if (nob == 0)
475                 return;
476
477         LASSERT (!in_interrupt ());
478
479         LASSERT (niov > 0);
480         while (iovoffset >= iov->iov_len) {
481                 iovoffset -= iov->iov_len;
482                 iov++;
483                 niov--;
484                 LASSERT(niov > 0);
485         }
486
487         LASSERT(nkiov > 0);
488         while (kiovoffset >= kiov->bv_len) {
489                 kiovoffset -= kiov->bv_len;
490                 kiov++;
491                 nkiov--;
492                 LASSERT(nkiov > 0);
493         }
494
495         do {
496                 LASSERT(niov > 0);
497                 LASSERT(nkiov > 0);
498                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
499                                 (unsigned int)kiov->bv_len - kiovoffset,
500                                 nob);
501
502                 if (addr == NULL)
503                         addr = ((char *)kmap(kiov->bv_page)) +
504                                 kiov->bv_offset + kiovoffset;
505
506                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
507                 nob -= this_nob;
508
509                 if (iov->iov_len > iovoffset + this_nob) {
510                         iovoffset += this_nob;
511                 } else {
512                         iov++;
513                         niov--;
514                         iovoffset = 0;
515                 }
516
517                 if (kiov->bv_len > kiovoffset + this_nob) {
518                         addr += this_nob;
519                         kiovoffset += this_nob;
520                 } else {
521                         kunmap(kiov->bv_page);
522                         addr = NULL;
523                         kiov++;
524                         nkiov--;
525                         kiovoffset = 0;
526                 }
527
528         } while (nob > 0);
529
530         if (addr != NULL)
531                 kunmap(kiov->bv_page);
532 }
533 EXPORT_SYMBOL(lnet_copy_kiov2iov);
534
535 void
536 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
537                    unsigned int kiovoffset,
538                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
539                    unsigned int nob)
540 {
541         /* NB kiov, iov are READ-ONLY */
542         unsigned int    this_nob;
543         char           *addr = NULL;
544
545         if (nob == 0)
546                 return;
547
548         LASSERT (!in_interrupt ());
549
550         LASSERT (nkiov > 0);
551         while (kiovoffset >= kiov->bv_len) {
552                 kiovoffset -= kiov->bv_len;
553                 kiov++;
554                 nkiov--;
555                 LASSERT(nkiov > 0);
556         }
557
558         LASSERT(niov > 0);
559         while (iovoffset >= iov->iov_len) {
560                 iovoffset -= iov->iov_len;
561                 iov++;
562                 niov--;
563                 LASSERT(niov > 0);
564         }
565
566         do {
567                 LASSERT(nkiov > 0);
568                 LASSERT(niov > 0);
569                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
570                                 (unsigned int)iov->iov_len - iovoffset,
571                                 nob);
572
573                 if (addr == NULL)
574                         addr = ((char *)kmap(kiov->bv_page)) +
575                                 kiov->bv_offset + kiovoffset;
576
577                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
578                 nob -= this_nob;
579
580                 if (kiov->bv_len > kiovoffset + this_nob) {
581                         addr += this_nob;
582                         kiovoffset += this_nob;
583                 } else {
584                         kunmap(kiov->bv_page);
585                         addr = NULL;
586                         kiov++;
587                         nkiov--;
588                         kiovoffset = 0;
589                 }
590
591                 if (iov->iov_len > iovoffset + this_nob) {
592                         iovoffset += this_nob;
593                 } else {
594                         iov++;
595                         niov--;
596                         iovoffset = 0;
597                 }
598         } while (nob > 0);
599
600         if (addr != NULL)
601                 kunmap(kiov->bv_page);
602 }
603 EXPORT_SYMBOL(lnet_copy_iov2kiov);
604
605 int
606 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
607                   int src_niov, struct bio_vec *src,
608                   unsigned int offset, unsigned int len)
609 {
610         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
611          * for exactly 'len' bytes, and return the number of entries.
612          * NB not destructive to 'src' */
613         unsigned int    frag_len;
614         unsigned int    niov;
615
616         if (len == 0)                           /* no data => */
617                 return (0);                     /* no frags */
618
619         LASSERT(src_niov > 0);
620         while (offset >= src->bv_len) {      /* skip initial frags */
621                 offset -= src->bv_len;
622                 src_niov--;
623                 src++;
624                 LASSERT(src_niov > 0);
625         }
626
627         niov = 1;
628         for (;;) {
629                 LASSERT(src_niov > 0);
630                 LASSERT((int)niov <= dst_niov);
631
632                 frag_len = src->bv_len - offset;
633                 dst->bv_page = src->bv_page;
634                 dst->bv_offset = src->bv_offset + offset;
635
636                 if (len <= frag_len) {
637                         dst->bv_len = len;
638                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
639                         return niov;
640                 }
641
642                 dst->bv_len = frag_len;
643                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
644
645                 len -= frag_len;
646                 dst++;
647                 src++;
648                 niov++;
649                 src_niov--;
650                 offset = 0;
651         }
652 }
653 EXPORT_SYMBOL(lnet_extract_kiov);
654
655 void
656 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
657              int delayed, unsigned int offset, unsigned int mlen,
658              unsigned int rlen)
659 {
660         unsigned int niov = 0;
661         struct kvec *iov = NULL;
662         struct bio_vec  *kiov = NULL;
663         int rc;
664
665         LASSERT (!in_interrupt ());
666         LASSERT (mlen == 0 || msg != NULL);
667
668         if (msg != NULL) {
669                 LASSERT(msg->msg_receiving);
670                 LASSERT(!msg->msg_sending);
671                 LASSERT(rlen == msg->msg_len);
672                 LASSERT(mlen <= msg->msg_len);
673                 LASSERT(msg->msg_offset == offset);
674                 LASSERT(msg->msg_wanted == mlen);
675
676                 msg->msg_receiving = 0;
677
678                 if (mlen != 0) {
679                         niov = msg->msg_niov;
680                         kiov = msg->msg_kiov;
681
682                         LASSERT (niov > 0);
683                         LASSERT ((iov == NULL) != (kiov == NULL));
684                 }
685         }
686
687         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
688                                              niov, kiov, offset, mlen,
689                                              rlen);
690         if (rc < 0)
691                 lnet_finalize(msg, rc);
692 }
693
694 static void
695 lnet_setpayloadbuffer(struct lnet_msg *msg)
696 {
697         struct lnet_libmd *md = msg->msg_md;
698
699         LASSERT(msg->msg_len > 0);
700         LASSERT(!msg->msg_routing);
701         LASSERT(md != NULL);
702         LASSERT(msg->msg_niov == 0);
703         LASSERT(msg->msg_kiov == NULL);
704
705         msg->msg_niov = md->md_niov;
706         msg->msg_kiov = md->md_kiov;
707 }
708
709 void
710 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
711                unsigned int offset, unsigned int len)
712 {
713         msg->msg_type = type;
714         msg->msg_target = *target;
715         msg->msg_len = len;
716         msg->msg_offset = offset;
717
718         if (len != 0)
719                 lnet_setpayloadbuffer(msg);
720
721         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
722         msg->msg_hdr.type           = type;
723         /* dest_nid will be overwritten by lnet_select_pathway() */
724         msg->msg_hdr.dest_nid = target->nid;
725         msg->msg_hdr.dest_pid = target->pid;
726         /* src_nid will be set later */
727         msg->msg_hdr.src_pid        = the_lnet.ln_pid;
728         msg->msg_hdr.payload_length = len;
729 }
730
731 void
732 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
733 {
734         void *priv = msg->msg_private;
735         int rc;
736
737         LASSERT(!in_interrupt());
738         LASSERT(nid_is_lo0(&ni->ni_nid) ||
739                 (msg->msg_txcredit && msg->msg_peertxcredit));
740
741         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
742         if (rc < 0) {
743                 msg->msg_no_resend = true;
744                 lnet_finalize(msg, rc);
745         }
746 }
747
748 static int
749 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
750 {
751         int     rc;
752
753         LASSERT(!msg->msg_sending);
754         LASSERT(msg->msg_receiving);
755         LASSERT(!msg->msg_rx_ready_delay);
756         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
757
758         msg->msg_rx_ready_delay = 1;
759         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
760                                                   &msg->msg_private);
761         if (rc != 0) {
762                 CERROR("recv from %s / send to %s aborted: "
763                        "eager_recv failed %d\n",
764                        libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
765                        libcfs_idstr(&msg->msg_target), rc);
766                 LASSERT(rc < 0); /* required by my callers */
767         }
768
769         return rc;
770 }
771
772 /* returns true if this message should be dropped */
773 static bool
774 lnet_check_message_drop(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
775                         struct lnet_msg *msg)
776 {
777         if (msg->msg_target.pid & LNET_PID_USERFLAG)
778                 return false;
779
780         if (!lnet_peer_aliveness_enabled(lpni))
781                 return false;
782
783         /* If we're resending a message, let's attempt to send it even if
784          * the peer is down to fulfill our resend quota on the message
785          */
786         if (msg->msg_retry_count > 0)
787                 return false;
788
789         /* try and send recovery messages irregardless */
790         if (msg->msg_recovery)
791                 return false;
792
793         /* always send any responses */
794         if (lnet_msg_is_response(msg))
795                 return false;
796
797         /* always send non-routed messages */
798         if (!msg->msg_routing)
799                 return false;
800
801         /* assume peer_ni is alive as long as we're within the configured
802          * peer timeout
803          */
804         return ktime_get_seconds() >=
805                 (lpni->lpni_last_alive +
806                  lpni->lpni_net->net_tunables.lct_peer_timeout);
807 }
808
809 /**
810  * \param msg The message to be sent.
811  * \param do_send True if lnet_ni_send() should be called in this function.
812  *        lnet_send() is going to lnet_net_unlock immediately after this, so
813  *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
814  *
815  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
816  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
817  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
818  * \retval -ECANCELED If the MD of the message has been unlinked.
819  */
820 static int
821 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
822 {
823         struct lnet_peer_ni     *lp = msg->msg_txpeer;
824         struct lnet_ni          *ni = msg->msg_txni;
825         int                     cpt = msg->msg_tx_cpt;
826         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
827
828         /* non-lnet_send() callers have checked before */
829         LASSERT(!do_send || msg->msg_tx_delayed);
830         LASSERT(!msg->msg_receiving);
831         LASSERT(msg->msg_tx_committed);
832
833         /* can't get here if we're sending to the loopback interface */
834         if (the_lnet.ln_loni)
835                 LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));
836
837         /* NB 'lp' is always the next hop */
838         if (lnet_check_message_drop(ni, lp, msg)) {
839                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
840                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
841                         msg->msg_len;
842                 lnet_net_unlock(cpt);
843                 if (msg->msg_txpeer)
844                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
845                                         msg->msg_type,
846                                         LNET_STATS_TYPE_DROP);
847                 if (msg->msg_txni)
848                         lnet_incr_stats(&msg->msg_txni->ni_stats,
849                                         msg->msg_type,
850                                         LNET_STATS_TYPE_DROP);
851
852                 CNETERR("Dropping message for %s: peer not alive\n",
853                         libcfs_idstr(&msg->msg_target));
854                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
855                 if (do_send)
856                         lnet_finalize(msg, -EHOSTUNREACH);
857
858                 lnet_net_lock(cpt);
859                 return -EHOSTUNREACH;
860         }
861
862         if (msg->msg_md != NULL &&
863             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
864                 lnet_net_unlock(cpt);
865
866                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
867                         "called on the MD/ME.\n",
868                         libcfs_idstr(&msg->msg_target));
869                 if (do_send) {
870                         msg->msg_no_resend = true;
871                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
872                                msg, libcfs_idstr(&msg->msg_target));
873                         lnet_finalize(msg, -ECANCELED);
874                 }
875
876                 lnet_net_lock(cpt);
877                 return -ECANCELED;
878         }
879
880         if (!msg->msg_peertxcredit) {
881                 spin_lock(&lp->lpni_lock);
882                 LASSERT((lp->lpni_txcredits < 0) ==
883                         !list_empty(&lp->lpni_txq));
884
885                 msg->msg_peertxcredit = 1;
886                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
887                 lp->lpni_txcredits--;
888
889                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
890                         lp->lpni_mintxcredits = lp->lpni_txcredits;
891
892                 if (lp->lpni_txcredits < 0) {
893                         msg->msg_tx_delayed = 1;
894                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
895                         spin_unlock(&lp->lpni_lock);
896                         return LNET_CREDIT_WAIT;
897                 }
898                 spin_unlock(&lp->lpni_lock);
899         }
900
901         if (!msg->msg_txcredit) {
902                 LASSERT((tq->tq_credits < 0) ==
903                         !list_empty(&tq->tq_delayed));
904
905                 msg->msg_txcredit = 1;
906                 tq->tq_credits--;
907                 atomic_dec(&ni->ni_tx_credits);
908
909                 if (tq->tq_credits < tq->tq_credits_min)
910                         tq->tq_credits_min = tq->tq_credits;
911
912                 if (tq->tq_credits < 0) {
913                         msg->msg_tx_delayed = 1;
914                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
915                         return LNET_CREDIT_WAIT;
916                 }
917         }
918
919         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
920             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
921                 msg->msg_tx_delayed = 1;
922                 return LNET_CREDIT_WAIT;
923         }
924
925         /* unset the tx_delay flag as we're going to send it now */
926         msg->msg_tx_delayed = 0;
927
928         if (do_send) {
929                 lnet_net_unlock(cpt);
930                 lnet_ni_send(ni, msg);
931                 lnet_net_lock(cpt);
932         }
933         return LNET_CREDIT_OK;
934 }
935
936
937 static struct lnet_rtrbufpool *
938 lnet_msg2bufpool(struct lnet_msg *msg)
939 {
940         struct lnet_rtrbufpool  *rbp;
941         int                     cpt;
942
943         LASSERT(msg->msg_rx_committed);
944
945         cpt = msg->msg_rx_cpt;
946         rbp = &the_lnet.ln_rtrpools[cpt][0];
947
948         LASSERT(msg->msg_len <= LNET_MTU);
949         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
950                 rbp++;
951                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
952         }
953
954         return rbp;
955 }
956
957 static int
958 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
959 {
960         /* lnet_parse is going to lnet_net_unlock immediately after this, so it
961          * sets do_recv FALSE and I don't do the unlock/send/lock bit.
962          * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
963          * received or OK to receive */
964         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
965         struct lnet_peer *lp;
966         struct lnet_rtrbufpool *rbp;
967         struct lnet_rtrbuf *rb;
968
969         LASSERT(msg->msg_kiov == NULL);
970         LASSERT(msg->msg_niov == 0);
971         LASSERT(msg->msg_routing);
972         LASSERT(msg->msg_receiving);
973         LASSERT(!msg->msg_sending);
974         LASSERT(lpni->lpni_peer_net);
975         LASSERT(lpni->lpni_peer_net->lpn_peer);
976
977         lp = lpni->lpni_peer_net->lpn_peer;
978
979         /* non-lnet_parse callers only receive delayed messages */
980         LASSERT(!do_recv || msg->msg_rx_delayed);
981
982         if (!msg->msg_peerrtrcredit) {
983                 /* lpni_lock protects the credit manipulation */
984                 spin_lock(&lpni->lpni_lock);
985
986                 msg->msg_peerrtrcredit = 1;
987                 lpni->lpni_rtrcredits--;
988                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
989                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
990
991                 if (lpni->lpni_rtrcredits < 0) {
992                         spin_unlock(&lpni->lpni_lock);
993                         /* must have checked eager_recv before here */
994                         LASSERT(msg->msg_rx_ready_delay);
995                         msg->msg_rx_delayed = 1;
996                         /* lp_lock protects the lp_rtrq */
997                         spin_lock(&lp->lp_lock);
998                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
999                         spin_unlock(&lp->lp_lock);
1000                         return LNET_CREDIT_WAIT;
1001                 }
1002                 spin_unlock(&lpni->lpni_lock);
1003         }
1004
1005         rbp = lnet_msg2bufpool(msg);
1006
1007         if (!msg->msg_rtrcredit) {
1008                 msg->msg_rtrcredit = 1;
1009                 rbp->rbp_credits--;
1010                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1011                         rbp->rbp_mincredits = rbp->rbp_credits;
1012
1013                 if (rbp->rbp_credits < 0) {
1014                         /* must have checked eager_recv before here */
1015                         LASSERT(msg->msg_rx_ready_delay);
1016                         msg->msg_rx_delayed = 1;
1017                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1018                         return LNET_CREDIT_WAIT;
1019                 }
1020         }
1021
1022         LASSERT(!list_empty(&rbp->rbp_bufs));
1023         rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list);
1024         list_del(&rb->rb_list);
1025
1026         msg->msg_niov = rbp->rbp_npages;
1027         msg->msg_kiov = &rb->rb_kiov[0];
1028
1029         /* unset the msg-rx_delayed flag since we're receiving the message */
1030         msg->msg_rx_delayed = 0;
1031
1032         if (do_recv) {
1033                 int cpt = msg->msg_rx_cpt;
1034
1035                 lnet_net_unlock(cpt);
1036                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1037                              0, msg->msg_len, msg->msg_len);
1038                 lnet_net_lock(cpt);
1039         }
1040         return LNET_CREDIT_OK;
1041 }
1042
1043 void
1044 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1045 {
1046         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1047         struct lnet_ni          *txni = msg->msg_txni;
1048         struct lnet_msg         *msg2;
1049
1050         if (msg->msg_txcredit) {
1051                 struct lnet_ni       *ni = msg->msg_txni;
1052                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1053
1054                 /* give back NI txcredits */
1055                 msg->msg_txcredit = 0;
1056
1057                 LASSERT((tq->tq_credits < 0) ==
1058                         !list_empty(&tq->tq_delayed));
1059
1060                 tq->tq_credits++;
1061                 atomic_inc(&ni->ni_tx_credits);
1062                 if (tq->tq_credits <= 0) {
1063                         msg2 = list_first_entry(&tq->tq_delayed,
1064                                                 struct lnet_msg, msg_list);
1065                         list_del(&msg2->msg_list);
1066
1067                         LASSERT(msg2->msg_txni == ni);
1068                         LASSERT(msg2->msg_tx_delayed);
1069                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1070
1071                         (void) lnet_post_send_locked(msg2, 1);
1072                 }
1073         }
1074
1075         if (msg->msg_peertxcredit) {
1076                 /* give back peer txcredits */
1077                 msg->msg_peertxcredit = 0;
1078
1079                 spin_lock(&txpeer->lpni_lock);
1080                 LASSERT((txpeer->lpni_txcredits < 0) ==
1081                         !list_empty(&txpeer->lpni_txq));
1082
1083                 txpeer->lpni_txqnob -=  msg->msg_len +
1084                                         sizeof(struct lnet_hdr_nid4);
1085                 LASSERT(txpeer->lpni_txqnob >= 0);
1086
1087                 txpeer->lpni_txcredits++;
1088                 if (txpeer->lpni_txcredits <= 0) {
1089                         int msg2_cpt;
1090
1091                         msg2 = list_first_entry(&txpeer->lpni_txq,
1092                                                 struct lnet_msg, msg_list);
1093                         list_del(&msg2->msg_list);
1094                         spin_unlock(&txpeer->lpni_lock);
1095
1096                         LASSERT(msg2->msg_txpeer == txpeer);
1097                         LASSERT(msg2->msg_tx_delayed);
1098
1099                         msg2_cpt = msg2->msg_tx_cpt;
1100
1101                         /*
1102                          * The msg_cpt can be different from the msg2_cpt
1103                          * so we need to make sure we lock the correct cpt
1104                          * for msg2.
1105                          * Once we call lnet_post_send_locked() it is no
1106                          * longer safe to access msg2, since it could've
1107                          * been freed by lnet_finalize(), but we still
1108                          * need to relock the correct cpt, so we cache the
1109                          * msg2_cpt for the purpose of the check that
1110                          * follows the call to lnet_pose_send_locked().
1111                          */
1112                         if (msg2_cpt != msg->msg_tx_cpt) {
1113                                 lnet_net_unlock(msg->msg_tx_cpt);
1114                                 lnet_net_lock(msg2_cpt);
1115                         }
1116                         (void) lnet_post_send_locked(msg2, 1);
1117                         if (msg2_cpt != msg->msg_tx_cpt) {
1118                                 lnet_net_unlock(msg2_cpt);
1119                                 lnet_net_lock(msg->msg_tx_cpt);
1120                         }
1121                 } else {
1122                         spin_unlock(&txpeer->lpni_lock);
1123                 }
1124         }
1125
1126         if (txni != NULL) {
1127                 msg->msg_txni = NULL;
1128                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1129         }
1130
1131         if (txpeer != NULL) {
1132                 msg->msg_txpeer = NULL;
1133                 lnet_peer_ni_decref_locked(txpeer);
1134         }
1135 }
1136
1137 void
1138 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1139 {
1140         struct lnet_msg *msg;
1141
1142         if (list_empty(&rbp->rbp_msgs))
1143                 return;
1144         msg = list_first_entry(&rbp->rbp_msgs,
1145                                struct lnet_msg, msg_list);
1146         list_del(&msg->msg_list);
1147
1148         (void)lnet_post_routed_recv_locked(msg, 1);
1149 }
1150
1151 void
1152 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1153 {
1154         struct lnet_msg *msg;
1155         struct lnet_msg *tmp;
1156
1157         lnet_net_unlock(cpt);
1158
1159         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1160                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1161                              0, 0, 0, msg->msg_hdr.payload_length);
1162                 list_del_init(&msg->msg_list);
1163                 msg->msg_no_resend = true;
1164                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1165                 lnet_finalize(msg, -ECANCELED);
1166         }
1167
1168         lnet_net_lock(cpt);
1169 }
1170
1171 void
1172 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1173 {
1174         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1175         struct lnet_peer *lp;
1176         struct lnet_ni *rxni = msg->msg_rxni;
1177         struct lnet_msg *msg2;
1178
1179         if (msg->msg_rtrcredit) {
1180                 /* give back global router credits */
1181                 struct lnet_rtrbuf *rb;
1182                 struct lnet_rtrbufpool *rbp;
1183
1184                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1185                  * there until it gets one allocated, or aborts the wait
1186                  * itself */
1187                 LASSERT(msg->msg_kiov != NULL);
1188
1189                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1190                 rbp = rb->rb_pool;
1191
1192                 msg->msg_kiov = NULL;
1193                 msg->msg_rtrcredit = 0;
1194
1195                 LASSERT(rbp == lnet_msg2bufpool(msg));
1196
1197                 LASSERT((rbp->rbp_credits > 0) ==
1198                         !list_empty(&rbp->rbp_bufs));
1199
1200                 /* If routing is now turned off, we just drop this buffer and
1201                  * don't bother trying to return credits.  */
1202                 if (!the_lnet.ln_routing) {
1203                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1204                         goto routing_off;
1205                 }
1206
1207                 /* It is possible that a user has lowered the desired number of
1208                  * buffers in this pool.  Make sure we never put back
1209                  * more buffers than the stated number. */
1210                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1211                         /* Discard this buffer so we don't have too
1212                          * many. */
1213                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1214                         rbp->rbp_nbuffers--;
1215                 } else {
1216                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1217                         rbp->rbp_credits++;
1218                         if (rbp->rbp_credits <= 0)
1219                                 lnet_schedule_blocked_locked(rbp);
1220                 }
1221         }
1222
1223 routing_off:
1224         if (msg->msg_peerrtrcredit) {
1225                 LASSERT(rxpeerni);
1226                 LASSERT(rxpeerni->lpni_peer_net);
1227                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1228
1229                 /* give back peer router credits */
1230                 msg->msg_peerrtrcredit = 0;
1231
1232                 spin_lock(&rxpeerni->lpni_lock);
1233                 rxpeerni->lpni_rtrcredits++;
1234                 spin_unlock(&rxpeerni->lpni_lock);
1235
1236                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1237                 spin_lock(&lp->lp_lock);
1238
1239                 /* drop all messages which are queued to be routed on that
1240                  * peer. */
1241                 if (!the_lnet.ln_routing) {
1242                         LIST_HEAD(drop);
1243                         list_splice_init(&lp->lp_rtrq, &drop);
1244                         spin_unlock(&lp->lp_lock);
1245                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1246                 } else if (!list_empty(&lp->lp_rtrq)) {
1247                         int msg2_cpt;
1248
1249                         msg2 = list_first_entry(&lp->lp_rtrq,
1250                                                 struct lnet_msg, msg_list);
1251                         list_del(&msg2->msg_list);
1252                         msg2_cpt = msg2->msg_rx_cpt;
1253                         spin_unlock(&lp->lp_lock);
1254                         /*
1255                          * messages on the lp_rtrq can be from any NID in
1256                          * the peer, which means they might have different
1257                          * cpts. We need to make sure we lock the right
1258                          * one.
1259                          */
1260                         if (msg2_cpt != msg->msg_rx_cpt) {
1261                                 lnet_net_unlock(msg->msg_rx_cpt);
1262                                 lnet_net_lock(msg2_cpt);
1263                         }
1264                         (void) lnet_post_routed_recv_locked(msg2, 1);
1265                         if (msg2_cpt != msg->msg_rx_cpt) {
1266                                 lnet_net_unlock(msg2_cpt);
1267                                 lnet_net_lock(msg->msg_rx_cpt);
1268                         }
1269                 } else {
1270                         spin_unlock(&lp->lp_lock);
1271                 }
1272         }
1273         if (rxni != NULL) {
1274                 msg->msg_rxni = NULL;
1275                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1276         }
1277         if (rxpeerni != NULL) {
1278                 msg->msg_rxpeer = NULL;
1279                 lnet_peer_ni_decref_locked(rxpeerni);
1280         }
1281 }
1282
1283 static struct lnet_peer_ni *
1284 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1285                     struct lnet_peer *peer,
1286                     struct lnet_peer_ni *best_lpni,
1287                     struct lnet_peer_net *peer_net)
1288 {
1289         /*
1290          * Look at the peer NIs for the destination peer that connect
1291          * to the chosen net. If a peer_ni is preferred when using the
1292          * best_ni to communicate, we use that one. If there is no
1293          * preferred peer_ni, or there are multiple preferred peer_ni,
1294          * the available transmit credits are used. If the transmit
1295          * credits are equal, we round-robin over the peer_ni.
1296          */
1297         struct lnet_peer_ni *lpni = NULL;
1298         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1299                 INT_MIN;
1300         int best_lpni_healthv = (best_lpni) ?
1301                 atomic_read(&best_lpni->lpni_healthv) : 0;
1302         bool best_lpni_is_preferred = false;
1303         bool lpni_is_preferred;
1304         int lpni_healthv;
1305         __u32 lpni_sel_prio;
1306         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1307
1308         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1309                 /*
1310                  * if the best_ni we've chosen aleady has this lpni
1311                  * preferred, then let's use it
1312                  */
1313                 if (best_ni) {
1314                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(
1315                                 lpni, &best_ni->ni_nid);
1316                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1317                                libcfs_nidstr(&best_ni->ni_nid),
1318                                lpni_is_preferred);
1319                 } else {
1320                         lpni_is_preferred = false;
1321                 }
1322
1323                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1324                 lpni_sel_prio = lpni->lpni_sel_priority;
1325
1326                 if (best_lpni)
1327                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1328                                 libcfs_nidstr(&lpni->lpni_nid),
1329                                 libcfs_nidstr(&best_lpni->lpni_nid),
1330                                 lpni_healthv, best_lpni_healthv,
1331                                 lpni_sel_prio, best_sel_prio,
1332                                 lpni->lpni_txcredits, best_lpni_credits,
1333                                 lpni->lpni_seq, best_lpni->lpni_seq);
1334                 else
1335                         goto select_lpni;
1336
1337                 /* pick the healthiest peer ni */
1338                 if (lpni_healthv < best_lpni_healthv)
1339                         continue;
1340                 else if (lpni_healthv > best_lpni_healthv) {
1341                         if (best_lpni_is_preferred)
1342                                 best_lpni_is_preferred = false;
1343                         goto select_lpni;
1344                 }
1345
1346                 if (lpni_sel_prio > best_sel_prio)
1347                         continue;
1348                 else if (lpni_sel_prio < best_sel_prio) {
1349                         if (best_lpni_is_preferred)
1350                                 best_lpni_is_preferred = false;
1351                         goto select_lpni;
1352                 }
1353
1354                 /* if this is a preferred peer use it */
1355                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1356                         best_lpni_is_preferred = true;
1357                         goto select_lpni;
1358                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1359                         /* this is not the preferred peer so let's ignore
1360                          * it.
1361                          */
1362                         continue;
1363                 }
1364
1365                 if (lpni->lpni_txcredits < best_lpni_credits)
1366                         /* We already have a peer that has more credits
1367                          * available than this one. No need to consider
1368                          * this peer further.
1369                          */
1370                         continue;
1371                 else if (lpni->lpni_txcredits > best_lpni_credits)
1372                         goto select_lpni;
1373
1374                 /* The best peer found so far and the current peer
1375                  * have the same number of available credits let's
1376                  * make sure to select between them using Round Robin
1377                  */
1378                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1379                         continue;
1380 select_lpni:
1381                 best_lpni_is_preferred = lpni_is_preferred;
1382                 best_lpni_healthv = lpni_healthv;
1383                 best_sel_prio = lpni_sel_prio;
1384                 best_lpni = lpni;
1385                 best_lpni_credits = lpni->lpni_txcredits;
1386         }
1387
1388         /* if we still can't find a peer ni then we can't reach it */
1389         if (!best_lpni) {
1390                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1391                         LNET_NIDNET(dst_nid);
1392                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1393                                 libcfs_net2str(net_id));
1394                 return NULL;
1395         }
1396
1397         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1398                libcfs_nidstr(&best_lpni->lpni_nid));
1399
1400         return best_lpni;
1401 }
1402
1403 /*
1404  * Prerequisite: the best_ni should already be set in the sd
1405  * Find the best lpni.
1406  * If the net id is provided then restrict lpni selection on
1407  * that particular net.
1408  * Otherwise find any reachable lpni. When dealing with an MR
1409  * gateway and it has multiple lpnis which we can use
1410  * we want to select the best one from the list of reachable
1411  * ones.
1412  */
1413 static inline struct lnet_peer_ni *
1414 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1415                     struct lnet_peer *peer, __u32 net_id)
1416 {
1417         struct lnet_peer_net *peer_net;
1418
1419         /* find the best_lpni on any local network */
1420         if (net_id == LNET_NET_ANY) {
1421                 struct lnet_peer_ni *best_lpni = NULL;
1422                 struct lnet_peer_net *lpn;
1423                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1424                         /* no net specified find any reachable peer ni */
1425                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1426                                 continue;
1427                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1428                                                         best_lpni, lpn);
1429                 }
1430
1431                 return best_lpni;
1432         }
1433         /* restrict on the specified net */
1434         peer_net = lnet_peer_get_net_locked(peer, net_id);
1435         if (peer_net)
1436                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1437
1438         return NULL;
1439 }
1440
1441 static int
1442 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1443 {
1444         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1445                 return 1;
1446
1447         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1448                 return -1;
1449
1450         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1451                 return 1;
1452
1453         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1454                 return -1;
1455
1456         return 0;
1457 }
1458
1459 /* Compare route priorities and hop counts */
1460 static int
1461 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1462 {
1463         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1464         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1465
1466         if (r1->lr_priority < r2->lr_priority)
1467                 return 1;
1468
1469         if (r1->lr_priority > r2->lr_priority)
1470                 return -1;
1471
1472         if (r1_hops < r2_hops)
1473                 return 1;
1474
1475         if (r1_hops > r2_hops)
1476                 return -1;
1477
1478         return 0;
1479 }
1480
1481 static struct lnet_route *
1482 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1483                        struct lnet_peer_ni *remote_lpni,
1484                        struct lnet_route **prev_route,
1485                        struct lnet_peer_ni **gwni)
1486 {
1487         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1488         struct lnet_route *best_route;
1489         struct lnet_route *last_route;
1490         struct lnet_route *route;
1491         int rc;
1492         bool best_rte_is_preferred = false;
1493         struct lnet_nid *gw_pnid;
1494
1495         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1496                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1497
1498         best_route = last_route = NULL;
1499         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1500                 if (!lnet_is_route_alive(route))
1501                         continue;
1502                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1503
1504                 /* no protection on below fields, but it's harmless */
1505                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1506                         last_route = route;
1507
1508                 /* if the best route found is in the preferred list then
1509                  * tag it as preferred and use it later on. But if we
1510                  * didn't find any routes which are on the preferred list
1511                  * then just use the best route possible.
1512                  */
1513                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1514
1515                 if (!best_route || (rc && !best_rte_is_preferred)) {
1516                         /* Restrict the selection of the router NI on the
1517                          * src_net provided. If the src_net is LNET_NID_ANY,
1518                          * then select the best interface available.
1519                          */
1520                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1521                                                    route->lr_gateway,
1522                                                    src_net);
1523                         if (!lpni) {
1524                                 CDEBUG(D_NET,
1525                                        "Gateway %s does not have a peer NI on net %s\n",
1526                                        libcfs_nidstr(gw_pnid),
1527                                        libcfs_net2str(src_net));
1528                                 continue;
1529                         }
1530                 }
1531
1532                 if (rc && !best_rte_is_preferred) {
1533                         /* This is the first preferred route we found,
1534                          * so it beats any route found previously
1535                          */
1536                         best_route = route;
1537                         if (!last_route)
1538                                 last_route = route;
1539                         best_gw_ni = lpni;
1540                         best_rte_is_preferred = true;
1541                         CDEBUG(D_NET, "preferred gw = %s\n",
1542                                libcfs_nidstr(gw_pnid));
1543                         continue;
1544                 } else if ((!rc) && best_rte_is_preferred)
1545                         /* The best route we found so far is in the preferred
1546                          * list, so it beats any non-preferred route
1547                          */
1548                         continue;
1549
1550                 if (!best_route) {
1551                         best_route = last_route = route;
1552                         best_gw_ni = lpni;
1553                         continue;
1554                 }
1555
1556                 rc = lnet_compare_routes(route, best_route);
1557                 if (rc == -1)
1558                         continue;
1559
1560                 /* Restrict the selection of the router NI on the
1561                  * src_net provided. If the src_net is LNET_NID_ANY,
1562                  * then select the best interface available.
1563                  */
1564                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1565                                            route->lr_gateway,
1566                                            src_net);
1567                 if (!lpni) {
1568                         CDEBUG(D_NET,
1569                                "Gateway %s does not have a peer NI on net %s\n",
1570                                libcfs_nidstr(gw_pnid),
1571                                libcfs_net2str(src_net));
1572                         continue;
1573                 }
1574
1575                 if (rc == 1) {
1576                         best_route = route;
1577                         best_gw_ni = lpni;
1578                         continue;
1579                 }
1580
1581                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1582                 if (rc == -1)
1583                         continue;
1584
1585                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1586                         best_route = route;
1587                         best_gw_ni = lpni;
1588                         continue;
1589                 }
1590         }
1591
1592         *prev_route = last_route;
1593         *gwni = best_gw_ni;
1594
1595         return best_route;
1596 }
1597
1598 static inline unsigned int
1599 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1600 {
1601         if (dev_idx == UINT_MAX)
1602                 return UINT_MAX;
1603
1604         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1605             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1606                 return UINT_MAX;
1607
1608         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1609 }
1610
1611 static struct lnet_ni *
1612 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1613                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1614                  struct lnet_msg *msg, int md_cpt)
1615 {
1616         struct lnet_libmd *md = msg->msg_md;
1617         unsigned int offset = msg->msg_offset;
1618         unsigned int shortest_distance;
1619         struct lnet_ni *ni = NULL;
1620         int best_credits;
1621         int best_healthv;
1622         __u32 best_sel_prio;
1623         unsigned int best_dev_prio;
1624         int best_ni_fatal;
1625         unsigned int dev_idx = UINT_MAX;
1626         bool gpu = md ? (md->md_flags & LNET_MD_FLAG_GPU) : false;
1627
1628         if (gpu) {
1629                 struct page *page = lnet_get_first_page(md, offset);
1630
1631                 dev_idx = lnet_get_dev_idx(page);
1632         }
1633
1634         /*
1635          * If there is no peer_ni that we can send to on this network,
1636          * then there is no point in looking for a new best_ni here.
1637         */
1638         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1639                 return best_ni;
1640
1641         if (best_ni == NULL) {
1642                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1643                 shortest_distance = UINT_MAX;
1644                 best_dev_prio = UINT_MAX;
1645                 best_credits = INT_MIN;
1646                 best_healthv = 0;
1647                 best_ni_fatal = true;
1648         } else {
1649                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1650                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1651                                                      best_ni->ni_dev_cpt);
1652                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1653                 best_healthv = atomic_read(&best_ni->ni_healthv);
1654                 best_sel_prio = best_ni->ni_sel_priority;
1655                 best_ni_fatal = atomic_read(&best_ni->ni_fatal_error_on);
1656         }
1657
1658         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1659                 unsigned int distance;
1660                 int ni_credits;
1661                 int ni_healthv;
1662                 int ni_fatal;
1663                 __u32 ni_sel_prio;
1664                 unsigned int ni_dev_prio;
1665
1666                 ni_credits = atomic_read(&ni->ni_tx_credits);
1667                 ni_healthv = atomic_read(&ni->ni_healthv);
1668                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1669                 ni_sel_prio = ni->ni_sel_priority;
1670
1671                 /*
1672                  * calculate the distance from the CPT on which
1673                  * the message memory is allocated to the CPT of
1674                  * the NI's physical device
1675                  */
1676                 distance = cfs_cpt_distance(lnet_cpt_table(),
1677                                             md_cpt,
1678                                             ni->ni_dev_cpt);
1679
1680                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1681
1682                 /*
1683                  * All distances smaller than the NUMA range
1684                  * are treated equally.
1685                  */
1686                 if (!gpu && distance < lnet_numa_range)
1687                         distance = lnet_numa_range;
1688
1689                 /*
1690                  * Select on health, selection policy, direct dma prio,
1691                  * shorter distance, available credits, then round-robin.
1692                  */
1693                 if (best_ni)
1694                         CDEBUG(D_NET, "compare ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1695                                libcfs_nidstr(&ni->ni_nid),
1696                                ni_fatal ? "y" : "n", ni_credits, distance,
1697                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1698                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1699                                : "not selected",
1700                                best_ni_fatal ? "y" : "n", best_credits,
1701                                shortest_distance,
1702                                (best_ni) ? best_ni->ni_seq : 0,
1703                                best_sel_prio, best_dev_prio, best_healthv);
1704                 else
1705                         goto select_ni;
1706
1707                 if (ni_fatal && !best_ni_fatal)
1708                         continue;
1709                 else if (!ni_fatal && best_ni_fatal)
1710                         goto select_ni;
1711
1712                 if (ni_healthv < best_healthv)
1713                         continue;
1714                 else if (ni_healthv > best_healthv)
1715                         goto select_ni;
1716
1717                 if (ni_sel_prio > best_sel_prio)
1718                         continue;
1719                 else if (ni_sel_prio < best_sel_prio)
1720                         goto select_ni;
1721
1722                 if (ni_dev_prio > best_dev_prio)
1723                         continue;
1724                 else if (ni_dev_prio < best_dev_prio)
1725                         goto select_ni;
1726
1727                 if (distance > shortest_distance)
1728                         continue;
1729                 else if (distance < shortest_distance)
1730                         goto select_ni;
1731
1732                 if (ni_credits < best_credits)
1733                         continue;
1734                 else if (ni_credits > best_credits)
1735                         goto select_ni;
1736
1737                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1738                         continue;
1739
1740 select_ni:
1741                 best_sel_prio = ni_sel_prio;
1742                 best_dev_prio = ni_dev_prio;
1743                 shortest_distance = distance;
1744                 best_healthv = ni_healthv;
1745                 best_ni = ni;
1746                 best_credits = ni_credits;
1747                 best_ni_fatal = ni_fatal;
1748         }
1749
1750         CDEBUG(D_NET, "selected best_ni %s\n",
1751                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1752
1753         return best_ni;
1754 }
1755
1756 static bool
1757 lnet_reserved_msg(struct lnet_msg *msg)
1758 {
1759         if (msg->msg_type == LNET_MSG_PUT) {
1760                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1761                         return true;
1762         } else if (msg->msg_type == LNET_MSG_GET) {
1763                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1764                         return true;
1765         }
1766         return false;
1767 }
1768
1769 /* Can the specified message trigger peer discovery?
1770  *
1771  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1772  * because such traffic is required to perform discovery. We therefore
1773  * exclude all GET and PUT on that portal. We also exclude all ACK and
1774  * REPLY traffic, but that is because the portal is not tracked in the
1775  * message structure for these message types. We could restrict this
1776  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1777  */
1778 static bool
1779 lnet_msg_discovery(struct lnet_msg *msg)
1780 {
1781         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1782 }
1783
1784 /* Is the specified message an LNet ping?
1785  */
1786 static bool
1787 lnet_msg_is_ping(struct lnet_msg *msg)
1788 {
1789         if (msg->msg_type == LNET_MSG_GET &&
1790             msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1791                 return true;
1792
1793         return false;
1794 }
1795
1796 #define SRC_SPEC        0x0001
1797 #define SRC_ANY         0x0002
1798 #define LOCAL_DST       0x0004
1799 #define REMOTE_DST      0x0008
1800 #define MR_DST          0x0010
1801 #define NMR_DST         0x0020
1802 #define SND_RESP        0x0040
1803
1804 /* The following to defines are used for return codes */
1805 #define REPEAT_SEND     0x1000
1806 #define PASS_THROUGH    0x2000
1807
1808 /* The different cases lnet_select pathway needs to handle */
1809 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1810 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1811 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1812 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1813 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1814 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1815 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1816 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
1817
1818 static int
1819 lnet_handle_lo_send(struct lnet_send_data *sd)
1820 {
1821         struct lnet_msg *msg = sd->sd_msg;
1822         int cpt = sd->sd_cpt;
1823
1824         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1825                 return -ESHUTDOWN;
1826
1827         /* No send credit hassles with LOLND */
1828         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1829         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1830         if (!msg->msg_routing)
1831                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1832         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1833         lnet_msg_commit(msg, cpt);
1834         msg->msg_txni = the_lnet.ln_loni;
1835
1836         return LNET_CREDIT_OK;
1837 }
1838
1839 static int
1840 lnet_handle_send(struct lnet_send_data *sd)
1841 {
1842         struct lnet_ni *best_ni = sd->sd_best_ni;
1843         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1844         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1845         struct lnet_msg *msg = sd->sd_msg;
1846         int cpt2;
1847         __u32 send_case = sd->sd_send_case;
1848         int rc;
1849         __u32 routing = send_case & REMOTE_DST;
1850         struct lnet_rsp_tracker *rspt;
1851
1852         /* Increment sequence number of the selected peer, peer net,
1853          * local ni and local net so that we pick the next ones
1854          * in Round Robin.
1855          */
1856         best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1857         best_lpni->lpni_peer_net->lpn_seq =
1858                 best_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1859         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1860         the_lnet.ln_net_seq++;
1861         best_ni->ni_net->net_seq = the_lnet.ln_net_seq;
1862         best_ni->ni_seq = best_ni->ni_net->net_seq;
1863
1864         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1865                libcfs_nidstr(&best_ni->ni_nid),
1866                best_ni->ni_seq, best_ni->ni_net->net_seq,
1867                atomic_read(&best_ni->ni_tx_credits),
1868                best_ni->ni_sel_priority,
1869                libcfs_nidstr(&best_lpni->lpni_nid),
1870                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1871                best_lpni->lpni_txcredits,
1872                best_lpni->lpni_sel_priority);
1873
1874         /*
1875          * grab a reference on the peer_ni so it sticks around even if
1876          * we need to drop and relock the lnet_net_lock below.
1877          */
1878         lnet_peer_ni_addref_locked(best_lpni);
1879
1880         /*
1881          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1882          * message. This ensures that we get a CPT that is correct for
1883          * the NI when the NI has been restricted to a subset of all CPTs.
1884          * If the selected CPT differs from the one currently locked, we
1885          * must unlock and relock the lnet_net_lock(), and then check whether
1886          * the configuration has changed. We don't have a hold on the best_ni
1887          * yet, and it may have vanished.
1888          */
1889         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1890         if (sd->sd_cpt != cpt2) {
1891                 __u32 seq = lnet_get_dlc_seq_locked();
1892                 lnet_net_unlock(sd->sd_cpt);
1893                 sd->sd_cpt = cpt2;
1894                 lnet_net_lock(sd->sd_cpt);
1895                 if (seq != lnet_get_dlc_seq_locked()) {
1896                         lnet_peer_ni_decref_locked(best_lpni);
1897                         return REPEAT_SEND;
1898                 }
1899         }
1900
1901         /*
1902          * store the best_lpni in the message right away to avoid having
1903          * to do the same operation under different conditions
1904          */
1905         msg->msg_txpeer = best_lpni;
1906         msg->msg_txni = best_ni;
1907
1908         /*
1909          * grab a reference for the best_ni since now it's in use in this
1910          * send. The reference will be dropped in lnet_finalize()
1911          */
1912         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1913
1914         /*
1915          * Always set the target.nid to the best peer picked. Either the
1916          * NID will be one of the peer NIDs selected, or the same NID as
1917          * what was originally set in the target or it will be the NID of
1918          * a router if this message should be routed
1919          */
1920         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1921
1922         /*
1923          * lnet_msg_commit assigns the correct cpt to the message, which
1924          * is used to decrement the correct refcount on the ni when it's
1925          * time to return the credits
1926          */
1927         lnet_msg_commit(msg, sd->sd_cpt);
1928
1929         /*
1930          * If we are routing the message then we keep the src_nid that was
1931          * set by the originator. If we are not routing then we are the
1932          * originator and set it here.
1933          */
1934         if (!msg->msg_routing)
1935                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1936
1937         if (routing) {
1938                 msg->msg_target_is_router = 1;
1939                 msg->msg_target.pid = LNET_PID_LUSTRE;
1940                 /*
1941                  * since we're routing we want to ensure that the
1942                  * msg_hdr.dest_nid is set to the final destination. When
1943                  * the router receives this message it knows how to route
1944                  * it.
1945                  *
1946                  * final_dst_lpni is set at the beginning of the
1947                  * lnet_select_pathway() function and is never changed.
1948                  * It's safe to use it here.
1949                  */
1950                 final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1951                 final_dst_lpni->lpni_peer_net->lpn_seq =
1952                         final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1953                 final_dst_lpni->lpni_seq =
1954                         final_dst_lpni->lpni_peer_net->lpn_seq;
1955                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1956         } else {
1957                 /*
1958                  * if we're not routing set the dest_nid to the best peer
1959                  * ni NID that we picked earlier in the algorithm.
1960                  */
1961                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1962         }
1963
1964         /*
1965          * if we have response tracker block update it with the next hop
1966          * nid
1967          */
1968         if (msg->msg_md) {
1969                 rspt = msg->msg_md->md_rspt_ptr;
1970                 if (rspt) {
1971                         rspt->rspt_next_hop_nid =
1972                                 msg->msg_txpeer->lpni_nid;
1973                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1974                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1975                 }
1976         }
1977
1978         rc = lnet_post_send_locked(msg, 0);
1979
1980         if (!rc)
1981                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1982                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1983                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1984                        libcfs_nidstr(&sd->sd_src_nid),
1985                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1986                        libcfs_nidstr(&sd->sd_dst_nid),
1987                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1988                        libcfs_nidstr(&sd->sd_rtr_nid),
1989                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1990
1991         return rc;
1992 }
1993
1994 static inline void
1995 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1996                          struct lnet_msg *msg)
1997 {
1998         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1999             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
2000                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
2001                        libcfs_nidstr(&lni->ni_nid),
2002                        libcfs_nidstr(&lpni->lpni_nid));
2003                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
2004         }
2005 }
2006
2007 /*
2008  * Source Specified
2009  * Local Destination
2010  * non-mr peer
2011  *
2012  * use the source and destination NIDs as the pathway
2013  */
2014 static int
2015 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2016 {
2017         /* the destination lpni is set before we get here. */
2018
2019         /* find local NI */
2020         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2021         if (!sd->sd_best_ni) {
2022                 CERROR("Can't send to %s: src %s is not a local nid\n",
2023                        libcfs_nidstr(&sd->sd_dst_nid),
2024                        libcfs_nidstr(&sd->sd_src_nid));
2025                 return -EINVAL;
2026         }
2027
2028         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2029
2030         return lnet_handle_send(sd);
2031 }
2032
2033 /*
2034  * Source Specified
2035  * Local Destination
2036  * MR Peer
2037  *
2038  * Don't run the selection algorithm on the peer NIs. By specifying the
2039  * local NID, we're also saying that we should always use the destination NID
2040  * provided. This handles the case where we should be using the same
2041  * destination NID for the all the messages which belong to the same RPC
2042  * request.
2043  */
2044 static int
2045 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2046 {
2047         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2048         if (!sd->sd_best_ni) {
2049                 CERROR("Can't send to %s: src %s is not a local nid\n",
2050                        libcfs_nidstr(&sd->sd_dst_nid),
2051                        libcfs_nidstr(&sd->sd_src_nid));
2052                 return -EINVAL;
2053         }
2054
2055         if (sd->sd_best_lpni &&
2056             nid_same(&sd->sd_best_lpni->lpni_nid,
2057                       &the_lnet.ln_loni->ni_nid))
2058                 return lnet_handle_lo_send(sd);
2059         else if (sd->sd_best_lpni)
2060                 return lnet_handle_send(sd);
2061
2062         CERROR("can't send to %s. no NI on %s\n",
2063                libcfs_nidstr(&sd->sd_dst_nid),
2064                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2065
2066         return -EHOSTUNREACH;
2067 }
2068
2069 struct lnet_ni *
2070 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2071                               struct lnet_peer *peer,
2072                               struct lnet_peer_net *peer_net,
2073                               struct lnet_msg *msg,
2074                               int cpt)
2075 {
2076         struct lnet_net *local_net;
2077         struct lnet_ni *best_ni;
2078
2079         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2080         if (!local_net)
2081                 return NULL;
2082
2083         /*
2084          * Iterate through the NIs in this local Net and select
2085          * the NI to send from. The selection is determined by
2086          * these 3 criterion in the following priority:
2087          *      1. NUMA
2088          *      2. NI available credits
2089          *      3. Round Robin
2090          */
2091         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2092                                    peer, peer_net, msg, cpt);
2093
2094         return best_ni;
2095 }
2096
2097 static int
2098 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2099                              int cpt)
2100 {
2101         struct lnet_peer *peer;
2102         struct lnet_peer_ni *new_lpni;
2103         int rc;
2104
2105         lnet_peer_ni_addref_locked(lpni);
2106
2107         peer = lpni->lpni_peer_net->lpn_peer;
2108
2109         if (lnet_peer_gw_discovery(peer)) {
2110                 lnet_peer_ni_decref_locked(lpni);
2111                 return 0;
2112         }
2113
2114         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2115                 lnet_peer_ni_decref_locked(lpni);
2116                 return 0;
2117         }
2118
2119         rc = lnet_discover_peer_locked(lpni, cpt, false);
2120         if (rc) {
2121                 lnet_peer_ni_decref_locked(lpni);
2122                 return rc;
2123         }
2124
2125         new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid);
2126         if (!new_lpni) {
2127                 lnet_peer_ni_decref_locked(lpni);
2128                 return -ENOENT;
2129         }
2130
2131         peer = new_lpni->lpni_peer_net->lpn_peer;
2132         spin_lock(&peer->lp_lock);
2133         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2134                 /* The peer NI did not change and the peer is up to date.
2135                  * Nothing more to do.
2136                  */
2137                 spin_unlock(&peer->lp_lock);
2138                 lnet_peer_ni_decref_locked(lpni);
2139                 lnet_peer_ni_decref_locked(new_lpni);
2140                 return 0;
2141         }
2142         spin_unlock(&peer->lp_lock);
2143
2144         /* Either the peer NI changed during discovery, or the peer isn't up
2145          * to date. In both cases we want to queue the message on the
2146          * (possibly new) peer's pending queue and queue the peer for discovery
2147          */
2148         msg->msg_sending = 0;
2149         msg->msg_txpeer = NULL;
2150         lnet_net_unlock(cpt);
2151         lnet_peer_queue_message(peer, msg);
2152         lnet_net_lock(cpt);
2153
2154         lnet_peer_ni_decref_locked(lpni);
2155         lnet_peer_ni_decref_locked(new_lpni);
2156
2157         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2158                msg, libcfs_nidstr(&peer->lp_primary_nid));
2159
2160         return LNET_DC_WAIT;
2161 }
2162
2163 static int
2164 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2165                              struct lnet_nid *dst_nid,
2166                              struct lnet_peer_ni **gw_lpni,
2167                              struct lnet_peer **gw_peer)
2168 {
2169         int rc;
2170         struct lnet_peer *gw;
2171         struct lnet_peer *lp;
2172         struct lnet_peer_net *lpn;
2173         struct lnet_peer_net *best_lpn = NULL;
2174         struct lnet_remotenet *rnet, *best_rnet = NULL;
2175         struct lnet_route *best_route = NULL;
2176         struct lnet_route *last_route = NULL;
2177         struct lnet_peer_ni *lpni = NULL;
2178         struct lnet_peer_ni *gwni = NULL;
2179         bool route_found = false;
2180         struct lnet_nid *src_nid =
2181                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2182                 ? &sd->sd_src_nid
2183                 : &sd->sd_best_ni->ni_nid;
2184         int best_lpn_healthv = 0;
2185         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2186
2187         CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n",
2188                LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified",
2189                libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid),
2190                libcfs_nidstr(&sd->sd_dst_nid));
2191
2192         /* If a router nid was specified then we are replying to a GET or
2193          * sending an ACK. In this case we use the gateway associated with the
2194          * specified router nid.
2195          */
2196         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2197                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2198                 if (gwni) {
2199                         gw = gwni->lpni_peer_net->lpn_peer;
2200                         lnet_peer_ni_decref_locked(gwni);
2201                         if (gw->lp_rtr_refcount)
2202                                 route_found = true;
2203                 } else {
2204                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2205                               libcfs_nidstr(&sd->sd_rtr_nid));
2206                 }
2207         }
2208
2209         if (!route_found) {
2210                 if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) {
2211                         /* If I'm routing this message then I need to find the
2212                          * next hop based on the destination NID
2213                          *
2214                          * We also find next hop based on the destination NID
2215                          * if the source NI was specified
2216                          */
2217                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2218                         if (!best_rnet) {
2219                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2220                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2221                                                 "any local NI" :
2222                                                 libcfs_nidstr(src_nid),
2223                                        libcfs_nidstr(&sd->sd_dst_nid));
2224                                 return -EHOSTUNREACH;
2225                         }
2226                         CDEBUG(D_NET, "best_rnet %s\n",
2227                                libcfs_net2str(best_rnet->lrn_net));
2228                 } else {
2229                         /* we've already looked up the initial lpni using
2230                          * dst_nid
2231                          */
2232                         lpni = sd->sd_best_lpni;
2233                         /* the peer tree must be in existence */
2234                         LASSERT(lpni && lpni->lpni_peer_net &&
2235                                 lpni->lpni_peer_net->lpn_peer);
2236                         lp = lpni->lpni_peer_net->lpn_peer;
2237
2238                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2239                                 /* is this remote network reachable?  */
2240                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2241                                 if (!rnet)
2242                                         continue;
2243
2244                                 if (!best_lpn)
2245                                         goto use_lpn;
2246                                 else
2247                                         CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n",
2248                                                libcfs_net2str(lpn->lpn_net_id),
2249                                                libcfs_net2str(best_lpn->lpn_net_id),
2250                                                lpn->lpn_healthv,
2251                                                best_lpn->lpn_healthv,
2252                                                lpn->lpn_sel_priority,
2253                                                best_lpn->lpn_sel_priority,
2254                                                lpn->lpn_seq,
2255                                                best_lpn->lpn_seq);
2256
2257                                 /* select the preferred peer net */
2258                                 if (best_lpn_healthv > lpn->lpn_healthv)
2259                                         continue;
2260                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2261                                         goto use_lpn;
2262
2263                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2264                                         continue;
2265                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2266                                         goto use_lpn;
2267
2268                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2269                                         continue;
2270 use_lpn:
2271                                 best_lpn_healthv = lpn->lpn_healthv;
2272                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2273                                 best_lpn = lpn;
2274                                 best_rnet = rnet;
2275                         }
2276
2277                         if (!best_lpn) {
2278                                 CERROR("peer %s has no available nets\n",
2279                                        libcfs_nidstr(&sd->sd_dst_nid));
2280                                 return -EHOSTUNREACH;
2281                         }
2282
2283                         CDEBUG(D_NET, "selected best_lpn %s\n",
2284                                libcfs_net2str(best_lpn->lpn_net_id));
2285
2286                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2287                                                                lnet_nid_to_nid4(&sd->sd_dst_nid),
2288                                                                lp,
2289                                                                best_lpn->lpn_net_id);
2290                         if (!sd->sd_best_lpni) {
2291                                 CERROR("peer %s is unreachable\n",
2292                                        libcfs_nidstr(&sd->sd_dst_nid));
2293                                 return -EHOSTUNREACH;
2294                         }
2295
2296                         /* We're attempting to round robin over the remote peer
2297                          * NI's so update the final destination we selected
2298                          */
2299                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2300                 }
2301
2302                 /*
2303                  * find the best route. Restrict the selection on the net of the
2304                  * local NI if we've already picked the local NI to send from.
2305                  * Otherwise, let's pick any route we can find and then find
2306                  * a local NI we can reach the route's gateway on. Any route we
2307                  * select will be reachable by virtue of the restriction we have
2308                  * when adding a route.
2309                  */
2310                 best_route = lnet_find_route_locked(best_rnet,
2311                                                     LNET_NID_NET(src_nid),
2312                                                     sd->sd_best_lpni,
2313                                                     &last_route, &gwni);
2314
2315                 if (!best_route) {
2316                         CERROR("no route to %s from %s\n",
2317                                libcfs_nidstr(dst_nid),
2318                                libcfs_nidstr(src_nid));
2319                         return -EHOSTUNREACH;
2320                 }
2321
2322                 if (!gwni) {
2323                         CERROR("Internal Error. Route expected to %s from %s\n",
2324                                libcfs_nidstr(dst_nid),
2325                                libcfs_nidstr(src_nid));
2326                         return -EFAULT;
2327                 }
2328
2329                 gw = best_route->lr_gateway;
2330                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2331         }
2332
2333         /*
2334          * If the router checker is not active then discover the gateway here.
2335          * This ensures we are able to take advantage of multi-rail routing, but
2336          * if the router checker is active then we do not unecessarily delay
2337          * messages while the gateway is being checked by the dedicated monitor
2338          * thread.
2339          *
2340          * NB: We're only checking the alive_router_check_interval here, rather
2341          * than calling lnet_router_checker_active(), because the other
2342          * conditions that are checked by that function are either
2343          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2344          * is not empty)
2345          */
2346         if (alive_router_check_interval <= 0) {
2347                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2348                 if (rc)
2349                         return rc;
2350         }
2351
2352         if (!sd->sd_best_ni) {
2353                 lpn = gwni->lpni_peer_net;
2354                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2355                                                                sd->sd_msg,
2356                                                                sd->sd_md_cpt);
2357                 if (!sd->sd_best_ni) {
2358                         CERROR("Internal Error. Expected local ni on %s but non found: %s\n",
2359                                libcfs_net2str(lpn->lpn_net_id),
2360                                libcfs_nidstr(&sd->sd_src_nid));
2361                         return -EFAULT;
2362                 }
2363         }
2364
2365         *gw_lpni = gwni;
2366         *gw_peer = gw;
2367
2368         /*
2369          * increment the sequence number since now we're sure we're
2370          * going to use this route
2371          */
2372         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2373                 LASSERT(best_route && last_route);
2374                 best_route->lr_seq = last_route->lr_seq + 1;
2375         }
2376
2377         return 0;
2378 }
2379
2380 /*
2381  * Handle two cases:
2382  *
2383  * Case 1:
2384  *  Source specified
2385  *  Remote destination
2386  *  Non-MR destination
2387  *
2388  * Case 2:
2389  *  Source specified
2390  *  Remote destination
2391  *  MR destination
2392  *
2393  * The handling of these two cases is similar. Even though the destination
2394  * can be MR or non-MR, we'll deal directly with the router.
2395  */
2396 static int
2397 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2398 {
2399         int rc;
2400         struct lnet_peer_ni *gw_lpni = NULL;
2401         struct lnet_peer *gw_peer = NULL;
2402
2403         /* find local NI */
2404         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2405         if (!sd->sd_best_ni) {
2406                 CERROR("Can't send to %s: src %s is not a local nid\n",
2407                        libcfs_nidstr(&sd->sd_dst_nid),
2408                        libcfs_nidstr(&sd->sd_src_nid));
2409                 return -EINVAL;
2410         }
2411
2412         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2413                                           &gw_lpni, &gw_peer);
2414         if (rc)
2415                 return rc;
2416
2417         if (sd->sd_send_case & NMR_DST)
2418                 /*
2419                  * since the final destination is non-MR let's set its preferred
2420                  * NID before we send
2421                  */
2422                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2423                                          sd->sd_msg);
2424
2425         /*
2426          * We're going to send to the gw found so let's set its
2427          * info
2428          */
2429         sd->sd_peer = gw_peer;
2430         sd->sd_best_lpni = gw_lpni;
2431
2432         return lnet_handle_send(sd);
2433 }
2434
2435 struct lnet_ni *
2436 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2437                                struct lnet_msg *msg, bool discovery)
2438 {
2439         struct lnet_peer_net *lpn = NULL;
2440         struct lnet_peer_net *best_lpn = NULL;
2441         struct lnet_net *net = NULL;
2442         struct lnet_net *best_net = NULL;
2443         struct lnet_ni *best_ni = NULL;
2444         int best_lpn_healthv = 0;
2445         int best_net_healthv = 0;
2446         int net_healthv;
2447         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2448         __u32 lpn_sel_prio;
2449         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2450         __u32 net_sel_prio;
2451
2452         /* If lp_disc_net_id is set, this peer is a router undergoing
2453          * discovery, and this message is an LNet ping, then this may be a
2454          * discovery message and we need to select an NI on the peer net
2455          * specified by lp_disc_net_id
2456          */
2457         if (peer->lp_disc_net_id &&
2458             (peer->lp_state & LNET_PEER_RTR_DISCOVERY) &&
2459             lnet_msg_is_ping(msg)) {
2460                 best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id);
2461                 if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id))
2462                         goto select_best_ni;
2463         }
2464
2465         /*
2466          * The peer can have multiple interfaces, some of them can be on
2467          * the local network and others on a routed network. We should
2468          * prefer the local network. However if the local network is not
2469          * available then we need to try the routed network
2470          */
2471
2472         /* go through all the peer nets and find the best_ni */
2473         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2474                 /*
2475                  * The peer's list of nets can contain non-local nets. We
2476                  * want to only examine the local ones.
2477                  */
2478                 net = lnet_get_net_locked(lpn->lpn_net_id);
2479                 if (!net)
2480                         continue;
2481
2482                 lpn_sel_prio = lpn->lpn_sel_priority;
2483                 net_healthv = lnet_get_net_healthv_locked(net);
2484                 net_sel_prio = net->net_sel_priority;
2485
2486                 if (!best_lpn)
2487                         goto select_lpn;
2488                 else
2489                         CDEBUG(D_NET,
2490                                "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n",
2491                                libcfs_net2str(lpn->lpn_net_id),
2492                                libcfs_net2str(best_lpn->lpn_net_id),
2493                                lpn->lpn_healthv,
2494                                best_lpn_healthv,
2495                                lpn_sel_prio,
2496                                best_lpn_sel_prio,
2497                                net_healthv,
2498                                best_net_healthv,
2499                                net_sel_prio,
2500                                best_net_sel_prio,
2501                                lpn->lpn_seq,
2502                                best_lpn->lpn_seq,
2503                                net->net_seq,
2504                                best_net->net_seq);
2505
2506                 /* always select the lpn with the best health */
2507                 if (best_lpn_healthv > lpn->lpn_healthv)
2508                         continue;
2509                 else if (best_lpn_healthv < lpn->lpn_healthv)
2510                         goto select_lpn;
2511
2512                 /* select the preferred peer and local nets */
2513                 if (best_lpn_sel_prio < lpn_sel_prio)
2514                         continue;
2515                 else if (best_lpn_sel_prio > lpn_sel_prio)
2516                         goto select_lpn;
2517
2518                 if (best_net_healthv > net_healthv)
2519                         continue;
2520                 else if (best_net_healthv < net_healthv)
2521                         goto select_lpn;
2522
2523                 if (best_net_sel_prio < net_sel_prio)
2524                         continue;
2525                 else if (best_net_sel_prio > net_sel_prio)
2526                         goto select_lpn;
2527
2528                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2529                         continue;
2530                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2531                         goto select_lpn;
2532
2533                 /* round robin over the local networks */
2534                 if (best_net->net_seq <= net->net_seq)
2535                         continue;
2536
2537 select_lpn:
2538                 best_net_healthv = net_healthv;
2539                 best_net_sel_prio = net_sel_prio;
2540                 best_lpn_healthv = lpn->lpn_healthv;
2541                 best_lpn_sel_prio = lpn_sel_prio;
2542                 best_lpn = lpn;
2543                 best_net = net;
2544         }
2545
2546         if (best_lpn) {
2547                 /* Select the best NI on the same net as best_lpn chosen
2548                  * above
2549                  */
2550 select_best_ni:
2551                 CDEBUG(D_NET, "selected best_lpn %s\n",
2552                        libcfs_net2str(best_lpn->lpn_net_id));
2553                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2554                                                         msg, md_cpt);
2555         }
2556
2557         return best_ni;
2558 }
2559
2560 static struct lnet_ni *
2561 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2562 {
2563         struct lnet_ni *best_ni = NULL;
2564         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2565         struct lnet_peer_ni *lpni_entry;
2566
2567         /*
2568          * We must use a consistent source address when sending to a
2569          * non-MR peer. However, a non-MR peer can have multiple NIDs
2570          * on multiple networks, and we may even need to talk to this
2571          * peer on multiple networks -- certain types of
2572          * load-balancing configuration do this.
2573          *
2574          * So we need to pick the NI the peer prefers for this
2575          * particular network.
2576          */
2577         LASSERT(peer_net);
2578         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2579                             lpni_peer_nis) {
2580                 if (lpni_entry->lpni_pref_nnids == 0)
2581                         continue;
2582                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2583                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2584                                                 cpt);
2585                 break;
2586         }
2587
2588         return best_ni;
2589 }
2590
2591 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2592 static int
2593 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2594 {
2595         struct lnet_ni *best_ni = NULL;
2596
2597         /*
2598          * We must use a consistent source address when sending to a
2599          * non-MR peer. However, a non-MR peer can have multiple NIDs
2600          * on multiple networks, and we may even need to talk to this
2601          * peer on multiple networks -- certain types of
2602          * load-balancing configuration do this.
2603          *
2604          * So we need to pick the NI the peer prefers for this
2605          * particular network.
2606          *
2607          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2608          * traffic doesn't care which source NI is used, and we don't actually
2609          * want to restrict local recovery pings to a single source NI.
2610          */
2611         if (!lnet_reserved_msg(sd->sd_msg))
2612                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2613                                                                sd->sd_cpt);
2614
2615         if (!best_ni)
2616                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2617                                                 sd->sd_best_lpni->lpni_peer_net,
2618                                                 sd->sd_msg,
2619                                                 sd->sd_md_cpt);
2620
2621         /* If there is no best_ni we don't have a route */
2622         if (!best_ni) {
2623                 CERROR("no path to %s from net %s\n",
2624                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2625                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2626                 return -EHOSTUNREACH;
2627         }
2628
2629         sd->sd_best_ni = best_ni;
2630
2631         /* Set preferred NI if necessary. */
2632         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2633
2634         return 0;
2635 }
2636
2637
2638 /*
2639  * Source not specified
2640  * Local destination
2641  * Non-MR Peer
2642  *
2643  * always use the same source NID for NMR peers
2644  * If we've talked to that peer before then we already have a preferred
2645  * source NI associated with it. Otherwise, we select a preferred local NI
2646  * and store it in the peer
2647  */
2648 static int
2649 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2650 {
2651         int rc = 0;
2652
2653         /* sd->sd_best_lpni is already set to the final destination */
2654
2655         /*
2656          * At this point we should've created the peer ni and peer. If we
2657          * can't find it, then something went wrong. Instead of assert
2658          * output a relevant message and fail the send
2659          */
2660         if (!sd->sd_best_lpni) {
2661                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2662                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2663                        libcfs_nidstr(&sd->sd_dst_nid));
2664                 return -EFAULT;
2665         }
2666
2667         if (sd->sd_msg->msg_routing) {
2668                 /* If I'm forwarding this message then I can choose any NI
2669                  * on the destination peer net
2670                  */
2671                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2672                                                                sd->sd_peer,
2673                                                                sd->sd_best_lpni->lpni_peer_net,
2674                                                                sd->sd_msg,
2675                                                                sd->sd_md_cpt);
2676                 if (!sd->sd_best_ni) {
2677                         CERROR("Unable to forward message to %s. No local NI available\n",
2678                                libcfs_nidstr(&sd->sd_dst_nid));
2679                         rc = -EHOSTUNREACH;
2680                 }
2681         } else
2682                 rc = lnet_select_preferred_best_ni(sd);
2683
2684         if (!rc)
2685                 rc = lnet_handle_send(sd);
2686
2687         return rc;
2688 }
2689
2690 static int
2691 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2692 {
2693         /*
2694          * NOTE we've already handled the remote peer case. So we only
2695          * need to worry about the local case here.
2696          *
2697          * if we're sending a response, ACK or reply, we need to send it
2698          * to the destination NID given to us. At this point we already
2699          * have the peer_ni we're suppose to send to, so just find the
2700          * best_ni on the peer net and use that. Since we're sending to an
2701          * MR peer then we can just run the selection algorithm on our
2702          * local NIs and pick the best one.
2703          */
2704         if (sd->sd_send_case & SND_RESP) {
2705                 sd->sd_best_ni =
2706                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2707                                                 sd->sd_best_lpni->lpni_peer_net,
2708                                                 sd->sd_msg,
2709                                                 sd->sd_md_cpt);
2710
2711                 if (!sd->sd_best_ni) {
2712                         /*
2713                          * We're not going to deal with not able to send
2714                          * a response to the provided final destination
2715                          */
2716                         CERROR("Can't send response to %s. No local NI available\n",
2717                                 libcfs_nidstr(&sd->sd_dst_nid));
2718                         return -EHOSTUNREACH;
2719                 }
2720
2721                 return lnet_handle_send(sd);
2722         }
2723
2724         /*
2725          * If we get here that means we're sending a fresh request, PUT or
2726          * GET, so we need to run our standard selection algorithm.
2727          * First find the best local interface that's on any of the peer's
2728          * networks.
2729          */
2730         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2731                                         sd->sd_md_cpt,
2732                                         sd->sd_msg,
2733                                         lnet_msg_discovery(sd->sd_msg));
2734         if (sd->sd_best_ni) {
2735                 sd->sd_best_lpni =
2736                   lnet_find_best_lpni(sd->sd_best_ni,
2737                                              lnet_nid_to_nid4(&sd->sd_dst_nid),
2738                                       sd->sd_peer,
2739                                       sd->sd_best_ni->ni_net->net_id);
2740
2741                 /*
2742                  * if we're successful in selecting a peer_ni on the local
2743                  * network, then send to it. Otherwise fall through and
2744                  * try and see if we can reach it over another routed
2745                  * network
2746                  */
2747                 if (sd->sd_best_lpni &&
2748                     nid_same(&sd->sd_best_lpni->lpni_nid,
2749                              &the_lnet.ln_loni->ni_nid)) {
2750                         /*
2751                          * in case we initially started with a routed
2752                          * destination, let's reset to local
2753                          */
2754                         sd->sd_send_case &= ~REMOTE_DST;
2755                         sd->sd_send_case |= LOCAL_DST;
2756                         return lnet_handle_lo_send(sd);
2757                 } else if (sd->sd_best_lpni) {
2758                         /*
2759                          * in case we initially started with a routed
2760                          * destination, let's reset to local
2761                          */
2762                         sd->sd_send_case &= ~REMOTE_DST;
2763                         sd->sd_send_case |= LOCAL_DST;
2764                         return lnet_handle_send(sd);
2765                 }
2766
2767                 CERROR("Internal Error. Expected to have a best_lpni: "
2768                        "%s -> %s\n",
2769                        libcfs_nidstr(&sd->sd_src_nid),
2770                        libcfs_nidstr(&sd->sd_dst_nid));
2771
2772                 return -EFAULT;
2773         }
2774
2775         /*
2776          * Peer doesn't have a local network. Let's see if there is
2777          * a remote network we can reach it on.
2778          */
2779         return PASS_THROUGH;
2780 }
2781
2782 /*
2783  * Case 1:
2784  *      Source NID not specified
2785  *      Local destination
2786  *      MR peer
2787  *
2788  * Case 2:
2789  *      Source NID not speified
2790  *      Remote destination
2791  *      MR peer
2792  *
2793  * In both of these cases if we're sending a response, ACK or REPLY, then
2794  * we need to send to the destination NID provided.
2795  *
2796  * In the remote case let's deal with MR routers.
2797  *
2798  */
2799
2800 static int
2801 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2802 {
2803         int rc = 0;
2804         struct lnet_peer *gw_peer = NULL;
2805         struct lnet_peer_ni *gw_lpni = NULL;
2806
2807         /*
2808          * handle sending a response to a remote peer here so we don't
2809          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2810          */
2811         if (sd->sd_send_case & REMOTE_DST &&
2812             sd->sd_send_case & SND_RESP) {
2813                 struct lnet_peer_ni *gw;
2814                 struct lnet_peer *gw_peer;
2815
2816                 rc = lnet_handle_find_routed_path(
2817                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2818                 if (rc < 0) {
2819                         CERROR("Can't send response to %s. No route available\n",
2820                                libcfs_nidstr(&sd->sd_dst_nid));
2821                         return -EHOSTUNREACH;
2822                 } else if (rc > 0) {
2823                         return rc;
2824                 }
2825
2826                 sd->sd_best_lpni = gw;
2827                 sd->sd_peer = gw_peer;
2828
2829                 return lnet_handle_send(sd);
2830         }
2831
2832         /*
2833          * Even though the NID for the peer might not be on a local network,
2834          * since the peer is MR there could be other interfaces on the
2835          * local network. In that case we'd still like to prefer the local
2836          * network over the routed network. If we're unable to do that
2837          * then we select the best router among the different routed networks,
2838          * and if the router is MR then we can deal with it as such.
2839          */
2840         rc = lnet_handle_any_mr_dsta(sd);
2841         if (rc != PASS_THROUGH)
2842                 return rc;
2843
2844         /*
2845          * Now that we must route to the destination, we must consider the
2846          * MR case, where the destination has multiple interfaces, some of
2847          * which we can route to and others we do not. For this reason we
2848          * need to select the destination which we can route to and if
2849          * there are multiple, we need to round robin.
2850          */
2851         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2852                                           &gw_lpni, &gw_peer);
2853         if (rc)
2854                 return rc;
2855
2856         sd->sd_send_case &= ~LOCAL_DST;
2857         sd->sd_send_case |= REMOTE_DST;
2858
2859         sd->sd_peer = gw_peer;
2860         sd->sd_best_lpni = gw_lpni;
2861
2862         return lnet_handle_send(sd);
2863 }
2864
2865 /*
2866  * Source not specified
2867  * Remote destination
2868  * Non-MR peer
2869  *
2870  * Must send to the specified peer NID using the same source NID that
2871  * we've used before. If it's the first time to talk to that peer then
2872  * find the source NI and assign it as preferred to that peer
2873  */
2874 static int
2875 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2876 {
2877         int rc;
2878         struct lnet_peer_ni *gw_lpni = NULL;
2879         struct lnet_peer *gw_peer = NULL;
2880
2881         /*
2882          * Let's see if we have a preferred NI to talk to this NMR peer
2883          */
2884         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2885                                                               sd->sd_cpt);
2886
2887         /*
2888          * find the router and that'll find the best NI if we didn't find
2889          * it already.
2890          */
2891         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2892                                           &gw_peer);
2893         if (rc)
2894                 return rc;
2895
2896         /*
2897          * set the best_ni we've chosen as the preferred one for
2898          * this peer
2899          */
2900         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2901
2902         /* we'll be sending to the gw */
2903         sd->sd_best_lpni = gw_lpni;
2904         sd->sd_peer = gw_peer;
2905
2906         return lnet_handle_send(sd);
2907 }
2908
2909 static int
2910 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2911 {
2912         /*
2913          * turn off the SND_RESP bit.
2914          * It will be checked in the case handling
2915          */
2916         __u32 send_case = sd->sd_send_case &= ~SND_RESP ;
2917
2918         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2919                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2920                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2921                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2922                 libcfs_nidstr(&sd->sd_dst_nid),
2923                 (send_case & LOCAL_DST) ? "local" : "routed");
2924
2925         switch (send_case) {
2926         /*
2927          * For all cases where the source is specified, we should always
2928          * use the destination NID, whether it's an MR destination or not,
2929          * since we're continuing a series of related messages for the
2930          * same RPC
2931          */
2932         case SRC_SPEC_LOCAL_NMR_DST:
2933                 return lnet_handle_spec_local_nmr_dst(sd);
2934         case SRC_SPEC_LOCAL_MR_DST:
2935                 return lnet_handle_spec_local_mr_dst(sd);
2936         case SRC_SPEC_ROUTER_NMR_DST:
2937         case SRC_SPEC_ROUTER_MR_DST:
2938                 return lnet_handle_spec_router_dst(sd);
2939         case SRC_ANY_LOCAL_NMR_DST:
2940                 return lnet_handle_any_local_nmr_dst(sd);
2941         case SRC_ANY_LOCAL_MR_DST:
2942         case SRC_ANY_ROUTER_MR_DST:
2943                 return lnet_handle_any_mr_dst(sd);
2944         case SRC_ANY_ROUTER_NMR_DST:
2945                 return lnet_handle_any_router_nmr_dst(sd);
2946         default:
2947                 CERROR("Unknown send case\n");
2948                 return -1;
2949         }
2950 }
2951
2952 static int
2953 lnet_select_pathway(struct lnet_nid *src_nid,
2954                     struct lnet_nid *dst_nid,
2955                     struct lnet_msg *msg,
2956                     struct lnet_nid *rtr_nid)
2957 {
2958         struct lnet_peer_ni *lpni;
2959         struct lnet_peer *peer;
2960         struct lnet_send_data send_data;
2961         int cpt, rc;
2962         int md_cpt;
2963         __u32 send_case = 0;
2964         bool final_hop;
2965         bool mr_forwarding_allowed;
2966
2967         memset(&send_data, 0, sizeof(send_data));
2968
2969         /*
2970          * get an initial CPT to use for locking. The idea here is not to
2971          * serialize the calls to select_pathway, so that as many
2972          * operations can run concurrently as possible. To do that we use
2973          * the CPT where this call is being executed. Later on when we
2974          * determine the CPT to use in lnet_message_commit, we switch the
2975          * lock and check if there was any configuration change.  If none,
2976          * then we proceed, if there is, then we restart the operation.
2977          */
2978         cpt = lnet_net_lock_current();
2979
2980         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2981         if (md_cpt == CFS_CPT_ANY)
2982                 md_cpt = cpt;
2983
2984 again:
2985
2986         /*
2987          * If we're being asked to send to the loopback interface, there
2988          * is no need to go through any selection. We can just shortcut
2989          * the entire process and send over lolnd
2990          */
2991         send_data.sd_msg = msg;
2992         send_data.sd_cpt = cpt;
2993         if (nid_is_lo0(dst_nid)) {
2994                 rc = lnet_handle_lo_send(&send_data);
2995                 lnet_net_unlock(cpt);
2996                 return rc;
2997         }
2998
2999         /*
3000          * find an existing peer_ni, or create one and mark it as having been
3001          * created due to network traffic. This call will create the
3002          * peer->peer_net->peer_ni tree.
3003          */
3004         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
3005         if (IS_ERR(lpni)) {
3006                 lnet_net_unlock(cpt);
3007                 return PTR_ERR(lpni);
3008         }
3009
3010         /*
3011          * Cache the original src_nid and rtr_nid. If we need to resend the
3012          * message then we'll need to know whether the src_nid was originally
3013          * specified for this message. If it was originally specified,
3014          * then we need to keep using the same src_nid since it's
3015          * continuing the same sequence of messages. Similarly, rtr_nid will
3016          * affect our choice of next hop.
3017          */
3018         if (src_nid)
3019                 msg->msg_src_nid_param = *src_nid;
3020         else
3021                 msg->msg_src_nid_param = LNET_ANY_NID;
3022         if (rtr_nid)
3023                 msg->msg_rtr_nid_param = *rtr_nid;
3024         else
3025                 msg->msg_rtr_nid_param = LNET_ANY_NID;
3026
3027         /*
3028          * If necessary, perform discovery on the peer that owns this peer_ni.
3029          * Note, this can result in the ownership of this peer_ni changing
3030          * to another peer object.
3031          */
3032         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
3033         if (rc) {
3034                 lnet_peer_ni_decref_locked(lpni);
3035                 lnet_net_unlock(cpt);
3036                 return rc;
3037         }
3038         lnet_peer_ni_decref_locked(lpni);
3039
3040         peer = lpni->lpni_peer_net->lpn_peer;
3041
3042         /*
3043          * Identify the different send cases
3044          */
3045         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3046                 send_case |= SRC_ANY;
3047                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3048                         send_case |= LOCAL_DST;
3049                 else
3050                         send_case |= REMOTE_DST;
3051         } else {
3052                 send_case |= SRC_SPEC;
3053                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3054                         send_case |= LOCAL_DST;
3055                 else
3056                         send_case |= REMOTE_DST;
3057         }
3058
3059         final_hop = false;
3060         if (msg->msg_routing && (send_case & LOCAL_DST))
3061                 final_hop = true;
3062
3063         /* Determine whether to allow MR forwarding for this message.
3064          * NB: MR forwarding is allowed if the message originator and the
3065          * destination are both MR capable, and the destination lpni that was
3066          * originally chosen by the originator is unhealthy or down.
3067          * We check the MR capability of the destination further below
3068          */
3069         mr_forwarding_allowed = false;
3070         if (final_hop) {
3071                 struct lnet_peer *src_lp;
3072                 struct lnet_peer_ni *src_lpni;
3073
3074                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3075                                                    NULL, cpt);
3076                 /* We don't fail the send if we hit any errors here. We'll just
3077                  * try to send it via non-multi-rail criteria
3078                  */
3079                 if (!IS_ERR(src_lpni)) {
3080                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3081                         lnet_peer_ni_decref_locked(src_lpni);
3082                         src_lp = lpni->lpni_peer_net->lpn_peer;
3083                         if (lnet_peer_is_multi_rail(src_lp) &&
3084                             !lnet_is_peer_ni_alive(lpni))
3085                                 mr_forwarding_allowed = true;
3086
3087                 }
3088                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3089                        mr_forwarding_allowed ? "allowed" : "not allowed");
3090         }
3091
3092         /*
3093          * Deal with the peer as NMR in the following cases:
3094          * 1. the peer is NMR
3095          * 2. We're trying to recover a specific peer NI
3096          * 3. I'm a router sending to the final destination and MR forwarding is
3097          *    not allowed for this message (as determined above).
3098          *    In this case the source of the message would've
3099          *    already selected the final destination so my job
3100          *    is to honor the selection.
3101          */
3102         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3103             (final_hop && !mr_forwarding_allowed))
3104                 send_case |= NMR_DST;
3105         else
3106                 send_case |= MR_DST;
3107
3108         if (lnet_msg_is_response(msg))
3109                 send_case |= SND_RESP;
3110
3111         /* assign parameters to the send_data */
3112         if (rtr_nid)
3113                 send_data.sd_rtr_nid = *rtr_nid;
3114         else
3115                 send_data.sd_rtr_nid = LNET_ANY_NID;
3116         if (src_nid)
3117                 send_data.sd_src_nid = *src_nid;
3118         else
3119                 send_data.sd_src_nid = LNET_ANY_NID;
3120         send_data.sd_dst_nid = *dst_nid;
3121         send_data.sd_best_lpni = lpni;
3122         /*
3123          * keep a pointer to the final destination in case we're going to
3124          * route, so we'll need to access it later
3125          */
3126         send_data.sd_final_dst_lpni = lpni;
3127         send_data.sd_peer = peer;
3128         send_data.sd_md_cpt = md_cpt;
3129         send_data.sd_send_case = send_case;
3130
3131         rc = lnet_handle_send_case_locked(&send_data);
3132
3133         /*
3134          * Update the local cpt since send_data.sd_cpt might've been
3135          * updated as a result of calling lnet_handle_send_case_locked().
3136          */
3137         cpt = send_data.sd_cpt;
3138
3139         if (rc == REPEAT_SEND)
3140                 goto again;
3141
3142         lnet_net_unlock(cpt);
3143
3144         return rc;
3145 }
3146
3147 int
3148 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3149           struct lnet_nid *rtr_nid)
3150 {
3151         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3152         int rc;
3153
3154         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3155         LASSERT(msg->msg_txpeer == NULL);
3156         LASSERT(msg->msg_txni == NULL);
3157         LASSERT(!msg->msg_sending);
3158         LASSERT(!msg->msg_target_is_router);
3159         LASSERT(!msg->msg_receiving);
3160
3161         msg->msg_sending = 1;
3162
3163         LASSERT(!msg->msg_tx_committed);
3164
3165         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3166         if (rc < 0) {
3167                 if (rc == -EHOSTUNREACH)
3168                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3169                 else
3170                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3171                 return rc;
3172         }
3173
3174         if (rc == LNET_CREDIT_OK)
3175                 lnet_ni_send(msg->msg_txni, msg);
3176
3177         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3178         return 0;
3179 }
3180
3181 enum lnet_mt_event_type {
3182         MT_TYPE_LOCAL_NI = 0,
3183         MT_TYPE_PEER_NI
3184 };
3185
3186 struct lnet_mt_event_info {
3187         enum lnet_mt_event_type mt_type;
3188         struct lnet_nid mt_nid;
3189 };
3190
3191 /* called with res_lock held */
3192 void
3193 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3194 {
3195         struct lnet_rsp_tracker *rspt;
3196
3197         /*
3198          * msg has a refcount on the MD so the MD is not going away.
3199          * The rspt queue for the cpt is protected by
3200          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3201          */
3202         if (!md->md_rspt_ptr)
3203                 return;
3204
3205         rspt = md->md_rspt_ptr;
3206
3207         /* debug code */
3208         LASSERT(rspt->rspt_cpt == cpt);
3209
3210         md->md_rspt_ptr = NULL;
3211
3212         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3213                 /*
3214                  * The monitor thread has invalidated this handle because the
3215                  * response timed out, but it failed to lookup the MD. That
3216                  * means this response tracker is on the zombie list. We can
3217                  * safely remove it under the resource lock (held by caller) and
3218                  * free the response tracker block.
3219                  */
3220                 list_del(&rspt->rspt_on_list);
3221                 lnet_rspt_free(rspt, cpt);
3222         } else {
3223                 /*
3224                  * invalidate the handle to indicate that a response has been
3225                  * received, which will then lead the monitor thread to clean up
3226                  * the rspt block.
3227                  */
3228                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3229         }
3230 }
3231
3232 void
3233 lnet_clean_zombie_rstqs(void)
3234 {
3235         struct lnet_rsp_tracker *rspt, *tmp;
3236         int i;
3237
3238         cfs_cpt_for_each(i, lnet_cpt_table()) {
3239                 list_for_each_entry_safe(rspt, tmp,
3240                                          the_lnet.ln_mt_zombie_rstqs[i],
3241                                          rspt_on_list) {
3242                         list_del(&rspt->rspt_on_list);
3243                         lnet_rspt_free(rspt, i);
3244                 }
3245         }
3246
3247         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3248 }
3249
3250 static void
3251 lnet_finalize_expired_responses(void)
3252 {
3253         struct lnet_libmd *md;
3254         struct lnet_rsp_tracker *rspt, *tmp;
3255         ktime_t now;
3256         int i;
3257
3258         if (the_lnet.ln_mt_rstq == NULL)
3259                 return;
3260
3261         cfs_cpt_for_each(i, lnet_cpt_table()) {
3262                 LIST_HEAD(local_queue);
3263
3264                 lnet_net_lock(i);
3265                 if (!the_lnet.ln_mt_rstq[i]) {
3266                         lnet_net_unlock(i);
3267                         continue;
3268                 }
3269                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3270                 lnet_net_unlock(i);
3271
3272                 now = ktime_get();
3273
3274                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3275                         /*
3276                          * The rspt mdh will be invalidated when a response
3277                          * is received or whenever we want to discard the
3278                          * block the monitor thread will walk the queue
3279                          * and clean up any rsts with an invalid mdh.
3280                          * The monitor thread will walk the queue until
3281                          * the first unexpired rspt block. This means that
3282                          * some rspt blocks which received their
3283                          * corresponding responses will linger in the
3284                          * queue until they are cleaned up eventually.
3285                          */
3286                         lnet_res_lock(i);
3287                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3288                                 lnet_res_unlock(i);
3289                                 list_del(&rspt->rspt_on_list);
3290                                 lnet_rspt_free(rspt, i);
3291                                 continue;
3292                         }
3293
3294                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3295                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3296                                 struct lnet_peer_ni *lpni;
3297                                 struct lnet_nid nid;
3298
3299                                 md = lnet_handle2md(&rspt->rspt_mdh);
3300                                 if (!md) {
3301                                         /* MD has been queued for unlink, but
3302                                          * rspt hasn't been detached (Note we've
3303                                          * checked above that the rspt_mdh is
3304                                          * valid). Since we cannot lookup the MD
3305                                          * we're unable to detach the rspt
3306                                          * ourselves. Thus, move the rspt to the
3307                                          * zombie list where we'll wait for
3308                                          * either:
3309                                          *   1. The remaining operations on the
3310                                          *   MD to complete. In this case the
3311                                          *   final operation will result in
3312                                          *   lnet_msg_detach_md()->
3313                                          *   lnet_detach_rsp_tracker() where
3314                                          *   we will clean up this response
3315                                          *   tracker.
3316                                          *   2. LNet to shutdown. In this case
3317                                          *   we'll wait until after all LND Nets
3318                                          *   have shutdown and then we can
3319                                          *   safely free any remaining response
3320                                          *   tracker blocks on the zombie list.
3321                                          * Note: We need to hold the resource
3322                                          * lock when adding to the zombie list
3323                                          * because we may have concurrent access
3324                                          * with lnet_detach_rsp_tracker().
3325                                          */
3326                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3327                                         list_move(&rspt->rspt_on_list,
3328                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3329                                         lnet_res_unlock(i);
3330                                         continue;
3331                                 }
3332                                 LASSERT(md->md_rspt_ptr == rspt);
3333                                 md->md_rspt_ptr = NULL;
3334                                 lnet_res_unlock(i);
3335
3336                                 LNetMDUnlink(rspt->rspt_mdh);
3337
3338                                 nid = rspt->rspt_next_hop_nid;
3339
3340                                 list_del(&rspt->rspt_on_list);
3341                                 lnet_rspt_free(rspt, i);
3342
3343                                 /* If we're shutting down we just want to clean
3344                                  * up the rspt blocks
3345                                  */
3346                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3347                                         continue;
3348
3349                                 lnet_net_lock(i);
3350                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3351                                 lnet_net_unlock(i);
3352
3353                                 CDEBUG(D_NET,
3354                                        "Response timeout: md = %p: nid = %s\n",
3355                                        md, libcfs_nidstr(&nid));
3356
3357                                 /*
3358                                  * If there is a timeout on the response
3359                                  * from the next hop decrement its health
3360                                  * value so that we don't use it
3361                                  */
3362                                 lnet_net_lock(0);
3363                                 lpni = lnet_peer_ni_find_locked(&nid);
3364                                 if (lpni) {
3365                                         lnet_handle_remote_failure_locked(lpni);
3366                                         lnet_peer_ni_decref_locked(lpni);
3367                                 }
3368                                 lnet_net_unlock(0);
3369                         } else {
3370                                 lnet_res_unlock(i);
3371                                 break;
3372                         }
3373                 }
3374
3375                 if (!list_empty(&local_queue)) {
3376                         lnet_net_lock(i);
3377                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3378                         lnet_net_unlock(i);
3379                 }
3380         }
3381 }
3382
3383 static void
3384 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3385 {
3386         struct lnet_msg *msg;
3387
3388         while (!list_empty(resendq)) {
3389                 struct lnet_peer_ni *lpni;
3390
3391                 msg = list_entry(resendq->next, struct lnet_msg,
3392                                  msg_list);
3393
3394                 list_del_init(&msg->msg_list);
3395
3396                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3397                 if (!lpni) {
3398                         lnet_net_unlock(cpt);
3399                         CERROR("Expected that a peer is already created for %s\n",
3400                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3401                         msg->msg_no_resend = true;
3402                         lnet_finalize(msg, -EFAULT);
3403                         lnet_net_lock(cpt);
3404                 } else {
3405                         int rc;
3406
3407                         lnet_peer_ni_decref_locked(lpni);
3408
3409                         lnet_net_unlock(cpt);
3410                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3411                                libcfs_nidstr(&msg->msg_src_nid_param),
3412                                libcfs_idstr(&msg->msg_target),
3413                                lnet_msgtyp2str(msg->msg_type),
3414                                msg->msg_recovery,
3415                                msg->msg_retry_count);
3416                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3417                                        &msg->msg_rtr_nid_param);
3418                         if (rc) {
3419                                 CERROR("Error sending %s to %s: %d\n",
3420                                        lnet_msgtyp2str(msg->msg_type),
3421                                        libcfs_idstr(&msg->msg_target), rc);
3422                                 msg->msg_no_resend = true;
3423                                 lnet_finalize(msg, rc);
3424                         }
3425                         lnet_net_lock(cpt);
3426                         if (!rc)
3427                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3428                 }
3429         }
3430 }
3431
3432 static void
3433 lnet_resend_pending_msgs(void)
3434 {
3435         int i;
3436
3437         cfs_cpt_for_each(i, lnet_cpt_table()) {
3438                 lnet_net_lock(i);
3439                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3440                 lnet_net_unlock(i);
3441         }
3442 }
3443
3444 /* called with cpt and ni_lock held */
3445 static void
3446 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3447 {
3448         struct lnet_handle_md recovery_mdh;
3449
3450         LNetInvalidateMDHandle(&recovery_mdh);
3451
3452         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3453             force) {
3454                 recovery_mdh = ni->ni_ping_mdh;
3455                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3456         }
3457         lnet_ni_unlock(ni);
3458         lnet_net_unlock(cpt);
3459         if (!LNetMDHandleIsInvalid(recovery_mdh))
3460                 LNetMDUnlink(recovery_mdh);
3461         lnet_net_lock(cpt);
3462         lnet_ni_lock(ni);
3463 }
3464
3465 static void
3466 lnet_recover_local_nis(void)
3467 {
3468         struct lnet_mt_event_info *ev_info;
3469         LIST_HEAD(processed_list);
3470         LIST_HEAD(local_queue);
3471         struct lnet_handle_md mdh;
3472         struct lnet_ni *tmp;
3473         struct lnet_ni *ni;
3474         struct lnet_nid nid;
3475         int healthv;
3476         int rc;
3477         time64_t now;
3478
3479         /*
3480          * splice the recovery queue on a local queue. We will iterate
3481          * through the local queue and update it as needed. Once we're
3482          * done with the traversal, we'll splice the local queue back on
3483          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3484          * will be traversed in the next iteration.
3485          */
3486         lnet_net_lock(0);
3487         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3488                          &local_queue);
3489         lnet_net_unlock(0);
3490
3491         now = ktime_get_seconds();
3492
3493         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3494                 /*
3495                  * if an NI is being deleted or it is now healthy, there
3496                  * is no need to keep it around in the recovery queue.
3497                  * The monitor thread is the only thread responsible for
3498                  * removing the NI from the recovery queue.
3499                  * Multiple threads can be adding NIs to the recovery
3500                  * queue.
3501                  */
3502                 healthv = atomic_read(&ni->ni_healthv);
3503
3504                 lnet_net_lock(0);
3505                 lnet_ni_lock(ni);
3506                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3507                     healthv == LNET_MAX_HEALTH_VALUE) {
3508                         list_del_init(&ni->ni_recovery);
3509                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3510                         lnet_ni_unlock(ni);
3511                         lnet_ni_decref_locked(ni, 0);
3512                         lnet_net_unlock(0);
3513                         continue;
3514                 }
3515
3516                 /*
3517                  * if the local NI failed recovery we must unlink the md.
3518                  * But we want to keep the local_ni on the recovery queue
3519                  * so we can continue the attempts to recover it.
3520                  */
3521                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3522                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3523                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3524                 }
3525
3526
3527                 lnet_ni_unlock(ni);
3528
3529                 if (now < ni->ni_next_ping) {
3530                         lnet_net_unlock(0);
3531                         continue;
3532                 }
3533
3534                 lnet_net_unlock(0);
3535
3536                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3537                        libcfs_nidstr(&ni->ni_nid));
3538
3539                 lnet_ni_lock(ni);
3540                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3541                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3542                         lnet_ni_unlock(ni);
3543
3544                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3545                         if (!ev_info) {
3546                                 CERROR("out of memory. Can't recover %s\n",
3547                                        libcfs_nidstr(&ni->ni_nid));
3548                                 lnet_ni_lock(ni);
3549                                 ni->ni_recovery_state &=
3550                                   ~LNET_NI_RECOVERY_PENDING;
3551                                 lnet_ni_unlock(ni);
3552                                 continue;
3553                         }
3554
3555                         mdh = ni->ni_ping_mdh;
3556                         /*
3557                          * Invalidate the ni mdh in case it's deleted.
3558                          * We'll unlink the mdh in this case below.
3559                          */
3560                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3561                         nid = ni->ni_nid;
3562
3563                         /*
3564                          * remove the NI from the local queue and drop the
3565                          * reference count to it while we're recovering
3566                          * it. The reason for that, is that the NI could
3567                          * be deleted, and the way the code is structured
3568                          * is if we don't drop the NI, then the deletion
3569                          * code will enter a loop waiting for the
3570                          * reference count to be removed while holding the
3571                          * ln_mutex_lock(). When we look up the peer to
3572                          * send to in lnet_select_pathway() we will try to
3573                          * lock the ln_mutex_lock() as well, leading to
3574                          * a deadlock. By dropping the refcount and
3575                          * removing it from the list, we allow for the NI
3576                          * to be removed, then we use the cached NID to
3577                          * look it up again. If it's gone, then we just
3578                          * continue examining the rest of the queue.
3579                          */
3580                         lnet_net_lock(0);
3581                         list_del_init(&ni->ni_recovery);
3582                         lnet_ni_decref_locked(ni, 0);
3583                         lnet_net_unlock(0);
3584
3585                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3586                         ev_info->mt_nid = nid;
3587                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3588                                             ev_info, the_lnet.ln_mt_handler,
3589                                             true);
3590                         /* lookup the nid again */
3591                         lnet_net_lock(0);
3592                         ni = lnet_nid_to_ni_locked(&nid, 0);
3593                         if (!ni) {
3594                                 /*
3595                                  * the NI has been deleted when we dropped
3596                                  * the ref count
3597                                  */
3598                                 lnet_net_unlock(0);
3599                                 LNetMDUnlink(mdh);
3600                                 continue;
3601                         }
3602                         ni->ni_ping_count++;
3603
3604                         ni->ni_ping_mdh = mdh;
3605                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3606                                                         now);
3607
3608                         if (rc) {
3609                                 lnet_ni_lock(ni);
3610                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3611                                 lnet_ni_unlock(ni);
3612                         }
3613                         lnet_net_unlock(0);
3614                 } else
3615                         lnet_ni_unlock(ni);
3616         }
3617
3618         /*
3619          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3620          * reexamined in the next iteration.
3621          */
3622         list_splice_init(&processed_list, &local_queue);
3623         lnet_net_lock(0);
3624         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3625         lnet_net_unlock(0);
3626 }
3627
3628 static int
3629 lnet_resendqs_create(void)
3630 {
3631         struct list_head **resendqs;
3632         resendqs = lnet_create_array_of_queues();
3633
3634         if (!resendqs)
3635                 return -ENOMEM;
3636
3637         lnet_net_lock(LNET_LOCK_EX);
3638         the_lnet.ln_mt_resendqs = resendqs;
3639         lnet_net_unlock(LNET_LOCK_EX);
3640
3641         return 0;
3642 }
3643
3644 static void
3645 lnet_clean_local_ni_recoveryq(void)
3646 {
3647         struct lnet_ni *ni;
3648
3649         /* This is only called when the monitor thread has stopped */
3650         lnet_net_lock(0);
3651
3652         while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
3653                                               struct lnet_ni,
3654                                               ni_recovery)) != NULL) {
3655                 list_del_init(&ni->ni_recovery);
3656                 lnet_ni_lock(ni);
3657                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3658                 lnet_ni_unlock(ni);
3659                 lnet_ni_decref_locked(ni, 0);
3660         }
3661
3662         lnet_net_unlock(0);
3663 }
3664
3665 static void
3666 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3667                                      bool force)
3668 {
3669         struct lnet_handle_md recovery_mdh;
3670
3671         LNetInvalidateMDHandle(&recovery_mdh);
3672
3673         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3674                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3675                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3676         }
3677         spin_unlock(&lpni->lpni_lock);
3678         lnet_net_unlock(cpt);
3679         if (!LNetMDHandleIsInvalid(recovery_mdh))
3680                 LNetMDUnlink(recovery_mdh);
3681         lnet_net_lock(cpt);
3682         spin_lock(&lpni->lpni_lock);
3683 }
3684
3685 static void
3686 lnet_clean_peer_ni_recoveryq(void)
3687 {
3688         struct lnet_peer_ni *lpni, *tmp;
3689
3690         lnet_net_lock(LNET_LOCK_EX);
3691
3692         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3693                                  lpni_recovery) {
3694                 list_del_init(&lpni->lpni_recovery);
3695                 spin_lock(&lpni->lpni_lock);
3696                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3697                 spin_unlock(&lpni->lpni_lock);
3698                 lnet_peer_ni_decref_locked(lpni);
3699         }
3700
3701         lnet_net_unlock(LNET_LOCK_EX);
3702 }
3703
3704 static void
3705 lnet_clean_resendqs(void)
3706 {
3707         struct lnet_msg *msg, *tmp;
3708         LIST_HEAD(msgs);
3709         int i;
3710
3711         cfs_cpt_for_each(i, lnet_cpt_table()) {
3712                 lnet_net_lock(i);
3713                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3714                 lnet_net_unlock(i);
3715                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3716                         list_del_init(&msg->msg_list);
3717                         msg->msg_no_resend = true;
3718                         lnet_finalize(msg, -ESHUTDOWN);
3719                 }
3720         }
3721
3722         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3723 }
3724
3725 static void
3726 lnet_recover_peer_nis(void)
3727 {
3728         struct lnet_mt_event_info *ev_info;
3729         LIST_HEAD(processed_list);
3730         LIST_HEAD(local_queue);
3731         struct lnet_handle_md mdh;
3732         struct lnet_peer_ni *lpni;
3733         struct lnet_peer_ni *tmp;
3734         struct lnet_nid nid;
3735         int healthv;
3736         int rc;
3737         time64_t now;
3738
3739         /*
3740          * Always use cpt 0 for locking across all interactions with
3741          * ln_mt_peerNIRecovq
3742          */
3743         lnet_net_lock(0);
3744         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3745                          &local_queue);
3746         lnet_net_unlock(0);
3747
3748         now = ktime_get_seconds();
3749
3750         list_for_each_entry_safe(lpni, tmp, &local_queue,
3751                                  lpni_recovery) {
3752                 /*
3753                  * The same protection strategy is used here as is in the
3754                  * local recovery case.
3755                  */
3756                 lnet_net_lock(0);
3757                 healthv = atomic_read(&lpni->lpni_healthv);
3758                 spin_lock(&lpni->lpni_lock);
3759                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3760                     healthv == LNET_MAX_HEALTH_VALUE) {
3761                         list_del_init(&lpni->lpni_recovery);
3762                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3763                         spin_unlock(&lpni->lpni_lock);
3764                         lnet_peer_ni_decref_locked(lpni);
3765                         lnet_net_unlock(0);
3766                         continue;
3767                 }
3768
3769                 /*
3770                  * If the peer NI has failed recovery we must unlink the
3771                  * md. But we want to keep the peer ni on the recovery
3772                  * queue so we can try to continue recovering it
3773                  */
3774                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3775                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3776                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3777                 }
3778
3779                 spin_unlock(&lpni->lpni_lock);
3780
3781                 if (now < lpni->lpni_next_ping) {
3782                         lnet_net_unlock(0);
3783                         continue;
3784                 }
3785
3786                 lnet_net_unlock(0);
3787
3788                 /*
3789                  * NOTE: we're racing with peer deletion from user space.
3790                  * It's possible that a peer is deleted after we check its
3791                  * state. In this case the recovery can create a new peer
3792                  */
3793                 spin_lock(&lpni->lpni_lock);
3794                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3795                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3796                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3797                         spin_unlock(&lpni->lpni_lock);
3798
3799                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3800                         if (!ev_info) {
3801                                 CERROR("out of memory. Can't recover %s\n",
3802                                        libcfs_nidstr(&lpni->lpni_nid));
3803                                 spin_lock(&lpni->lpni_lock);
3804                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3805                                 spin_unlock(&lpni->lpni_lock);
3806                                 continue;
3807                         }
3808
3809                         /* look at the comments in lnet_recover_local_nis() */
3810                         mdh = lpni->lpni_recovery_ping_mdh;
3811                         nid = lpni->lpni_nid;
3812                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3813                         lnet_net_lock(0);
3814                         list_del_init(&lpni->lpni_recovery);
3815                         lnet_peer_ni_decref_locked(lpni);
3816                         lnet_net_unlock(0);
3817
3818                         ev_info->mt_type = MT_TYPE_PEER_NI;
3819                         ev_info->mt_nid = nid;
3820                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3821                                             ev_info, the_lnet.ln_mt_handler,
3822                                             true);
3823                         lnet_net_lock(0);
3824                         /*
3825                          * lnet_peer_ni_find_locked() grabs a refcount for
3826                          * us. No need to take it explicitly.
3827                          */
3828                         lpni = lnet_peer_ni_find_locked(&nid);
3829                         if (!lpni) {
3830                                 lnet_net_unlock(0);
3831                                 LNetMDUnlink(mdh);
3832                                 continue;
3833                         }
3834
3835                         lpni->lpni_ping_count++;
3836
3837                         lpni->lpni_recovery_ping_mdh = mdh;
3838
3839                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3840                                                              &processed_list,
3841                                                              now);
3842                         if (rc) {
3843                                 spin_lock(&lpni->lpni_lock);
3844                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3845                                 spin_unlock(&lpni->lpni_lock);
3846                         }
3847
3848                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3849                         lnet_peer_ni_decref_locked(lpni);
3850                         lnet_net_unlock(0);
3851                 } else
3852                         spin_unlock(&lpni->lpni_lock);
3853         }
3854
3855         list_splice_init(&processed_list, &local_queue);
3856         lnet_net_lock(0);
3857         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3858         lnet_net_unlock(0);
3859 }
3860
3861 static int
3862 lnet_monitor_thread(void *arg)
3863 {
3864         time64_t rsp_timeout = 0;
3865         time64_t now;
3866
3867         wait_for_completion(&the_lnet.ln_started);
3868         /*
3869          * The monitor thread takes care of the following:
3870          *  1. Checks the aliveness of routers
3871          *  2. Checks if there are messages on the resend queue to resend
3872          *     them.
3873          *  3. Check if there are any NIs on the local recovery queue and
3874          *     pings them
3875          *  4. Checks if there are any NIs on the remote recovery queue
3876          *     and pings them.
3877          */
3878         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3879                 now = ktime_get_real_seconds();
3880
3881                 if (lnet_router_checker_active())
3882                         lnet_check_routers();
3883
3884                 lnet_resend_pending_msgs();
3885
3886                 if (now >= rsp_timeout) {
3887                         lnet_finalize_expired_responses();
3888                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3889                 }
3890
3891                 lnet_recover_local_nis();
3892                 lnet_recover_peer_nis();
3893
3894                 /*
3895                  * TODO do we need to check if we should sleep without
3896                  * timeout?  Technically, an active system will always
3897                  * have messages in flight so this check will always
3898                  * evaluate to false. And on an idle system do we care
3899                  * if we wake up every 1 second? Although, we've seen
3900                  * cases where we get a complaint that an idle thread
3901                  * is waking up unnecessarily.
3902                  */
3903                 wait_for_completion_interruptible_timeout(
3904                         &the_lnet.ln_mt_wait_complete,
3905                         cfs_time_seconds(1));
3906                 /* Must re-init the completion before testing anything,
3907                  * including ln_mt_state.
3908                  */
3909                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3910         }
3911
3912         /* Shutting down */
3913         lnet_net_lock(LNET_LOCK_EX);
3914         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3915         lnet_net_unlock(LNET_LOCK_EX);
3916
3917         /* signal that the monitor thread is exiting */
3918         up(&the_lnet.ln_mt_signal);
3919
3920         return 0;
3921 }
3922
3923 /*
3924  * lnet_send_ping
3925  * Sends a ping.
3926  * Returns == 0 if success
3927  * Returns > 0 if LNetMDBind or prior fails
3928  * Returns < 0 if LNetGet fails
3929  */
3930 int
3931 lnet_send_ping(struct lnet_nid *dest_nid,
3932                struct lnet_handle_md *mdh, int nnis,
3933                void *user_data, lnet_handler_t handler, bool recovery)
3934 {
3935         struct lnet_md md = { NULL };
3936         struct lnet_processid id;
3937         struct lnet_ping_buffer *pbuf;
3938         int rc;
3939
3940         if (LNET_NID_IS_ANY(dest_nid)) {
3941                 rc = -EHOSTUNREACH;
3942                 goto fail_error;
3943         }
3944
3945         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3946         if (!pbuf) {
3947                 rc = ENOMEM;
3948                 goto fail_error;
3949         }
3950
3951         /* initialize md content */
3952         md.start     = &pbuf->pb_info;
3953         md.length    = LNET_PING_INFO_SIZE(nnis);
3954         md.threshold = 2; /* GET/REPLY */
3955         md.max_size  = 0;
3956         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3957         md.user_ptr  = user_data;
3958         md.handler   = handler;
3959
3960         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3961         if (rc) {
3962                 lnet_ping_buffer_decref(pbuf);
3963                 CERROR("Can't bind MD: %d\n", rc);
3964                 rc = -rc; /* change the rc to positive */
3965                 goto fail_error;
3966         }
3967         id.pid = LNET_PID_LUSTRE;
3968         id.nid = *dest_nid;
3969
3970         rc = LNetGet(NULL, *mdh, &id,
3971                      LNET_RESERVED_PORTAL,
3972                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3973
3974         if (rc)
3975                 goto fail_unlink_md;
3976
3977         return 0;
3978
3979 fail_unlink_md:
3980         LNetMDUnlink(*mdh);
3981         LNetInvalidateMDHandle(mdh);
3982 fail_error:
3983         return rc;
3984 }
3985
3986 static void
3987 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3988                            int status, bool send, bool unlink_event)
3989 {
3990         struct lnet_nid *nid = &ev_info->mt_nid;
3991
3992         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3993                 struct lnet_ni *ni;
3994
3995                 lnet_net_lock(0);
3996                 ni = lnet_nid_to_ni_locked(nid, 0);
3997                 if (!ni) {
3998                         lnet_net_unlock(0);
3999                         return;
4000                 }
4001                 lnet_ni_lock(ni);
4002                 if (!send || (send && status != 0))
4003                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
4004                 if (status)
4005                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
4006                 lnet_ni_unlock(ni);
4007                 lnet_net_unlock(0);
4008
4009                 if (status != 0) {
4010                         CERROR("local NI (%s) recovery failed with %d\n",
4011                                libcfs_nidstr(nid), status);
4012                         return;
4013                 }
4014                 /*
4015                  * need to increment healthv for the ni here, because in
4016                  * the lnet_finalize() path we don't have access to this
4017                  * NI. And in order to get access to it, we'll need to
4018                  * carry forward too much information.
4019                  * In the peer case, it'll naturally be incremented
4020                  */
4021                 if (!unlink_event)
4022                         lnet_inc_healthv(&ni->ni_healthv,
4023                                          lnet_health_sensitivity);
4024         } else {
4025                 struct lnet_peer_ni *lpni;
4026                 int cpt;
4027
4028                 cpt = lnet_net_lock_current();
4029                 lpni = lnet_peer_ni_find_locked(nid);
4030                 if (!lpni) {
4031                         lnet_net_unlock(cpt);
4032                         return;
4033                 }
4034                 spin_lock(&lpni->lpni_lock);
4035                 if (!send || (send && status != 0))
4036                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
4037                 if (status)
4038                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
4039                 spin_unlock(&lpni->lpni_lock);
4040                 lnet_peer_ni_decref_locked(lpni);
4041                 lnet_net_unlock(cpt);
4042
4043                 if (status != 0)
4044                         CERROR("peer NI (%s) recovery failed with %d\n",
4045                                libcfs_nidstr(nid), status);
4046         }
4047 }
4048
4049 void
4050 lnet_mt_event_handler(struct lnet_event *event)
4051 {
4052         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4053         struct lnet_ping_buffer *pbuf;
4054
4055         /* TODO: remove assert */
4056         LASSERT(event->type == LNET_EVENT_REPLY ||
4057                 event->type == LNET_EVENT_SEND ||
4058                 event->type == LNET_EVENT_UNLINK);
4059
4060         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4061                event->status);
4062
4063         switch (event->type) {
4064         case LNET_EVENT_UNLINK:
4065                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4066                        libcfs_nidstr(&ev_info->mt_nid));
4067                 fallthrough;
4068         case LNET_EVENT_REPLY:
4069                 lnet_handle_recovery_reply(ev_info, event->status, false,
4070                                            event->type == LNET_EVENT_UNLINK);
4071                 break;
4072         case LNET_EVENT_SEND:
4073                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4074                                libcfs_nidstr(&ev_info->mt_nid),
4075                                (event->status) ? "unsuccessfully" :
4076                                "successfully", event->status);
4077                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4078                 break;
4079         default:
4080                 CERROR("Unexpected event: %d\n", event->type);
4081                 break;
4082         }
4083         if (event->unlinked) {
4084                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4085                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4086                 lnet_ping_buffer_decref(pbuf);
4087         }
4088 }
4089
4090 static int
4091 lnet_rsp_tracker_create(void)
4092 {
4093         struct list_head **rstqs;
4094         rstqs = lnet_create_array_of_queues();
4095
4096         if (!rstqs)
4097                 return -ENOMEM;
4098
4099         the_lnet.ln_mt_rstq = rstqs;
4100
4101         return 0;
4102 }
4103
4104 static void
4105 lnet_rsp_tracker_clean(void)
4106 {
4107         lnet_finalize_expired_responses();
4108
4109         cfs_percpt_free(the_lnet.ln_mt_rstq);
4110         the_lnet.ln_mt_rstq = NULL;
4111 }
4112
4113 int lnet_monitor_thr_start(void)
4114 {
4115         int rc = 0;
4116         struct task_struct *task;
4117
4118         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4119                 return -EALREADY;
4120
4121         rc = lnet_resendqs_create();
4122         if (rc)
4123                 return rc;
4124
4125         rc = lnet_rsp_tracker_create();
4126         if (rc)
4127                 goto clean_queues;
4128
4129         sema_init(&the_lnet.ln_mt_signal, 0);
4130
4131         lnet_net_lock(LNET_LOCK_EX);
4132         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4133         lnet_net_unlock(LNET_LOCK_EX);
4134         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4135         if (IS_ERR(task)) {
4136                 rc = PTR_ERR(task);
4137                 CERROR("Can't start monitor thread: %d\n", rc);
4138                 goto clean_thread;
4139         }
4140
4141         return 0;
4142
4143 clean_thread:
4144         lnet_net_lock(LNET_LOCK_EX);
4145         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4146         lnet_net_unlock(LNET_LOCK_EX);
4147         /* block until event callback signals exit */
4148         down(&the_lnet.ln_mt_signal);
4149         /* clean up */
4150         lnet_net_lock(LNET_LOCK_EX);
4151         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4152         lnet_net_unlock(LNET_LOCK_EX);
4153         lnet_rsp_tracker_clean();
4154         lnet_clean_local_ni_recoveryq();
4155         lnet_clean_peer_ni_recoveryq();
4156         lnet_clean_resendqs();
4157         the_lnet.ln_mt_handler = NULL;
4158         return rc;
4159 clean_queues:
4160         lnet_rsp_tracker_clean();
4161         lnet_clean_local_ni_recoveryq();
4162         lnet_clean_peer_ni_recoveryq();
4163         lnet_clean_resendqs();
4164         return rc;
4165 }
4166
4167 void lnet_monitor_thr_stop(void)
4168 {
4169         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4170                 return;
4171
4172         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4173         lnet_net_lock(LNET_LOCK_EX);
4174         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4175         lnet_net_unlock(LNET_LOCK_EX);
4176
4177         /* tell the monitor thread that we're shutting down */
4178         complete(&the_lnet.ln_mt_wait_complete);
4179
4180         /* block until monitor thread signals that it's done */
4181         mutex_unlock(&the_lnet.ln_api_mutex);
4182         down(&the_lnet.ln_mt_signal);
4183         mutex_lock(&the_lnet.ln_api_mutex);
4184         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4185
4186         /* perform cleanup tasks */
4187         lnet_rsp_tracker_clean();
4188         lnet_clean_local_ni_recoveryq();
4189         lnet_clean_peer_ni_recoveryq();
4190         lnet_clean_resendqs();
4191 }
4192
4193 void
4194 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4195                   __u32 msg_type)
4196 {
4197         lnet_net_lock(cpt);
4198         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4199         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4200         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4201         lnet_net_unlock(cpt);
4202
4203         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4204 }
4205
4206 static void
4207 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4208 {
4209         struct lnet_hdr *hdr = &msg->msg_hdr;
4210
4211         if (msg->msg_wanted != 0)
4212                 lnet_setpayloadbuffer(msg);
4213
4214         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4215
4216         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4217          * it back into the ACK during lnet_finalize() */
4218         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4219                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4220
4221         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4222                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4223 }
4224
4225 static int
4226 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4227 {
4228         struct lnet_hdr         *hdr = &msg->msg_hdr;
4229         struct lnet_match_info  info;
4230         int                     rc;
4231         bool                    ready_delay;
4232
4233         /* Convert put fields to host byte order */
4234         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4235         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4236         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4237
4238         /* Primary peer NID. */
4239         info.mi_id.nid = msg->msg_initiator;
4240         info.mi_id.pid  = hdr->src_pid;
4241         info.mi_opc     = LNET_MD_OP_PUT;
4242         info.mi_portal  = hdr->msg.put.ptl_index;
4243         info.mi_rlength = hdr->payload_length;
4244         info.mi_roffset = hdr->msg.put.offset;
4245         info.mi_mbits   = hdr->msg.put.match_bits;
4246         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4247
4248         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4249         ready_delay = msg->msg_rx_ready_delay;
4250
4251  again:
4252         rc = lnet_ptl_match_md(&info, msg);
4253         switch (rc) {
4254         default:
4255                 LBUG();
4256
4257         case LNET_MATCHMD_OK:
4258                 lnet_recv_put(ni, msg);
4259                 return 0;
4260
4261         case LNET_MATCHMD_NONE:
4262                 if (ready_delay)
4263                         /* no eager_recv or has already called it, should
4264                          * have been attached on delayed list */
4265                         return 0;
4266
4267                 rc = lnet_ni_eager_recv(ni, msg);
4268                 if (rc == 0) {
4269                         ready_delay = true;
4270                         goto again;
4271                 }
4272                 fallthrough;
4273
4274         case LNET_MATCHMD_DROP:
4275                 CNETERR("Dropping PUT from %s portal %d match %llu"
4276                         " offset %d length %d: %d\n",
4277                         libcfs_idstr(&info.mi_id), info.mi_portal,
4278                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4279
4280                 return -ENOENT; /* -ve: OK but no match */
4281         }
4282 }
4283
4284 static int
4285 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4286 {
4287         struct lnet_match_info info;
4288         struct lnet_hdr *hdr = &msg->msg_hdr;
4289         struct lnet_processid source_id;
4290         struct lnet_handle_wire reply_wmd;
4291         int rc;
4292
4293         /* Convert get fields to host byte order */
4294         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4295         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4296         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4297         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4298
4299         source_id.nid = hdr->src_nid;
4300         source_id.pid = hdr->src_pid;
4301         /* Primary peer NID */
4302         info.mi_id.nid  = msg->msg_initiator;
4303         info.mi_id.pid  = hdr->src_pid;
4304         info.mi_opc     = LNET_MD_OP_GET;
4305         info.mi_portal  = hdr->msg.get.ptl_index;
4306         info.mi_rlength = hdr->msg.get.sink_length;
4307         info.mi_roffset = hdr->msg.get.src_offset;
4308         info.mi_mbits   = hdr->msg.get.match_bits;
4309         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4310
4311         rc = lnet_ptl_match_md(&info, msg);
4312         if (rc == LNET_MATCHMD_DROP) {
4313                 CNETERR("Dropping GET from %s portal %d match %llu"
4314                         " offset %d length %d\n",
4315                         libcfs_idstr(&info.mi_id), info.mi_portal,
4316                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4317                 return -ENOENT; /* -ve: OK but no match */
4318         }
4319
4320         LASSERT(rc == LNET_MATCHMD_OK);
4321
4322         lnet_build_msg_event(msg, LNET_EVENT_GET);
4323
4324         reply_wmd = hdr->msg.get.return_wmd;
4325
4326         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4327                        msg->msg_offset, msg->msg_wanted);
4328
4329         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4330
4331         if (rdma_get) {
4332                 /* The LND completes the REPLY from her recv procedure */
4333                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4334                              msg->msg_offset, msg->msg_len, msg->msg_len);
4335                 return 0;
4336         }
4337
4338         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4339         msg->msg_receiving = 0;
4340
4341         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4342         if (rc < 0) {
4343                 /* didn't get as far as lnet_ni_send() */
4344                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4345                        libcfs_nidstr(&ni->ni_nid),
4346                        libcfs_idstr(&info.mi_id), rc);
4347
4348                 lnet_finalize(msg, rc);
4349         }
4350
4351         return 0;
4352 }
4353
4354 static int
4355 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4356 {
4357         void *private = msg->msg_private;
4358         struct lnet_hdr *hdr = &msg->msg_hdr;
4359         struct lnet_processid src = {};
4360         struct lnet_libmd *md;
4361         unsigned int rlength;
4362         unsigned int mlength;
4363         int cpt;
4364
4365         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4366         lnet_res_lock(cpt);
4367
4368         src.nid = hdr->src_nid;
4369         src.pid = hdr->src_pid;
4370
4371         /* NB handles only looked up by creator (no flips) */
4372         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4373         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4374                 CNETERR("%s: Dropping REPLY from %s for %s "
4375                         "MD %#llx.%#llx\n",
4376                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4377                         (md == NULL) ? "invalid" : "inactive",
4378                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4379                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4380                 if (md != NULL && md->md_me != NULL)
4381                         CERROR("REPLY MD also attached to portal %d\n",
4382                                md->md_me->me_portal);
4383
4384                 lnet_res_unlock(cpt);
4385                 return -ENOENT; /* -ve: OK but no match */
4386         }
4387
4388         LASSERT(md->md_offset == 0);
4389
4390         rlength = hdr->payload_length;
4391         mlength = min(rlength, md->md_length);
4392
4393         if (mlength < rlength &&
4394             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4395                 CNETERR("%s: Dropping REPLY from %s length %d "
4396                         "for MD %#llx would overflow (%d)\n",
4397                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4398                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4399                         mlength);
4400                 lnet_res_unlock(cpt);
4401                 return -ENOENT; /* -ve: OK but no match */
4402         }
4403
4404         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4405                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4406                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4407
4408         lnet_msg_attach_md(msg, md, 0, mlength);
4409
4410         if (mlength != 0)
4411                 lnet_setpayloadbuffer(msg);
4412
4413         lnet_res_unlock(cpt);
4414
4415         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4416
4417         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4418         return 0;
4419 }
4420
4421 static int
4422 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4423 {
4424         struct lnet_hdr *hdr = &msg->msg_hdr;
4425         struct lnet_processid src = {};
4426         struct lnet_libmd *md;
4427         int cpt;
4428
4429         src.nid = hdr->src_nid;
4430         src.pid = hdr->src_pid;
4431
4432         /* Convert ack fields to host byte order */
4433         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4434         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4435
4436         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4437         lnet_res_lock(cpt);
4438
4439         /* NB handles only looked up by creator (no flips) */
4440         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4441         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4442                 /* Don't moan; this is expected */
4443                 CDEBUG(D_NET,
4444                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4445                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4446                        (md == NULL) ? "invalid" : "inactive",
4447                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4448                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4449                 if (md != NULL && md->md_me != NULL)
4450                         CERROR("Source MD also attached to portal %d\n",
4451                                md->md_me->me_portal);
4452
4453                 lnet_res_unlock(cpt);
4454                 return -ENOENT;                  /* -ve! */
4455         }
4456
4457         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4458                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4459                hdr->msg.ack.dst_wmd.wh_object_cookie);
4460
4461         lnet_msg_attach_md(msg, md, 0, 0);
4462
4463         lnet_res_unlock(cpt);
4464
4465         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4466
4467         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4468         return 0;
4469 }
4470
4471 /**
4472  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4473  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because w/o buffer
4474  * \retval -ve                  error code
4475  */
4476 int
4477 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4478 {
4479         int     rc = 0;
4480
4481         if (!the_lnet.ln_routing)
4482                 return -ECANCELED;
4483
4484         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4485             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4486                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4487                         msg->msg_rx_ready_delay = 1;
4488                 } else {
4489                         lnet_net_unlock(msg->msg_rx_cpt);
4490                         rc = lnet_ni_eager_recv(ni, msg);
4491                         lnet_net_lock(msg->msg_rx_cpt);
4492                 }
4493         }
4494
4495         if (rc == 0)
4496                 rc = lnet_post_routed_recv_locked(msg, 0);
4497         return rc;
4498 }
4499
4500 int
4501 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4502 {
4503         int     rc;
4504
4505         switch (msg->msg_type) {
4506         case LNET_MSG_ACK:
4507                 rc = lnet_parse_ack(ni, msg);
4508                 break;
4509         case LNET_MSG_PUT:
4510                 rc = lnet_parse_put(ni, msg);
4511                 break;
4512         case LNET_MSG_GET:
4513                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4514                 break;
4515         case LNET_MSG_REPLY:
4516                 rc = lnet_parse_reply(ni, msg);
4517                 break;
4518         default: /* prevent an unused label if !kernel */
4519                 LASSERT(0);
4520                 return -EPROTO;
4521         }
4522
4523         LASSERT(rc == 0 || rc == -ENOENT);
4524         return rc;
4525 }
4526
4527 char *
4528 lnet_msgtyp2str (int type)
4529 {
4530         switch (type) {
4531         case LNET_MSG_ACK:
4532                 return ("ACK");
4533         case LNET_MSG_PUT:
4534                 return ("PUT");
4535         case LNET_MSG_GET:
4536                 return ("GET");
4537         case LNET_MSG_REPLY:
4538                 return ("REPLY");
4539         case LNET_MSG_HELLO:
4540                 return ("HELLO");
4541         default:
4542                 return ("<UNKNOWN>");
4543         }
4544 }
4545 EXPORT_SYMBOL(lnet_msgtyp2str);
4546
4547 int
4548 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4549            struct lnet_nid *from_nid, void *private, int rdma_req)
4550 {
4551         struct lnet_peer_ni *lpni;
4552         struct lnet_msg *msg;
4553         __u32 payload_length;
4554         lnet_pid_t dest_pid;
4555         struct lnet_nid dest_nid;
4556         struct lnet_nid src_nid;
4557         bool push = false;
4558         int for_me;
4559         __u32 type;
4560         int rc = 0;
4561         int cpt;
4562         time64_t now = ktime_get_seconds();
4563
4564         LASSERT (!in_interrupt ());
4565
4566         type = hdr->type;
4567         src_nid = hdr->src_nid;
4568         dest_nid = hdr->dest_nid;
4569         dest_pid = hdr->dest_pid;
4570         payload_length = hdr->payload_length;
4571
4572         for_me = nid_same(&ni->ni_nid, &dest_nid);
4573         cpt = lnet_nid2cpt(from_nid, ni);
4574
4575         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4576                 libcfs_nidstr(&dest_nid),
4577                 libcfs_nidstr(&ni->ni_nid),
4578                 libcfs_nidstr(&src_nid),
4579                 lnet_msgtyp2str(type),
4580                 (for_me) ? "for me" : "routed");
4581
4582         switch (type) {
4583         case LNET_MSG_ACK:
4584         case LNET_MSG_GET:
4585                 if (payload_length > 0) {
4586                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4587                                libcfs_nidstr(from_nid),
4588                                libcfs_nidstr(&src_nid),
4589                                lnet_msgtyp2str(type), payload_length);
4590                         return -EPROTO;
4591                 }
4592                 break;
4593
4594         case LNET_MSG_PUT:
4595         case LNET_MSG_REPLY:
4596                 if (payload_length >
4597                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4598                         CERROR("%s, src %s: bad %s payload %d "
4599                                "(%d max expected)\n",
4600                                libcfs_nidstr(from_nid),
4601                                libcfs_nidstr(&src_nid),
4602                                lnet_msgtyp2str(type),
4603                                payload_length,
4604                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4605                         return -EPROTO;
4606                 }
4607                 break;
4608
4609         default:
4610                 CERROR("%s, src %s: Bad message type 0x%x\n",
4611                        libcfs_nidstr(from_nid),
4612                        libcfs_nidstr(&src_nid), type);
4613                 return -EPROTO;
4614         }
4615
4616         /* Only update net_last_alive for incoming GETs on the reserved portal
4617          * (i.e. incoming lnet/discovery pings).
4618          * This avoids situations where the router's own traffic results in NI
4619          * status changes
4620          */
4621         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4622             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4623             !lnet_islocalnid(&src_nid) &&
4624             ni->ni_net->net_last_alive != now) {
4625                 lnet_ni_lock(ni);
4626                 spin_lock(&ni->ni_net->net_lock);
4627                 ni->ni_net->net_last_alive = now;
4628                 spin_unlock(&ni->ni_net->net_lock);
4629                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4630                 lnet_ni_unlock(ni);
4631         }
4632
4633         if (push)
4634                 lnet_push_update_to_peers(1);
4635
4636         /* Regard a bad destination NID as a protocol error.  Senders should
4637          * know what they're doing; if they don't they're misconfigured, buggy
4638          * or malicious so we chop them off at the knees :) */
4639
4640         if (!for_me) {
4641                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4642                         /* should have gone direct */
4643                         CERROR("%s, src %s: Bad dest nid %s "
4644                                "(should have been sent direct)\n",
4645                                 libcfs_nidstr(from_nid),
4646                                 libcfs_nidstr(&src_nid),
4647                                 libcfs_nidstr(&dest_nid));
4648                         return -EPROTO;
4649                 }
4650
4651                 if (lnet_islocalnid(&dest_nid)) {
4652                         /* dest is another local NI; sender should have used
4653                          * this node's NID on its own network */
4654                         CERROR("%s, src %s: Bad dest nid %s "
4655                                "(it's my nid but on a different network)\n",
4656                                 libcfs_nidstr(from_nid),
4657                                 libcfs_nidstr(&src_nid),
4658                                 libcfs_nidstr(&dest_nid));
4659                         return -EPROTO;
4660                 }
4661
4662                 if (rdma_req && type == LNET_MSG_GET) {
4663                         CERROR("%s, src %s: Bad optimized GET for %s "
4664                                "(final destination must be me)\n",
4665                                 libcfs_nidstr(from_nid),
4666                                 libcfs_nidstr(&src_nid),
4667                                 libcfs_nidstr(&dest_nid));
4668                         return -EPROTO;
4669                 }
4670
4671                 if (!the_lnet.ln_routing) {
4672                         CERROR("%s, src %s: Dropping message for %s "
4673                                "(routing not enabled)\n",
4674                                 libcfs_nidstr(from_nid),
4675                                 libcfs_nidstr(&src_nid),
4676                                 libcfs_nidstr(&dest_nid));
4677                         goto drop;
4678                 }
4679         }
4680
4681         /* Message looks OK; we're not going to return an error, so we MUST
4682          * call back lnd_recv() come what may... */
4683
4684         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4685             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4686                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4687                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4688                        lnet_msgtyp2str(type));
4689                 goto drop;
4690         }
4691
4692         if (!list_empty(&the_lnet.ln_drop_rules) &&
4693             lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) {
4694                 CDEBUG(D_NET,
4695                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4696                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4697                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4698                 goto drop;
4699         }
4700
4701         msg = lnet_msg_alloc();
4702         if (msg == NULL) {
4703                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4704                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4705                        lnet_msgtyp2str(type));
4706                 goto drop;
4707         }
4708
4709         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4710          * pointers NULL etc */
4711
4712         msg->msg_type = type;
4713         msg->msg_private = private;
4714         msg->msg_receiving = 1;
4715         msg->msg_rdma_get = rdma_req;
4716         msg->msg_len = msg->msg_wanted = payload_length;
4717         msg->msg_offset = 0;
4718         msg->msg_hdr = *hdr;
4719         /* for building message event */
4720         msg->msg_from = *from_nid;
4721         if (!for_me) {
4722                 msg->msg_target.pid = dest_pid;
4723                 msg->msg_target.nid = dest_nid;
4724                 msg->msg_routing = 1;
4725         }
4726
4727         lnet_net_lock(cpt);
4728         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4729         if (IS_ERR(lpni)) {
4730                 lnet_net_unlock(cpt);
4731                 rc = PTR_ERR(lpni);
4732                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4733                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4734                        lnet_msgtyp2str(type), rc);
4735                 lnet_msg_free(msg);
4736                 if (rc == -ESHUTDOWN)
4737                         /* We are shutting down.  Don't do anything more */
4738                         return rc;
4739                 goto drop;
4740         }
4741
4742         /* If this message was forwarded to us from a router then we may need
4743          * to update router aliveness or check for an asymmetrical route
4744          * (or both)
4745          */
4746         if (((lnet_drop_asym_route && for_me) ||
4747              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4748             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4749                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4750                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4751                 struct lnet_route *route;
4752                 bool found = false;
4753
4754                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4755                         if (route->lr_net == src_net_id) {
4756                                 found = true;
4757                                 /* If we're transitioning the gateway from
4758                                  * dead -> alive, and discovery is disabled
4759                                  * locally or on the gateway, then we need to
4760                                  * update the cached route aliveness for each
4761                                  * route to the src_nid's net.
4762                                  *
4763                                  * Otherwise, we're only checking for
4764                                  * symmetrical route, and we can break the
4765                                  * loop
4766                                  */
4767                                 if (!gw->lp_alive &&
4768                                     lnet_is_discovery_disabled(gw))
4769                                         lnet_set_route_aliveness(route, true);
4770                                 else
4771                                         break;
4772                         }
4773                 }
4774                 if (lnet_drop_asym_route && for_me && !found) {
4775                         /* Drop ref taken by lnet_nid2peerni_locked() */
4776                         lnet_peer_ni_decref_locked(lpni);
4777                         lnet_net_unlock(cpt);
4778                         /* we would not use from_nid to route a message to
4779                          * src_nid
4780                          * => asymmetric routing detected but forbidden
4781                          */
4782                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4783                                libcfs_nidstr(from_nid),
4784                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4785                         lnet_msg_free(msg);
4786                         goto drop;
4787                 }
4788                 if (!gw->lp_alive) {
4789                         struct lnet_peer_net *lpn;
4790                         struct lnet_peer_ni *lpni2;
4791
4792                         gw->lp_alive = true;
4793                         /* Mark all remote NIs on src_nid's net UP */
4794                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4795                         if (lpn)
4796                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4797                                                     lpni_peer_nis)
4798                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4799                 }
4800         }
4801
4802         lpni->lpni_last_alive = now;
4803
4804         msg->msg_rxpeer = lpni;
4805         msg->msg_rxni = ni;
4806         lnet_ni_addref_locked(ni, cpt);
4807         /* Multi-Rail: Primary NID of source. */
4808         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
4809
4810         /*
4811          * mark the status of this lpni as UP since we received a message
4812          * from it. The ping response reports back the ns_status which is
4813          * marked on the remote as up or down and we cache it here.
4814          */
4815         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4816
4817         lnet_msg_commit(msg, cpt);
4818
4819         /* message delay simulation */
4820         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4821                      lnet_delay_rule_match_locked(hdr, msg))) {
4822                 lnet_net_unlock(cpt);
4823                 return 0;
4824         }
4825
4826         if (!for_me) {
4827                 rc = lnet_parse_forward_locked(ni, msg);
4828                 lnet_net_unlock(cpt);
4829
4830                 if (rc < 0)
4831                         goto free_drop;
4832
4833                 if (rc == LNET_CREDIT_OK) {
4834                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4835                                      0, payload_length, payload_length);
4836                 }
4837                 return 0;
4838         }
4839
4840         lnet_net_unlock(cpt);
4841
4842         rc = lnet_parse_local(ni, msg);
4843         if (rc != 0)
4844                 goto free_drop;
4845         return 0;
4846
4847  free_drop:
4848         LASSERT(msg->msg_md == NULL);
4849         lnet_finalize(msg, rc);
4850
4851  drop:
4852         lnet_drop_message(ni, cpt, private, payload_length, type);
4853         return 0;
4854 }
4855 EXPORT_SYMBOL(lnet_parse);
4856
4857 void
4858 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4859 {
4860         struct lnet_msg *msg;
4861
4862         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4863                                                msg_list)) != NULL) {
4864                 struct lnet_processid id = {};
4865
4866                 list_del(&msg->msg_list);
4867
4868                 id.nid = msg->msg_hdr.src_nid;
4869                 id.pid = msg->msg_hdr.src_pid;
4870
4871                 LASSERT(msg->msg_md == NULL);
4872                 LASSERT(msg->msg_rx_delayed);
4873                 LASSERT(msg->msg_rxpeer != NULL);
4874                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4875
4876                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4877                       " offset %d length %d: %s\n",
4878                       libcfs_idstr(&id),
4879                       msg->msg_hdr.msg.put.ptl_index,
4880                       msg->msg_hdr.msg.put.match_bits,
4881                       msg->msg_hdr.msg.put.offset,
4882                       msg->msg_hdr.payload_length, reason);
4883
4884                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4885                  * called lnet_drop_message(), so I just hang onto msg as well
4886                  * until that's done */
4887
4888                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4889                                   msg->msg_private, msg->msg_len,
4890                                   msg->msg_type);
4891
4892                 msg->msg_no_resend = true;
4893                 /*
4894                  * NB: message will not generate event because w/o attached MD,
4895                  * but we still should give error code so lnet_msg_decommit()
4896                  * can skip counters operations and other checks.
4897                  */
4898                 lnet_finalize(msg, -ENOENT);
4899         }
4900 }
4901
4902 void
4903 lnet_recv_delayed_msg_list(struct list_head *head)
4904 {
4905         struct lnet_msg *msg;
4906
4907         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4908                                                msg_list)) != NULL) {
4909                 struct lnet_processid id;
4910
4911                 list_del(&msg->msg_list);
4912
4913                 /* md won't disappear under me, since each msg
4914                  * holds a ref on it */
4915
4916                 id.nid = msg->msg_hdr.src_nid;
4917                 id.pid = msg->msg_hdr.src_pid;
4918
4919                 LASSERT(msg->msg_rx_delayed);
4920                 LASSERT(msg->msg_md != NULL);
4921                 LASSERT(msg->msg_rxpeer != NULL);
4922                 LASSERT(msg->msg_rxni != NULL);
4923                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4924
4925                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4926                        "match %llu offset %d length %d.\n",
4927                         libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index,
4928                         msg->msg_hdr.msg.put.match_bits,
4929                         msg->msg_hdr.msg.put.offset,
4930                         msg->msg_hdr.payload_length);
4931
4932                 lnet_recv_put(msg->msg_rxni, msg);
4933         }
4934 }
4935
4936 static void
4937 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4938                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4939 {
4940         s64 timeout_ns;
4941         struct lnet_rsp_tracker *local_rspt;
4942
4943         /*
4944          * MD has a refcount taken by message so it's not going away.
4945          * The MD however can be looked up. We need to secure the access
4946          * to the md_rspt_ptr by taking the res_lock.
4947          * The rspt can be accessed without protection up to when it gets
4948          * added to the list.
4949          */
4950
4951         lnet_res_lock(cpt);
4952         local_rspt = md->md_rspt_ptr;
4953         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4954         if (local_rspt != NULL) {
4955                 /*
4956                  * we already have an rspt attached to the md, so we'll
4957                  * update the deadline on that one.
4958                  */
4959                 lnet_rspt_free(rspt, cpt);
4960         } else {
4961                 /* new md */
4962                 rspt->rspt_mdh = mdh;
4963                 rspt->rspt_cpt = cpt;
4964                 /* store the rspt so we can access it when we get the REPLY */
4965                 md->md_rspt_ptr = rspt;
4966                 local_rspt = rspt;
4967         }
4968         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4969
4970         /*
4971          * add to the list of tracked responses. It's added to tail of the
4972          * list in order to expire all the older entries first.
4973          */
4974         lnet_net_lock(cpt);
4975         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4976         lnet_net_unlock(cpt);
4977         lnet_res_unlock(cpt);
4978 }
4979
4980 /**
4981  * Initiate an asynchronous PUT operation.
4982  *
4983  * There are several events associated with a PUT: completion of the send on
4984  * the initiator node (LNET_EVENT_SEND), and when the send completes
4985  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4986  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4987  * used at the target node to indicate the completion of incoming data
4988  * delivery.
4989  *
4990  * The local events will be logged in the EQ associated with the MD pointed to
4991  * by \a mdh handle. Using a MD without an associated EQ results in these
4992  * events being discarded. In this case, the caller must have another
4993  * mechanism (e.g., a higher level protocol) for determining when it is safe
4994  * to modify the memory region associated with the MD.
4995  *
4996  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4997  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4998  *
4999  * \param self Indicates the NID of a local interface through which to send
5000  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
5001  * \param mdh A handle for the MD that describes the memory to be sent. The MD
5002  * must be "free floating" (See LNetMDBind()).
5003  * \param ack Controls whether an acknowledgment is requested.
5004  * Acknowledgments are only sent when they are requested by the initiating
5005  * process and the target MD enables them.
5006  * \param target A process identifier for the target process.
5007  * \param portal The index in the \a target's portal table.
5008  * \param match_bits The match bits to use for MD selection at the target
5009  * process.
5010  * \param offset The offset into the target MD (only used when the target
5011  * MD has the LNET_MD_MANAGE_REMOTE option set).
5012  * \param hdr_data 64 bits of user data that can be included in the message
5013  * header. This data is written to an event queue entry at the target if an
5014  * EQ is present on the matching MD.
5015  *
5016  * \retval  0      Success, and only in this case events will be generated
5017  * and logged to EQ (if it exists).
5018  * \retval -EIO    Simulated failure.
5019  * \retval -ENOMEM Memory allocation failure.
5020  * \retval -ENOENT Invalid MD object.
5021  *
5022  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
5023  */
5024 int
5025 LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
5026         struct lnet_processid *target, unsigned int portal,
5027         __u64 match_bits, unsigned int offset,
5028         __u64 hdr_data)
5029 {
5030         struct lnet_msg *msg;
5031         struct lnet_libmd *md;
5032         int cpt;
5033         int rc;
5034         struct lnet_rsp_tracker *rspt = NULL;
5035
5036         LASSERT(the_lnet.ln_refcount > 0);
5037
5038         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5039             fail_peer(&target->nid, 1)) {               /* shall we now? */
5040                 CERROR("Dropping PUT to %s: simulated failure\n",
5041                        libcfs_idstr(target));
5042                 return -EIO;
5043         }
5044
5045         msg = lnet_msg_alloc();
5046         if (msg == NULL) {
5047                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5048                        libcfs_idstr(target));
5049                 return -ENOMEM;
5050         }
5051         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5052
5053         cpt = lnet_cpt_of_cookie(mdh.cookie);
5054
5055         if (ack == LNET_ACK_REQ) {
5056                 rspt = lnet_rspt_alloc(cpt);
5057                 if (!rspt) {
5058                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5059                                 libcfs_idstr(target));
5060                         return -ENOMEM;
5061                 }
5062                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5063         }
5064
5065         lnet_res_lock(cpt);
5066
5067         md = lnet_handle2md(&mdh);
5068         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5069                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5070                        match_bits, portal, libcfs_idstr(target),
5071                        md == NULL ? -1 : md->md_threshold);
5072                 if (md != NULL && md->md_me != NULL)
5073                         CERROR("Source MD also attached to portal %d\n",
5074                                md->md_me->me_portal);
5075                 lnet_res_unlock(cpt);
5076
5077                 if (rspt)
5078                         lnet_rspt_free(rspt, cpt);
5079
5080                 lnet_msg_free(msg);
5081                 return -ENOENT;
5082         }
5083
5084         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5085
5086         lnet_msg_attach_md(msg, md, 0, 0);
5087
5088         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5089
5090         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5091         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5092         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5093         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5094
5095         /* NB handles only looked up by creator (no flips) */
5096         if (ack == LNET_ACK_REQ) {
5097                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5098                         the_lnet.ln_interface_cookie;
5099                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5100                         md->md_lh.lh_cookie;
5101         } else {
5102                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5103                         LNET_WIRE_HANDLE_COOKIE_NONE;
5104                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5105                         LNET_WIRE_HANDLE_COOKIE_NONE;
5106         }
5107
5108         lnet_res_unlock(cpt);
5109
5110         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5111
5112         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5113                                                    md->md_options))
5114                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5115         else if (rspt)
5116                 lnet_rspt_free(rspt, cpt);
5117
5118         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5119                                  CFS_FAIL_ONCE))
5120                 rc = -EIO;
5121         else
5122                 rc = lnet_send(self, msg, NULL);
5123
5124         if (rc != 0) {
5125                 CNETERR("Error sending PUT to %s: %d\n",
5126                         libcfs_idstr(target), rc);
5127                 msg->msg_no_resend = true;
5128                 lnet_finalize(msg, rc);
5129         }
5130
5131         /* completion will be signalled by an event */
5132         return 0;
5133 }
5134 EXPORT_SYMBOL(LNetPut);
5135
5136 /*
5137  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5138  * returns a msg for the LND to pass to lnet_finalize() when the sink
5139  * data has been received.
5140  *
5141  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5142  * lnet_finalize() is called on it, so the LND must call this first
5143  */
5144 struct lnet_msg *
5145 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5146 {
5147         struct lnet_msg *msg = lnet_msg_alloc();
5148         struct lnet_libmd *getmd = getmsg->msg_md;
5149         struct lnet_processid *peer_id = &getmsg->msg_target;
5150         int cpt;
5151
5152         LASSERT(!getmsg->msg_target_is_router);
5153         LASSERT(!getmsg->msg_routing);
5154
5155         if (msg == NULL) {
5156                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5157                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5158                 goto drop;
5159         }
5160
5161         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5162         lnet_res_lock(cpt);
5163
5164         LASSERT(getmd->md_refcount > 0);
5165
5166         if (getmd->md_threshold == 0) {
5167                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5168                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5169                         getmd);
5170                 lnet_res_unlock(cpt);
5171                 goto drop;
5172         }
5173
5174         LASSERT(getmd->md_offset == 0);
5175
5176         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5177                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5178
5179         /* setup information for lnet_build_msg_event */
5180         msg->msg_initiator =
5181                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5182         msg->msg_from = peer_id->nid;
5183         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5184         msg->msg_hdr.src_nid = peer_id->nid;
5185         msg->msg_hdr.payload_length = getmd->md_length;
5186         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5187
5188         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5189         lnet_res_unlock(cpt);
5190
5191         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5192
5193         lnet_net_lock(cpt);
5194         lnet_msg_commit(msg, cpt);
5195         lnet_net_unlock(cpt);
5196
5197         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5198
5199         return msg;
5200
5201  drop:
5202         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5203
5204         lnet_net_lock(cpt);
5205         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5206         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5207         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5208                 getmd->md_length;
5209         lnet_net_unlock(cpt);
5210
5211         if (msg != NULL)
5212                 lnet_msg_free(msg);
5213
5214         return NULL;
5215 }
5216 EXPORT_SYMBOL(lnet_create_reply_msg);
5217
5218 void
5219 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5220                        unsigned int len)
5221 {
5222         /* Set the REPLY length, now the RDMA that elides the REPLY message has
5223          * completed and I know it. */
5224         LASSERT(reply != NULL);
5225         LASSERT(reply->msg_type == LNET_MSG_GET);
5226         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5227
5228         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5229          * the end of my buffer, I might as well be dead. */
5230         LASSERT(len <= reply->msg_ev.mlength);
5231
5232         reply->msg_ev.mlength = len;
5233 }
5234 EXPORT_SYMBOL(lnet_set_reply_msg_len);
5235
5236 /**
5237  * Initiate an asynchronous GET operation.
5238  *
5239  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5240  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5241  * the target node in the REPLY has been written to local MD.
5242  *
5243  * On the target node, an LNET_EVENT_GET is logged when the GET request
5244  * arrives and is accepted into a MD.
5245  *
5246  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5247  * \param mdh A handle for the MD that describes the memory into which the
5248  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5249  *
5250  * \retval  0      Success, and only in this case events will be generated
5251  * and logged to EQ (if it exists) of the MD.
5252  * \retval -EIO    Simulated failure.
5253  * \retval -ENOMEM Memory allocation failure.
5254  * \retval -ENOENT Invalid MD object.
5255  */
5256 int
5257 LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh,
5258         struct lnet_processid *target, unsigned int portal,
5259         __u64 match_bits, unsigned int offset, bool recovery)
5260 {
5261         struct lnet_msg *msg;
5262         struct lnet_libmd *md;
5263         struct lnet_rsp_tracker *rspt;
5264         int cpt;
5265         int rc;
5266
5267         LASSERT(the_lnet.ln_refcount > 0);
5268
5269         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5270             fail_peer(&target->nid, 1))         /* shall we now? */
5271         {
5272                 CERROR("Dropping GET to %s: simulated failure\n",
5273                        libcfs_idstr(target));
5274                 return -EIO;
5275         }
5276
5277         msg = lnet_msg_alloc();
5278         if (!msg) {
5279                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5280                        libcfs_idstr(target));
5281                 return -ENOMEM;
5282         }
5283
5284         cpt = lnet_cpt_of_cookie(mdh.cookie);
5285
5286         rspt = lnet_rspt_alloc(cpt);
5287         if (!rspt) {
5288                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5289                        libcfs_idstr(target));
5290                 return -ENOMEM;
5291         }
5292         INIT_LIST_HEAD(&rspt->rspt_on_list);
5293
5294         msg->msg_recovery = recovery;
5295
5296         lnet_res_lock(cpt);
5297
5298         md = lnet_handle2md(&mdh);
5299         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5300                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5301                        match_bits, portal, libcfs_idstr(target),
5302                        md == NULL ? -1 : md->md_threshold);
5303                 if (md != NULL && md->md_me != NULL)
5304                         CERROR("REPLY MD also attached to portal %d\n",
5305                                md->md_me->me_portal);
5306
5307                 lnet_res_unlock(cpt);
5308
5309                 lnet_msg_free(msg);
5310                 lnet_rspt_free(rspt, cpt);
5311                 return -ENOENT;
5312         }
5313
5314         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5315
5316         lnet_msg_attach_md(msg, md, 0, 0);
5317
5318         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5319
5320         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5321         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5322         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5323         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5324
5325         /* NB handles only looked up by creator (no flips) */
5326         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5327                 the_lnet.ln_interface_cookie;
5328         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5329                 md->md_lh.lh_cookie;
5330
5331         lnet_res_unlock(cpt);
5332
5333         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5334
5335         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5336                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5337         else
5338                 lnet_rspt_free(rspt, cpt);
5339
5340         rc = lnet_send(self, msg, NULL);
5341         if (rc < 0) {
5342                 CNETERR("Error sending GET to %s: %d\n",
5343                         libcfs_idstr(target), rc);
5344                 msg->msg_no_resend = true;
5345                 lnet_finalize(msg, rc);
5346         }
5347
5348         /* completion will be signalled by an event */
5349         return 0;
5350 }
5351 EXPORT_SYMBOL(LNetGet);
5352
5353 /**
5354  * Calculate distance to node at \a dstnid.
5355  *
5356  * \param dstnid Target NID.
5357  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5358  * is saved here.
5359  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5360  * here.
5361  *
5362  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5363  * local_nid_dist_zero is set, which is the default.
5364  * \retval positives Distance to target NID, i.e. number of hops plus one.
5365  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5366  */
5367 int
5368 LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp)
5369 {
5370         struct lnet_ni *ni = NULL;
5371         struct lnet_remotenet *rnet;
5372         __u32 dstnet = LNET_NID_NET(dstnid);
5373         int hops;
5374         int cpt;
5375         __u32 order = 2;
5376         struct list_head *rn_list;
5377         struct lnet_ni *matched_dstnet = NULL;
5378
5379         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5380          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5381          * keep order 0 free for 0@lo and order 1 free for a local NID
5382          * match
5383          * WARNING: dstnid and srcnid might point to same place.
5384          * Don't set *srcnid until late.
5385          */
5386
5387         LASSERT(the_lnet.ln_refcount > 0);
5388
5389         cpt = lnet_net_lock_current();
5390
5391         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5392                 if (nid_same(&ni->ni_nid, dstnid)) {
5393                         if (orderp != NULL) {
5394                                 if (nid_is_lo0(dstnid))
5395                                         *orderp = 0;
5396                                 else
5397                                         *orderp = 1;
5398                         }
5399                         if (srcnid)
5400                                 *srcnid = *dstnid;
5401                         lnet_net_unlock(cpt);
5402
5403                         return local_nid_dist_zero ? 0 : 1;
5404                 }
5405
5406                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5407                         matched_dstnet = ni;
5408                         /* We matched the destination net, but we may have
5409                          * additional local NIs to inspect.
5410                          *
5411                          * We record the order as appropriate, but
5412                          * they may be overwritten if we match local NI above.
5413                          */
5414
5415                         if (orderp) {
5416                                 /* Check if ni was originally created in
5417                                  * current net namespace.
5418                                  * If not, assign order above 0xffff0000,
5419                                  * to make this ni not a priority.
5420                                  */
5421                                 if (current->nsproxy &&
5422                                     !net_eq(ni->ni_net_ns,
5423                                             current->nsproxy->net_ns))
5424                                         *orderp = order + 0xffff0000;
5425                                 else
5426                                         *orderp = order;
5427                         }
5428                 }
5429
5430                 order++;
5431         }
5432
5433         if (matched_dstnet) {
5434                 if (srcnid)
5435                         *srcnid = matched_dstnet->ni_nid;
5436                 lnet_net_unlock(cpt);
5437                 return 1;
5438         }
5439
5440         rn_list = lnet_net2rnethash(dstnet);
5441         list_for_each_entry(rnet, rn_list, lrn_list) {
5442                 if (rnet->lrn_net == dstnet) {
5443                         struct lnet_route *route;
5444                         struct lnet_route *shortest = NULL;
5445                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5446                         __u32 route_hops;
5447
5448                         LASSERT(!list_empty(&rnet->lrn_routes));
5449
5450                         list_for_each_entry(route, &rnet->lrn_routes,
5451                                             lr_list) {
5452                                 route_hops = route->lr_hops;
5453                                 if (route_hops == LNET_UNDEFINED_HOPS)
5454                                         route_hops = 1;
5455                                 if (shortest == NULL ||
5456                                     route_hops < shortest_hops) {
5457                                         shortest = route;
5458                                         shortest_hops = route_hops;
5459                                 }
5460                         }
5461
5462                         LASSERT(shortest != NULL);
5463                         hops = shortest_hops;
5464                         if (srcnid) {
5465                                 struct lnet_net *net;
5466                                 net = lnet_get_net_locked(shortest->lr_lnet);
5467                                 LASSERT(net);
5468                                 ni = lnet_get_next_ni_locked(net, NULL);
5469                                 *srcnid = ni->ni_nid;
5470                         }
5471                         if (orderp != NULL)
5472                                 *orderp = order;
5473                         lnet_net_unlock(cpt);
5474                         return hops + 1;
5475                 }
5476                 order++;
5477         }
5478
5479         lnet_net_unlock(cpt);
5480         return -EHOSTUNREACH;
5481 }
5482 EXPORT_SYMBOL(LNetDist);