LU-10391 lnet: change lnet_find_best_lpni to handle large NIDs
lnet/lnet/lib-move.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         struct lnet_nid sd_dst_nid;
59         struct lnet_nid sd_src_nid;
60         struct lnet_nid sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicitly enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
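When neither MD override flag is set, the checks above reduce to the following behaviour of the lnet_response_tracking module parameter (a summary derived from this function, not a separate specification):

/*
 *   lnet_response_tracking == 0 : no responses tracked
 *   lnet_response_tracking == 1 : GET responses (REPLY) tracked
 *   lnet_response_tracking == 2 : PUT responses (ACK) tracked
 *   lnet_response_tracking == 3 : all responses tracked
 *
 * An MD flag always wins, e.g.:
 *   lnet_response_tracking_enabled(LNET_MSG_GET, LNET_MD_NO_TRACK_RESPONSE)
 *   returns false regardless of the module parameter.
 */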
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
192 int
193 lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         struct lnet_nid nid;
199         LIST_HEAD(cull);
200
201         lnet_nid4_to_nid(nid4, &nid);
202         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
203         if (threshold != 0) {
204                 /* Adding a new entry */
205                 LIBCFS_ALLOC(tp, sizeof(*tp));
206                 if (tp == NULL)
207                         return -ENOMEM;
208
209                 tp->tp_nid = nid;
210                 tp->tp_threshold = threshold;
211
212                 lnet_net_lock(0);
213                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
214                 lnet_net_unlock(0);
215                 return 0;
216         }
217
218         lnet_net_lock(0);
219
220         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
221                 tp = list_entry(el, struct lnet_test_peer, tp_list);
222
223                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
224                     LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
225                     nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
226                         list_move(&tp->tp_list, &cull);
227                 }
228         }
229
230         lnet_net_unlock(0);
231
232         while ((tp = list_first_entry_or_null(&cull,
233                                               struct lnet_test_peer,
234                                               tp_list)) != NULL) {
235                 list_del(&tp->tp_list);
236                 LIBCFS_FREE(tp, sizeof(*tp));
237         }
238         return 0;
239 }
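A sketch of how this test hook is typically driven (illustrative calls only; the threshold semantics follow directly from the code above):

	/* fail the next 3 messages involving this NID */
	lnet_fail_nid(nid4, 3);

	/* a threshold of 0 culls matching entries; LNET_NID_ANY culls them all */
	lnet_fail_nid(LNET_NID_ANY, 0);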
240
241 static int
242 fail_peer(struct lnet_nid *nid, int outgoing)
243 {
244         struct lnet_test_peer *tp;
245         struct list_head *el;
246         struct list_head *next;
247         LIST_HEAD(cull);
248         int fail = 0;
249
250         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
251         lnet_net_lock(0);
252
253         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
254                 tp = list_entry(el, struct lnet_test_peer, tp_list);
255
256                 if (tp->tp_threshold == 0) {
257                         /* zombie entry */
258                         if (outgoing) {
259                                 /* only cull zombies on outgoing tests,
260                                  * since we may be at interrupt priority on
261                                  * incoming messages. */
262                                 list_move(&tp->tp_list, &cull);
263                         }
264                         continue;
265                 }
266
267                 if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
268                     nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
269                         fail = 1;
270
271                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
272                                 tp->tp_threshold--;
273                                 if (outgoing &&
274                                     tp->tp_threshold == 0) {
275                                         /* see above */
276                                         list_move(&tp->tp_list, &cull);
277                                 }
278                         }
279                         break;
280                 }
281         }
282
283         lnet_net_unlock(0);
284
285         while ((tp = list_first_entry_or_null(&cull,
286                                               struct lnet_test_peer,
287                                               tp_list)) != NULL) {
288                 list_del(&tp->tp_list);
289                 LIBCFS_FREE(tp, sizeof(*tp));
290         }
291
292         return fail;
293 }
294
295 unsigned int
296 lnet_iov_nob(unsigned int niov, struct kvec *iov)
297 {
298         unsigned int nob = 0;
299
300         LASSERT(niov == 0 || iov != NULL);
301         while (niov-- > 0)
302                 nob += (iov++)->iov_len;
303
304         return (nob);
305 }
306 EXPORT_SYMBOL(lnet_iov_nob);
307
308 void
309 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
310                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
311                   unsigned int nob)
312 {
313         /* NB diov, siov are READ-ONLY */
314         unsigned int this_nob;
315
316         if (nob == 0)
317                 return;
318
319         /* skip complete frags before 'doffset' */
320         LASSERT(ndiov > 0);
321         while (doffset >= diov->iov_len) {
322                 doffset -= diov->iov_len;
323                 diov++;
324                 ndiov--;
325                 LASSERT(ndiov > 0);
326         }
327
328         /* skip complete frags before 'soffset' */
329         LASSERT(nsiov > 0);
330         while (soffset >= siov->iov_len) {
331                 soffset -= siov->iov_len;
332                 siov++;
333                 nsiov--;
334                 LASSERT(nsiov > 0);
335         }
336
337         do {
338                 LASSERT(ndiov > 0);
339                 LASSERT(nsiov > 0);
340                 this_nob = min3((unsigned int)diov->iov_len - doffset,
341                                 (unsigned int)siov->iov_len - soffset,
342                                 nob);
343
344                 memcpy((char *)diov->iov_base + doffset,
345                        (char *)siov->iov_base + soffset, this_nob);
346                 nob -= this_nob;
347
348                 if (diov->iov_len > doffset + this_nob) {
349                         doffset += this_nob;
350                 } else {
351                         diov++;
352                         ndiov--;
353                         doffset = 0;
354                 }
355
356                 if (siov->iov_len > soffset + this_nob) {
357                         soffset += this_nob;
358                 } else {
359                         siov++;
360                         nsiov--;
361                         soffset = 0;
362                 }
363         } while (nob > 0);
364 }
365 EXPORT_SYMBOL(lnet_copy_iov2iov);
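A minimal usage sketch for the kvec helpers above (buffer names are hypothetical; note the routine asserts that the offsets and byte count fit within the supplied vectors):

	struct kvec src[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(*hdr) },
		{ .iov_base = body, .iov_len = body_len },
	};
	struct kvec dst = { .iov_base = buf, .iov_len = buf_len };
	unsigned int nob = lnet_iov_nob(2, src);

	/* gather both source fragments into 'buf' starting at offset 0 */
	lnet_copy_iov2iov(1, &dst, 0, 2, src, 0, nob);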
366
367 unsigned int
368 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
369 {
370         unsigned int  nob = 0;
371
372         LASSERT(niov == 0 || kiov != NULL);
373         while (niov-- > 0)
374                 nob += (kiov++)->bv_len;
375
376         return (nob);
377 }
378 EXPORT_SYMBOL(lnet_kiov_nob);
379
380 void
381 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
382                     unsigned int doffset,
383                     unsigned int nsiov, struct bio_vec *siov,
384                     unsigned int soffset,
385                     unsigned int nob)
386 {
387         /* NB diov, siov are READ-ONLY */
388         unsigned int    this_nob;
389         char           *daddr = NULL;
390         char           *saddr = NULL;
391
392         if (nob == 0)
393                 return;
394
395         LASSERT (!in_interrupt ());
396
397         LASSERT (ndiov > 0);
398         while (doffset >= diov->bv_len) {
399                 doffset -= diov->bv_len;
400                 diov++;
401                 ndiov--;
402                 LASSERT(ndiov > 0);
403         }
404
405         LASSERT(nsiov > 0);
406         while (soffset >= siov->bv_len) {
407                 soffset -= siov->bv_len;
408                 siov++;
409                 nsiov--;
410                 LASSERT(nsiov > 0);
411         }
412
413         do {
414                 LASSERT(ndiov > 0);
415                 LASSERT(nsiov > 0);
416                 this_nob = min3(diov->bv_len - doffset,
417                                 siov->bv_len - soffset,
418                                 nob);
419
420                 if (daddr == NULL)
421                         daddr = ((char *)kmap(diov->bv_page)) +
422                                 diov->bv_offset + doffset;
423                 if (saddr == NULL)
424                         saddr = ((char *)kmap(siov->bv_page)) +
425                                 siov->bv_offset + soffset;
426
427                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
428                  * However in practice at least one of the kiovs will be mapped
429                  * kernel pages and the map/unmap will be NOOPs */
430
431                 memcpy (daddr, saddr, this_nob);
432                 nob -= this_nob;
433
434                 if (diov->bv_len > doffset + this_nob) {
435                         daddr += this_nob;
436                         doffset += this_nob;
437                 } else {
438                         kunmap(diov->bv_page);
439                         daddr = NULL;
440                         diov++;
441                         ndiov--;
442                         doffset = 0;
443                 }
444
445                 if (siov->bv_len > soffset + this_nob) {
446                         saddr += this_nob;
447                         soffset += this_nob;
448                 } else {
449                         kunmap(siov->bv_page);
450                         saddr = NULL;
451                         siov++;
452                         nsiov--;
453                         soffset = 0;
454                 }
455         } while (nob > 0);
456
457         if (daddr != NULL)
458                 kunmap(diov->bv_page);
459         if (saddr != NULL)
460                 kunmap(siov->bv_page);
461 }
462 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
463
464 void
465 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
466                     unsigned int nkiov, struct bio_vec *kiov,
467                     unsigned int kiovoffset,
468                     unsigned int nob)
469 {
470         /* NB iov, kiov are READ-ONLY */
471         unsigned int    this_nob;
472         char           *addr = NULL;
473
474         if (nob == 0)
475                 return;
476
477         LASSERT (!in_interrupt ());
478
479         LASSERT (niov > 0);
480         while (iovoffset >= iov->iov_len) {
481                 iovoffset -= iov->iov_len;
482                 iov++;
483                 niov--;
484                 LASSERT(niov > 0);
485         }
486
487         LASSERT(nkiov > 0);
488         while (kiovoffset >= kiov->bv_len) {
489                 kiovoffset -= kiov->bv_len;
490                 kiov++;
491                 nkiov--;
492                 LASSERT(nkiov > 0);
493         }
494
495         do {
496                 LASSERT(niov > 0);
497                 LASSERT(nkiov > 0);
498                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
499                                 (unsigned int)kiov->bv_len - kiovoffset,
500                                 nob);
501
502                 if (addr == NULL)
503                         addr = ((char *)kmap(kiov->bv_page)) +
504                                 kiov->bv_offset + kiovoffset;
505
506                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
507                 nob -= this_nob;
508
509                 if (iov->iov_len > iovoffset + this_nob) {
510                         iovoffset += this_nob;
511                 } else {
512                         iov++;
513                         niov--;
514                         iovoffset = 0;
515                 }
516
517                 if (kiov->bv_len > kiovoffset + this_nob) {
518                         addr += this_nob;
519                         kiovoffset += this_nob;
520                 } else {
521                         kunmap(kiov->bv_page);
522                         addr = NULL;
523                         kiov++;
524                         nkiov--;
525                         kiovoffset = 0;
526                 }
527
528         } while (nob > 0);
529
530         if (addr != NULL)
531                 kunmap(kiov->bv_page);
532 }
533 EXPORT_SYMBOL(lnet_copy_kiov2iov);
534
535 void
536 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
537                    unsigned int kiovoffset,
538                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
539                    unsigned int nob)
540 {
541         /* NB kiov, iov are READ-ONLY */
542         unsigned int    this_nob;
543         char           *addr = NULL;
544
545         if (nob == 0)
546                 return;
547
548         LASSERT (!in_interrupt ());
549
550         LASSERT (nkiov > 0);
551         while (kiovoffset >= kiov->bv_len) {
552                 kiovoffset -= kiov->bv_len;
553                 kiov++;
554                 nkiov--;
555                 LASSERT(nkiov > 0);
556         }
557
558         LASSERT(niov > 0);
559         while (iovoffset >= iov->iov_len) {
560                 iovoffset -= iov->iov_len;
561                 iov++;
562                 niov--;
563                 LASSERT(niov > 0);
564         }
565
566         do {
567                 LASSERT(nkiov > 0);
568                 LASSERT(niov > 0);
569                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
570                                 (unsigned int)iov->iov_len - iovoffset,
571                                 nob);
572
573                 if (addr == NULL)
574                         addr = ((char *)kmap(kiov->bv_page)) +
575                                 kiov->bv_offset + kiovoffset;
576
577                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
578                 nob -= this_nob;
579
580                 if (kiov->bv_len > kiovoffset + this_nob) {
581                         addr += this_nob;
582                         kiovoffset += this_nob;
583                 } else {
584                         kunmap(kiov->bv_page);
585                         addr = NULL;
586                         kiov++;
587                         nkiov--;
588                         kiovoffset = 0;
589                 }
590
591                 if (iov->iov_len > iovoffset + this_nob) {
592                         iovoffset += this_nob;
593                 } else {
594                         iov++;
595                         niov--;
596                         iovoffset = 0;
597                 }
598         } while (nob > 0);
599
600         if (addr != NULL)
601                 kunmap(kiov->bv_page);
602 }
603 EXPORT_SYMBOL(lnet_copy_iov2kiov);
604
605 int
606 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
607                   int src_niov, struct bio_vec *src,
608                   unsigned int offset, unsigned int len)
609 {
610         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
611          * for exactly 'len' bytes, and return the number of entries.
612          * NB not destructive to 'src' */
613         unsigned int    frag_len;
614         unsigned int    niov;
615
616         if (len == 0)                           /* no data => */
617                 return (0);                     /* no frags */
618
619         LASSERT(src_niov > 0);
620         while (offset >= src->bv_len) {      /* skip initial frags */
621                 offset -= src->bv_len;
622                 src_niov--;
623                 src++;
624                 LASSERT(src_niov > 0);
625         }
626
627         niov = 1;
628         for (;;) {
629                 LASSERT(src_niov > 0);
630                 LASSERT((int)niov <= dst_niov);
631
632                 frag_len = src->bv_len - offset;
633                 dst->bv_page = src->bv_page;
634                 dst->bv_offset = src->bv_offset + offset;
635
636                 if (len <= frag_len) {
637                         dst->bv_len = len;
638                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
639                         return niov;
640                 }
641
642                 dst->bv_len = frag_len;
643                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
644
645                 len -= frag_len;
646                 dst++;
647                 src++;
648                 niov++;
649                 src_niov--;
650                 offset = 0;
651         }
652 }
653 EXPORT_SYMBOL(lnet_extract_kiov);
654
655 void
656 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
657              int delayed, unsigned int offset, unsigned int mlen,
658              unsigned int rlen)
659 {
660         unsigned int niov = 0;
661         struct kvec *iov = NULL;
662         struct bio_vec  *kiov = NULL;
663         int rc;
664
665         LASSERT (!in_interrupt ());
666         LASSERT (mlen == 0 || msg != NULL);
667
668         if (msg != NULL) {
669                 LASSERT(msg->msg_receiving);
670                 LASSERT(!msg->msg_sending);
671                 LASSERT(rlen == msg->msg_len);
672                 LASSERT(mlen <= msg->msg_len);
673                 LASSERT(msg->msg_offset == offset);
674                 LASSERT(msg->msg_wanted == mlen);
675
676                 msg->msg_receiving = 0;
677
678                 if (mlen != 0) {
679                         niov = msg->msg_niov;
680                         kiov = msg->msg_kiov;
681
682                         LASSERT (niov > 0);
683                         LASSERT ((iov == NULL) != (kiov == NULL));
684                 }
685         }
686
687         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
688                                              niov, kiov, offset, mlen,
689                                              rlen);
690         if (rc < 0)
691                 lnet_finalize(msg, rc);
692 }
693
694 static void
695 lnet_setpayloadbuffer(struct lnet_msg *msg)
696 {
697         struct lnet_libmd *md = msg->msg_md;
698
699         LASSERT(msg->msg_len > 0);
700         LASSERT(!msg->msg_routing);
701         LASSERT(md != NULL);
702         LASSERT(msg->msg_niov == 0);
703         LASSERT(msg->msg_kiov == NULL);
704
705         msg->msg_niov = md->md_niov;
706         msg->msg_kiov = md->md_kiov;
707 }
708
709 void
710 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
711                unsigned int offset, unsigned int len)
712 {
713         msg->msg_type = type;
714         msg->msg_target = *target;
715         msg->msg_len = len;
716         msg->msg_offset = offset;
717
718         if (len != 0)
719                 lnet_setpayloadbuffer(msg);
720
721         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
722         msg->msg_hdr.type           = type;
723         /* dest_nid will be overwritten by lnet_select_pathway() */
724         msg->msg_hdr.dest_nid = target->nid;
725         msg->msg_hdr.dest_pid = target->pid;
726         /* src_nid will be set later */
727         msg->msg_hdr.src_pid        = the_lnet.ln_pid;
728         msg->msg_hdr.payload_length = len;
729 }
730
731 void
732 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
733 {
734         void *priv = msg->msg_private;
735         int rc;
736
737         LASSERT(!in_interrupt());
738         LASSERT(nid_is_lo0(&ni->ni_nid) ||
739                 (msg->msg_txcredit && msg->msg_peertxcredit));
740
741         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
742         if (rc < 0) {
743                 msg->msg_no_resend = true;
744                 lnet_finalize(msg, rc);
745         }
746 }
747
748 static int
749 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
750 {
751         int     rc;
752
753         LASSERT(!msg->msg_sending);
754         LASSERT(msg->msg_receiving);
755         LASSERT(!msg->msg_rx_ready_delay);
756         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
757
758         msg->msg_rx_ready_delay = 1;
759         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
760                                                   &msg->msg_private);
761         if (rc != 0) {
762                 CERROR("recv from %s / send to %s aborted: "
763                        "eager_recv failed %d\n",
764                        libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
765                        libcfs_idstr(&msg->msg_target), rc);
766                 LASSERT(rc < 0); /* required by my callers */
767         }
768
769         return rc;
770 }
771
772 /* Returns:
773  *  -ETIMEDOUT if the message deadline has been exceeded
774  *  -EHOSTUNREACH if the peer is down
775  *  0 if this message should not be dropped
776  */
777 static int
778 lnet_check_message_drop(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
779                         struct lnet_msg *msg)
780 {
781         /* Drop message if we've exceeded the message deadline */
782         if (ktime_after(ktime_get(), msg->msg_deadline))
783                 return -ETIMEDOUT;
784
785         if (msg->msg_target.pid & LNET_PID_USERFLAG)
786                 return 0;
787
788         if (!lnet_peer_aliveness_enabled(lpni))
789                 return 0;
790
791         /* If we're resending a message, let's attempt to send it even if
792          * the peer is down to fulfill our resend quota on the message
793          */
794         if (msg->msg_retry_count > 0)
795                 return 0;
796
797         /* try and send recovery messages regardless */
798         if (msg->msg_recovery)
799                 return 0;
800
801         /* always send any responses */
802         if (lnet_msg_is_response(msg))
803                 return 0;
804
805         /* always send non-routed messages */
806         if (!msg->msg_routing)
807                 return 0;
808
809         /* assume peer_ni is alive as long as we're within the configured
810          * peer timeout
811          */
812         if (ktime_get_seconds() >=
813             (lpni->lpni_last_alive +
814              lpni->lpni_net->net_tunables.lct_peer_timeout))
815                 return -EHOSTUNREACH;
816
817         return 0;
818 }
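A worked illustration of the aliveness window, assuming the default peer timeout of 180 seconds:

/*
 *   ktime_get_seconds() >= lpni_last_alive + lct_peer_timeout (180s)
 *     => a routed, non-response, non-retried message is dropped with
 *        -EHOSTUNREACH; resends, recovery pings and responses are still
 *        attempted regardless of the peer's apparent state.
 */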
819
820 /**
821  * \param msg The message to be sent.
822  * \param do_send True if lnet_ni_send() should be called in this function.
823  *        lnet_send() is going to lnet_net_unlock immediately after this, so
824  *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
825  *
826  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
827  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
828  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
829  * \retval -ECANCELED If the MD of the message has been unlinked.
830  */
831 static int
832 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
833 {
834         struct lnet_peer_ni     *lp = msg->msg_txpeer;
835         struct lnet_ni          *ni = msg->msg_txni;
836         int                     cpt = msg->msg_tx_cpt;
837         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
838         int rc;
839
840         /* non-lnet_send() callers have checked before */
841         LASSERT(!do_send || msg->msg_tx_delayed);
842         LASSERT(!msg->msg_receiving);
843         LASSERT(msg->msg_tx_committed);
844
845         /* can't get here if we're sending to the loopback interface */
846         if (the_lnet.ln_loni)
847                 LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));
848
849         /* NB 'lp' is always the next hop */
850         rc = lnet_check_message_drop(ni, lp, msg);
851         if (rc) {
852                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
853                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
854                         msg->msg_len;
855                 lnet_net_unlock(cpt);
856                 if (msg->msg_txpeer)
857                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
858                                         msg->msg_type,
859                                         LNET_STATS_TYPE_DROP);
860                 if (msg->msg_txni)
861                         lnet_incr_stats(&msg->msg_txni->ni_stats,
862                                         msg->msg_type,
863                                         LNET_STATS_TYPE_DROP);
864
865                 if (rc == -EHOSTUNREACH) {
866                         CNETERR("Dropping message for %s: peer not alive\n",
867                                 libcfs_idstr(&msg->msg_target));
868                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
869                 } else {
870                         CNETERR("Dropping message for %s: exceeded message deadline\n",
871                                 libcfs_idstr(&msg->msg_target));
872                         msg->msg_health_status =
873                                 LNET_MSG_STATUS_NETWORK_TIMEOUT;
874                 }
875
876                 if (do_send)
877                         lnet_finalize(msg, rc);
878
879                 lnet_net_lock(cpt);
880                 return rc;
881         }
882
883         if (msg->msg_md != NULL &&
884             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
885                 lnet_net_unlock(cpt);
886
887                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
888                         "called on the MD/ME.\n",
889                         libcfs_idstr(&msg->msg_target));
890                 if (do_send) {
891                         msg->msg_no_resend = true;
892                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
893                                msg, libcfs_idstr(&msg->msg_target));
894                         lnet_finalize(msg, -ECANCELED);
895                 }
896
897                 lnet_net_lock(cpt);
898                 return -ECANCELED;
899         }
900
901         if (!msg->msg_peertxcredit) {
902                 spin_lock(&lp->lpni_lock);
903                 LASSERT((lp->lpni_txcredits < 0) ==
904                         !list_empty(&lp->lpni_txq));
905
906                 msg->msg_peertxcredit = 1;
907                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
908                 lp->lpni_txcredits--;
909
910                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
911                         lp->lpni_mintxcredits = lp->lpni_txcredits;
912
913                 if (lp->lpni_txcredits < 0) {
914                         msg->msg_tx_delayed = 1;
915                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
916                         spin_unlock(&lp->lpni_lock);
917                         return LNET_CREDIT_WAIT;
918                 }
919                 spin_unlock(&lp->lpni_lock);
920         }
921
922         if (!msg->msg_txcredit) {
923                 LASSERT((tq->tq_credits < 0) ==
924                         !list_empty(&tq->tq_delayed));
925
926                 msg->msg_txcredit = 1;
927                 tq->tq_credits--;
928                 atomic_dec(&ni->ni_tx_credits);
929
930                 if (tq->tq_credits < tq->tq_credits_min)
931                         tq->tq_credits_min = tq->tq_credits;
932
933                 if (tq->tq_credits < 0) {
934                         msg->msg_tx_delayed = 1;
935                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
936                         return LNET_CREDIT_WAIT;
937                 }
938         }
939
940         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
941             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
942                 msg->msg_tx_delayed = 1;
943                 return LNET_CREDIT_WAIT;
944         }
945
946         /* unset the msg_tx_delayed flag as we're going to send it now */
947         msg->msg_tx_delayed = 0;
948
949         if (do_send) {
950                 lnet_net_unlock(cpt);
951                 lnet_ni_send(ni, msg);
952                 lnet_net_lock(cpt);
953         }
954         return LNET_CREDIT_OK;
955 }
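For context, a caller that passes do_send = 0 follows roughly this pattern (a simplified sketch of the send path, consistent with the doc comment above; not a verbatim copy of lnet_send()):

	rc = lnet_post_send_locked(msg, 0);
	if (rc == LNET_CREDIT_OK) {
		/* credits taken; caller does the unlock/send/lock itself */
		lnet_net_unlock(cpt);
		lnet_ni_send(ni, msg);
		lnet_net_lock(cpt);
	} else if (rc == LNET_CREDIT_WAIT) {
		/* msg queued on a credit queue; sent when credits return */
	} else {
		/* -EHOSTUNREACH or -ECANCELED: msg was dropped/finalized */
	}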
956
957
958 static struct lnet_rtrbufpool *
959 lnet_msg2bufpool(struct lnet_msg *msg)
960 {
961         struct lnet_rtrbufpool  *rbp;
962         int                     cpt;
963
964         LASSERT(msg->msg_rx_committed);
965
966         cpt = msg->msg_rx_cpt;
967         rbp = &the_lnet.ln_rtrpools[cpt][0];
968
969         LASSERT(msg->msg_len <= LNET_MTU);
970         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
971                 rbp++;
972                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
973         }
974
975         return rbp;
976 }
977
978 static int
979 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
980 {
981         /* lnet_parse is going to lnet_net_unlock immediately after this, so it
982          * sets do_recv FALSE and I don't do the unlock/send/lock bit.
983          * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
984          * received or OK to receive */
985         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
986         struct lnet_peer *lp;
987         struct lnet_rtrbufpool *rbp;
988         struct lnet_rtrbuf *rb;
989
990         LASSERT(msg->msg_kiov == NULL);
991         LASSERT(msg->msg_niov == 0);
992         LASSERT(msg->msg_routing);
993         LASSERT(msg->msg_receiving);
994         LASSERT(!msg->msg_sending);
995         LASSERT(lpni->lpni_peer_net);
996         LASSERT(lpni->lpni_peer_net->lpn_peer);
997
998         lp = lpni->lpni_peer_net->lpn_peer;
999
1000         /* non-lnet_parse callers only receive delayed messages */
1001         LASSERT(!do_recv || msg->msg_rx_delayed);
1002
1003         if (!msg->msg_peerrtrcredit) {
1004                 /* lpni_lock protects the credit manipulation */
1005                 spin_lock(&lpni->lpni_lock);
1006
1007                 msg->msg_peerrtrcredit = 1;
1008                 lpni->lpni_rtrcredits--;
1009                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
1010                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
1011
1012                 if (lpni->lpni_rtrcredits < 0) {
1013                         spin_unlock(&lpni->lpni_lock);
1014                         /* must have checked eager_recv before here */
1015                         LASSERT(msg->msg_rx_ready_delay);
1016                         msg->msg_rx_delayed = 1;
1017                         /* lp_lock protects the lp_rtrq */
1018                         spin_lock(&lp->lp_lock);
1019                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1020                         spin_unlock(&lp->lp_lock);
1021                         return LNET_CREDIT_WAIT;
1022                 }
1023                 spin_unlock(&lpni->lpni_lock);
1024         }
1025
1026         rbp = lnet_msg2bufpool(msg);
1027
1028         if (!msg->msg_rtrcredit) {
1029                 msg->msg_rtrcredit = 1;
1030                 rbp->rbp_credits--;
1031                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1032                         rbp->rbp_mincredits = rbp->rbp_credits;
1033
1034                 if (rbp->rbp_credits < 0) {
1035                         /* must have checked eager_recv before here */
1036                         LASSERT(msg->msg_rx_ready_delay);
1037                         msg->msg_rx_delayed = 1;
1038                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1039                         return LNET_CREDIT_WAIT;
1040                 }
1041         }
1042
1043         LASSERT(!list_empty(&rbp->rbp_bufs));
1044         rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list);
1045         list_del(&rb->rb_list);
1046
1047         msg->msg_niov = rbp->rbp_npages;
1048         msg->msg_kiov = &rb->rb_kiov[0];
1049
1050         /* unset the msg_rx_delayed flag since we're receiving the message */
1051         msg->msg_rx_delayed = 0;
1052
1053         if (do_recv) {
1054                 int cpt = msg->msg_rx_cpt;
1055
1056                 lnet_net_unlock(cpt);
1057                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1058                              0, msg->msg_len, msg->msg_len);
1059                 lnet_net_lock(cpt);
1060         }
1061         return LNET_CREDIT_OK;
1062 }
1063
1064 void
1065 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1066 {
1067         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1068         struct lnet_ni          *txni = msg->msg_txni;
1069         struct lnet_msg         *msg2;
1070
1071         if (msg->msg_txcredit) {
1072                 struct lnet_ni       *ni = msg->msg_txni;
1073                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1074
1075                 /* give back NI txcredits */
1076                 msg->msg_txcredit = 0;
1077
1078                 LASSERT((tq->tq_credits < 0) ==
1079                         !list_empty(&tq->tq_delayed));
1080
1081                 tq->tq_credits++;
1082                 atomic_inc(&ni->ni_tx_credits);
1083                 if (tq->tq_credits <= 0) {
1084                         msg2 = list_first_entry(&tq->tq_delayed,
1085                                                 struct lnet_msg, msg_list);
1086                         list_del(&msg2->msg_list);
1087
1088                         LASSERT(msg2->msg_txni == ni);
1089                         LASSERT(msg2->msg_tx_delayed);
1090                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1091
1092                         (void) lnet_post_send_locked(msg2, 1);
1093                 }
1094         }
1095
1096         if (msg->msg_peertxcredit) {
1097                 /* give back peer txcredits */
1098                 msg->msg_peertxcredit = 0;
1099
1100                 spin_lock(&txpeer->lpni_lock);
1101                 LASSERT((txpeer->lpni_txcredits < 0) ==
1102                         !list_empty(&txpeer->lpni_txq));
1103
1104                 txpeer->lpni_txqnob -=  msg->msg_len +
1105                                         sizeof(struct lnet_hdr_nid4);
1106                 LASSERT(txpeer->lpni_txqnob >= 0);
1107
1108                 txpeer->lpni_txcredits++;
1109                 if (txpeer->lpni_txcredits <= 0) {
1110                         int msg2_cpt;
1111
1112                         msg2 = list_first_entry(&txpeer->lpni_txq,
1113                                                 struct lnet_msg, msg_list);
1114                         list_del(&msg2->msg_list);
1115                         spin_unlock(&txpeer->lpni_lock);
1116
1117                         LASSERT(msg2->msg_txpeer == txpeer);
1118                         LASSERT(msg2->msg_tx_delayed);
1119
1120                         msg2_cpt = msg2->msg_tx_cpt;
1121
1122                         /*
1123                          * The msg_cpt can be different from the msg2_cpt
1124                          * so we need to make sure we lock the correct cpt
1125                          * for msg2.
1126                          * Once we call lnet_post_send_locked() it is no
1127                          * longer safe to access msg2, since it could've
1128                          * been freed by lnet_finalize(), but we still
1129                          * need to relock the correct cpt, so we cache the
1130                          * msg2_cpt for the purpose of the check that
1131                          * follows the call to lnet_post_send_locked().
1132                          */
1133                         if (msg2_cpt != msg->msg_tx_cpt) {
1134                                 lnet_net_unlock(msg->msg_tx_cpt);
1135                                 lnet_net_lock(msg2_cpt);
1136                         }
1137                         (void) lnet_post_send_locked(msg2, 1);
1138                         if (msg2_cpt != msg->msg_tx_cpt) {
1139                                 lnet_net_unlock(msg2_cpt);
1140                                 lnet_net_lock(msg->msg_tx_cpt);
1141                         }
1142                 } else {
1143                         spin_unlock(&txpeer->lpni_lock);
1144                 }
1145         }
1146
1147         if (txni != NULL) {
1148                 msg->msg_txni = NULL;
1149                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1150         }
1151
1152         if (txpeer != NULL) {
1153                 msg->msg_txpeer = NULL;
1154                 lnet_peer_ni_decref_locked(txpeer);
1155         }
1156 }
1157
1158 void
1159 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1160 {
1161         struct lnet_msg *msg;
1162
1163         if (list_empty(&rbp->rbp_msgs))
1164                 return;
1165         msg = list_first_entry(&rbp->rbp_msgs,
1166                                struct lnet_msg, msg_list);
1167         list_del(&msg->msg_list);
1168
1169         (void)lnet_post_routed_recv_locked(msg, 1);
1170 }
1171
1172 void
1173 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1174 {
1175         struct lnet_msg *msg;
1176         struct lnet_msg *tmp;
1177
1178         lnet_net_unlock(cpt);
1179
1180         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1181                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1182                              0, 0, 0, msg->msg_hdr.payload_length);
1183                 list_del_init(&msg->msg_list);
1184                 msg->msg_no_resend = true;
1185                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1186                 lnet_finalize(msg, -ECANCELED);
1187         }
1188
1189         lnet_net_lock(cpt);
1190 }
1191
1192 void
1193 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1194 {
1195         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1196         struct lnet_peer *lp;
1197         struct lnet_ni *rxni = msg->msg_rxni;
1198         struct lnet_msg *msg2;
1199
1200         if (msg->msg_rtrcredit) {
1201                 /* give back global router credits */
1202                 struct lnet_rtrbuf *rb;
1203                 struct lnet_rtrbufpool *rbp;
1204
1205                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1206                  * there until it gets one allocated, or aborts the wait
1207                  * itself */
1208                 LASSERT(msg->msg_kiov != NULL);
1209
1210                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1211                 rbp = rb->rb_pool;
1212
1213                 msg->msg_kiov = NULL;
1214                 msg->msg_rtrcredit = 0;
1215
1216                 LASSERT(rbp == lnet_msg2bufpool(msg));
1217
1218                 LASSERT((rbp->rbp_credits > 0) ==
1219                         !list_empty(&rbp->rbp_bufs));
1220
1221                 /* If routing is now turned off, we just drop this buffer and
1222                  * don't bother trying to return credits.  */
1223                 if (!the_lnet.ln_routing) {
1224                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1225                         goto routing_off;
1226                 }
1227
1228                 /* It is possible that a user has lowered the desired number of
1229                  * buffers in this pool.  Make sure we never put back
1230                  * more buffers than the stated number. */
1231                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1232                         /* Discard this buffer so we don't have too
1233                          * many. */
1234                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1235                         rbp->rbp_nbuffers--;
1236                 } else {
1237                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1238                         rbp->rbp_credits++;
1239                         if (rbp->rbp_credits <= 0)
1240                                 lnet_schedule_blocked_locked(rbp);
1241                 }
1242         }
1243
1244 routing_off:
1245         if (msg->msg_peerrtrcredit) {
1246                 LASSERT(rxpeerni);
1247                 LASSERT(rxpeerni->lpni_peer_net);
1248                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1249
1250                 /* give back peer router credits */
1251                 msg->msg_peerrtrcredit = 0;
1252
1253                 spin_lock(&rxpeerni->lpni_lock);
1254                 rxpeerni->lpni_rtrcredits++;
1255                 spin_unlock(&rxpeerni->lpni_lock);
1256
1257                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1258                 spin_lock(&lp->lp_lock);
1259
1260                 /* drop all messages which are queued to be routed on that
1261                  * peer. */
1262                 if (!the_lnet.ln_routing) {
1263                         LIST_HEAD(drop);
1264                         list_splice_init(&lp->lp_rtrq, &drop);
1265                         spin_unlock(&lp->lp_lock);
1266                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1267                 } else if (!list_empty(&lp->lp_rtrq)) {
1268                         int msg2_cpt;
1269
1270                         msg2 = list_first_entry(&lp->lp_rtrq,
1271                                                 struct lnet_msg, msg_list);
1272                         list_del(&msg2->msg_list);
1273                         msg2_cpt = msg2->msg_rx_cpt;
1274                         spin_unlock(&lp->lp_lock);
1275                         /*
1276                          * messages on the lp_rtrq can be from any NID in
1277                          * the peer, which means they might have different
1278                          * cpts. We need to make sure we lock the right
1279                          * one.
1280                          */
1281                         if (msg2_cpt != msg->msg_rx_cpt) {
1282                                 lnet_net_unlock(msg->msg_rx_cpt);
1283                                 lnet_net_lock(msg2_cpt);
1284                         }
1285                         (void) lnet_post_routed_recv_locked(msg2, 1);
1286                         if (msg2_cpt != msg->msg_rx_cpt) {
1287                                 lnet_net_unlock(msg2_cpt);
1288                                 lnet_net_lock(msg->msg_rx_cpt);
1289                         }
1290                 } else {
1291                         spin_unlock(&lp->lp_lock);
1292                 }
1293         }
1294         if (rxni != NULL) {
1295                 msg->msg_rxni = NULL;
1296                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1297         }
1298         if (rxpeerni != NULL) {
1299                 msg->msg_rxpeer = NULL;
1300                 lnet_peer_ni_decref_locked(rxpeerni);
1301         }
1302 }
1303
1304 static struct lnet_peer_ni *
1305 lnet_select_peer_ni(struct lnet_ni *best_ni, struct lnet_nid *dst_nid,
1306                     struct lnet_peer *peer,
1307                     struct lnet_peer_ni *best_lpni,
1308                     struct lnet_peer_net *peer_net)
1309 {
1310         /*
1311          * Look at the peer NIs for the destination peer that connect
1312          * to the chosen net. If a peer_ni is preferred when using the
1313          * best_ni to communicate, we use that one. If there is no
1314          * preferred peer_ni, or there are multiple preferred peer_ni,
1315          * the available transmit credits are used. If the transmit
1316          * credits are equal, we round-robin over the peer_ni.
1317          */
1318         struct lnet_peer_ni *lpni = NULL;
1319         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1320                 INT_MIN;
1321         int best_lpni_healthv = (best_lpni) ?
1322                 atomic_read(&best_lpni->lpni_healthv) : 0;
1323         bool best_lpni_is_preferred = false;
1324         bool lpni_is_preferred;
1325         int lpni_healthv;
1326         __u32 lpni_sel_prio;
1327         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1328
1329         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1330                 /*
1331                  * if the best_ni we've chosen already has this lpni
1332                  * preferred, then let's use it
1333                  */
1334                 if (best_ni) {
1335                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(
1336                                 lpni, &best_ni->ni_nid);
1337                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1338                                libcfs_nidstr(&best_ni->ni_nid),
1339                                lpni_is_preferred);
1340                 } else {
1341                         lpni_is_preferred = false;
1342                 }
1343
1344                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1345                 lpni_sel_prio = lpni->lpni_sel_priority;
1346
1347                 if (best_lpni)
1348                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1349                                 libcfs_nidstr(&lpni->lpni_nid),
1350                                 libcfs_nidstr(&best_lpni->lpni_nid),
1351                                 lpni_healthv, best_lpni_healthv,
1352                                 lpni_sel_prio, best_sel_prio,
1353                                 lpni->lpni_txcredits, best_lpni_credits,
1354                                 lpni->lpni_seq, best_lpni->lpni_seq);
1355                 else
1356                         goto select_lpni;
1357
1358                 /* pick the healthiest peer ni */
1359                 if (lpni_healthv < best_lpni_healthv)
1360                         continue;
1361                 else if (lpni_healthv > best_lpni_healthv) {
1362                         if (best_lpni_is_preferred)
1363                                 best_lpni_is_preferred = false;
1364                         goto select_lpni;
1365                 }
1366
1367                 if (lpni_sel_prio > best_sel_prio)
1368                         continue;
1369                 else if (lpni_sel_prio < best_sel_prio) {
1370                         if (best_lpni_is_preferred)
1371                                 best_lpni_is_preferred = false;
1372                         goto select_lpni;
1373                 }
1374
1375                 /* if this is a preferred peer use it */
1376                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1377                         best_lpni_is_preferred = true;
1378                         goto select_lpni;
1379                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1380                         /* this is not the preferred peer so let's ignore
1381                          * it.
1382                          */
1383                         continue;
1384                 }
1385
1386                 if (lpni->lpni_txcredits < best_lpni_credits)
1387                         /* We already have a peer that has more credits
1388                          * available than this one. No need to consider
1389                          * this peer further.
1390                          */
1391                         continue;
1392                 else if (lpni->lpni_txcredits > best_lpni_credits)
1393                         goto select_lpni;
1394
1395                 /* The best peer found so far and the current peer
1396                  * have the same number of available credits let's
1397                  * make sure to select between them using Round Robin
1398                  */
1399                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1400                         continue;
1401 select_lpni:
1402                 best_lpni_is_preferred = lpni_is_preferred;
1403                 best_lpni_healthv = lpni_healthv;
1404                 best_sel_prio = lpni_sel_prio;
1405                 best_lpni = lpni;
1406                 best_lpni_credits = lpni->lpni_txcredits;
1407         }
1408
1409         /* if we still can't find a peer ni then we can't reach it */
1410         if (!best_lpni) {
1411                 u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1412                              LNET_NID_NET(dst_nid);
1413                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1414                                 libcfs_net2str(net_id));
1415                 return NULL;
1416         }
1417
1418         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1419                libcfs_nidstr(&best_lpni->lpni_nid));
1420
1421         return best_lpni;
1422 }
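In selection order, the loop above prefers: higher peer NI health, then lower selection priority value, then a peer NI marked preferred for the chosen best_ni, then more available tx credits, and finally round-robin on lpni_seq to break exact ties. For example:

/*
 *   lpni A: healthv 1000, prio 0, txcredits 8, seq 12
 *   lpni B: healthv 1000, prio 0, txcredits 8, seq  7
 *   => B is selected: with everything else equal, the lower lpni_seq wins.
 */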
1423
1424 /*
1425  * Prerequisite: the best_ni should already be set in the sd
1426  * Find the best lpni.
1427  * If the net id is provided then restrict lpni selection on
1428  * that particular net.
1429  * Otherwise find any reachable lpni. When dealing with an MR
1430  * gateway and it has multiple lpnis which we can use
1431  * we want to select the best one from the list of reachable
1432  * ones.
1433  */
1434 static inline struct lnet_peer_ni *
1435 lnet_find_best_lpni(struct lnet_ni *lni, struct lnet_nid *dst_nid,
1436                     struct lnet_peer *peer, u32 net_id)
1437 {
1438         struct lnet_peer_net *peer_net;
1439
1440         /* find the best_lpni on any local network */
1441         if (net_id == LNET_NET_ANY) {
1442                 struct lnet_peer_ni *best_lpni = NULL;
1443                 struct lnet_peer_net *lpn;
1444                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1445                         /* no net specified; find any reachable peer ni */
1446                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1447                                 continue;
1448                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1449                                                         best_lpni, lpn);
1450                 }
1451
1452                 return best_lpni;
1453         }
1454         /* restrict on the specified net */
1455         peer_net = lnet_peer_get_net_locked(peer, net_id);
1456         if (peer_net)
1457                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1458
1459         return NULL;
1460 }
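The two calling modes look like this (variable names are placeholders):

	/* any reachable peer NI of 'peer'; no local NI chosen yet */
	lpni = lnet_find_best_lpni(NULL, NULL, peer, LNET_NET_ANY);

	/* restrict the choice to the peer's NIs on the destination's net */
	lpni = lnet_find_best_lpni(best_ni, dst_nid, peer, LNET_NID_NET(dst_nid));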
1461
1462 static int
1463 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1464 {
1465         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1466                 return 1;
1467
1468         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1469                 return -1;
1470
1471         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1472                 return 1;
1473
1474         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1475                 return -1;
1476
1477         return 0;
1478 }
1479
1480 /* Compare route priorities and hop counts */
1481 static int
1482 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1483 {
1484         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1485         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1486
1487         if (r1->lr_priority < r2->lr_priority)
1488                 return 1;
1489
1490         if (r1->lr_priority > r2->lr_priority)
1491                 return -1;
1492
1493         if (r1_hops < r2_hops)
1494                 return 1;
1495
1496         if (r1_hops > r2_hops)
1497                 return -1;
1498
1499         return 0;
1500 }
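
/* Worked example (editorial): a route with lr_priority 0 beats one with
 * lr_priority 1 regardless of hop count; between equal priorities the route
 * with fewer hops wins, with LNET_UNDEFINED_HOPS treated as one hop. If both
 * priority and hop count are equal the function returns 0 and the caller
 * breaks the tie on the gateways' peer NIs and on lr_seq.
 */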
1501
1502 static struct lnet_route *
1503 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1504                        struct lnet_peer_ni *remote_lpni,
1505                        struct lnet_route **prev_route,
1506                        struct lnet_peer_ni **gwni)
1507 {
1508         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1509         struct lnet_route *best_route;
1510         struct lnet_route *last_route;
1511         struct lnet_route *route;
1512         int rc;
1513         bool best_rte_is_preferred = false;
1514         struct lnet_nid *gw_pnid;
1515
1516         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1517                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1518
1519         best_route = last_route = NULL;
1520         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1521                 if (!lnet_is_route_alive(route))
1522                         continue;
1523                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1524
1525                 /* no protection on below fields, but it's harmless */
1526                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1527                         last_route = route;
1528
1529                 /* if the best route found is in the preferred list then
1530                  * tag it as preferred and use it later on. But if we
1531                  * didn't find any routes which are on the preferred list
1532                  * then just use the best route possible.
1533                  */
1534                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1535
1536                 if (!best_route || (rc && !best_rte_is_preferred)) {
1537                         /* Restrict the selection of the router NI on the
1538                  * src_net provided. If the src_net is LNET_NET_ANY,
1539                          * then select the best interface available.
1540                          */
1541                         lpni = lnet_find_best_lpni(NULL, NULL,
1542                                                    route->lr_gateway,
1543                                                    src_net);
1544                         if (!lpni) {
1545                                 CDEBUG(D_NET,
1546                                        "Gateway %s does not have a peer NI on net %s\n",
1547                                        libcfs_nidstr(gw_pnid),
1548                                        libcfs_net2str(src_net));
1549                                 continue;
1550                         }
1551                 }
1552
1553                 if (rc && !best_rte_is_preferred) {
1554                         /* This is the first preferred route we found,
1555                          * so it beats any route found previously
1556                          */
1557                         best_route = route;
1558                         if (!last_route)
1559                                 last_route = route;
1560                         best_gw_ni = lpni;
1561                         best_rte_is_preferred = true;
1562                         CDEBUG(D_NET, "preferred gw = %s\n",
1563                                libcfs_nidstr(gw_pnid));
1564                         continue;
1565                 } else if ((!rc) && best_rte_is_preferred)
1566                         /* The best route we found so far is in the preferred
1567                          * list, so it beats any non-preferred route
1568                          */
1569                         continue;
1570
1571                 if (!best_route) {
1572                         best_route = last_route = route;
1573                         best_gw_ni = lpni;
1574                         continue;
1575                 }
1576
1577                 rc = lnet_compare_routes(route, best_route);
1578                 if (rc == -1)
1579                         continue;
1580
1581                 /* Restrict the selection of the router NI on the
1582                  * src_net provided. If the src_net is LNET_NET_ANY,
1583                  * then select the best interface available.
1584                  */
1585                 lpni = lnet_find_best_lpni(NULL, NULL, route->lr_gateway,
1586                                            src_net);
1587                 if (!lpni) {
1588                         CDEBUG(D_NET,
1589                                "Gateway %s does not have a peer NI on net %s\n",
1590                                libcfs_nidstr(gw_pnid),
1591                                libcfs_net2str(src_net));
1592                         continue;
1593                 }
1594
1595                 if (rc == 1) {
1596                         best_route = route;
1597                         best_gw_ni = lpni;
1598                         continue;
1599                 }
1600
1601                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1602                 if (rc == -1)
1603                         continue;
1604
1605                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1606                         best_route = route;
1607                         best_gw_ni = lpni;
1608                         continue;
1609                 }
1610         }
1611
1612         *prev_route = last_route;
1613         *gwni = best_gw_ni;
1614
1615         return best_route;
1616 }
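
/* Summary of the selection above (editorial): only alive routes are
 * considered; a route whose gateway is on the destination peer NI's
 * preferred router list beats any non-preferred route; remaining candidates
 * are ordered by lnet_compare_routes() (priority, then hops), then by
 * lnet_compare_gw_lpnis() on the chosen gateway NIs, and finally by lr_seq
 * for Round Robin. *prev_route returns the route with the largest lr_seq
 * seen, so the caller can bump the chosen route's lr_seq past it once the
 * route is actually used.
 */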
1617
1618 static inline unsigned int
1619 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1620 {
1621         if (dev_idx == UINT_MAX)
1622                 return UINT_MAX;
1623
1624         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1625             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1626                 return UINT_MAX;
1627
1628         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1629 }
1630
1631 static struct lnet_ni *
1632 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1633                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1634                  struct lnet_msg *msg, int md_cpt)
1635 {
1636         struct lnet_libmd *md = msg->msg_md;
1637         unsigned int offset = msg->msg_offset;
1638         unsigned int shortest_distance;
1639         struct lnet_ni *ni = NULL;
1640         int best_credits;
1641         int best_healthv;
1642         __u32 best_sel_prio;
1643         unsigned int best_dev_prio;
1644         int best_ni_fatal;
1645         unsigned int dev_idx = UINT_MAX;
1646         bool gpu = md ? (md->md_flags & LNET_MD_FLAG_GPU) : false;
1647
1648         if (gpu) {
1649                 struct page *page = lnet_get_first_page(md, offset);
1650
1651                 dev_idx = lnet_get_dev_idx(page);
1652         }
1653
1654         /*
1655          * If there is no peer_ni that we can send to on this network,
1656          * then there is no point in looking for a new best_ni here.
1657         */
1658         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1659                 return best_ni;
1660
1661         if (best_ni == NULL) {
1662                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1663                 shortest_distance = UINT_MAX;
1664                 best_dev_prio = UINT_MAX;
1665                 best_credits = INT_MIN;
1666                 best_healthv = 0;
1667                 best_ni_fatal = true;
1668         } else {
1669                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1670                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1671                                                      best_ni->ni_dev_cpt);
1672                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1673                 best_healthv = atomic_read(&best_ni->ni_healthv);
1674                 best_sel_prio = best_ni->ni_sel_priority;
1675                 best_ni_fatal = atomic_read(&best_ni->ni_fatal_error_on);
1676         }
1677
1678         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1679                 unsigned int distance;
1680                 int ni_credits;
1681                 int ni_healthv;
1682                 int ni_fatal;
1683                 __u32 ni_sel_prio;
1684                 unsigned int ni_dev_prio;
1685
1686                 ni_credits = atomic_read(&ni->ni_tx_credits);
1687                 ni_healthv = atomic_read(&ni->ni_healthv);
1688                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1689                 ni_sel_prio = ni->ni_sel_priority;
1690
1691                 /*
1692                  * calculate the distance from the CPT on which
1693                  * the message memory is allocated to the CPT of
1694                  * the NI's physical device
1695                  */
1696                 distance = cfs_cpt_distance(lnet_cpt_table(),
1697                                             md_cpt,
1698                                             ni->ni_dev_cpt);
1699
1700                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1701
1702                 /*
1703                  * All distances smaller than the NUMA range
1704                  * are treated equally.
1705                  */
1706                 if (!gpu && distance < lnet_numa_range)
1707                         distance = lnet_numa_range;
1708
1709                 /*
1710                  * Select on health, selection policy, direct dma prio,
1711                  * shorter distance, available credits, then round-robin.
1712                  */
1713                 if (best_ni)
1714                         CDEBUG(D_NET, "compare ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1715                                libcfs_nidstr(&ni->ni_nid),
1716                                ni_fatal ? "y" : "n", ni_credits, distance,
1717                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1718                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1719                                : "not selected",
1720                                best_ni_fatal ? "y" : "n", best_credits,
1721                                shortest_distance,
1722                                (best_ni) ? best_ni->ni_seq : 0,
1723                                best_sel_prio, best_dev_prio, best_healthv);
1724                 else
1725                         goto select_ni;
1726
1727                 if (ni_fatal && !best_ni_fatal)
1728                         continue;
1729                 else if (!ni_fatal && best_ni_fatal)
1730                         goto select_ni;
1731
1732                 if (ni_healthv < best_healthv)
1733                         continue;
1734                 else if (ni_healthv > best_healthv)
1735                         goto select_ni;
1736
1737                 if (ni_sel_prio > best_sel_prio)
1738                         continue;
1739                 else if (ni_sel_prio < best_sel_prio)
1740                         goto select_ni;
1741
1742                 if (ni_dev_prio > best_dev_prio)
1743                         continue;
1744                 else if (ni_dev_prio < best_dev_prio)
1745                         goto select_ni;
1746
1747                 if (distance > shortest_distance)
1748                         continue;
1749                 else if (distance < shortest_distance)
1750                         goto select_ni;
1751
1752                 if (ni_credits < best_credits)
1753                         continue;
1754                 else if (ni_credits > best_credits)
1755                         goto select_ni;
1756
1757                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1758                         continue;
1759
1760 select_ni:
1761                 best_sel_prio = ni_sel_prio;
1762                 best_dev_prio = ni_dev_prio;
1763                 shortest_distance = distance;
1764                 best_healthv = ni_healthv;
1765                 best_ni = ni;
1766                 best_credits = ni_credits;
1767                 best_ni_fatal = ni_fatal;
1768         }
1769
1770         CDEBUG(D_NET, "selected best_ni %s\n",
1771                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1772
1773         return best_ni;
1774 }
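
/* Worked example (editorial, values are hypothetical): with two NIs on the
 * same net and neither flagged with a fatal error, health 100 vs 90 selects
 * the healthier NI immediately; if health, selection priority, device
 * priority and CPT distance are all equal, the NI with more tx credits wins;
 * a complete tie falls back to Round Robin on ni_seq, where the lower
 * (least recently used) sequence is selected.
 */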
1775
1776 static bool
1777 lnet_reserved_msg(struct lnet_msg *msg)
1778 {
1779         if (msg->msg_type == LNET_MSG_PUT) {
1780                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1781                         return true;
1782         } else if (msg->msg_type == LNET_MSG_GET) {
1783                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1784                         return true;
1785         }
1786         return false;
1787 }
1788
1789 /* Can the specified message trigger peer discovery?
1790  *
1791  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1792  * because such traffic is required to perform discovery. We therefore
1793  * exclude all GET and PUT on that portal. We also exclude all ACK and
1794  * REPLY traffic, but that is because the portal is not tracked in the
1795  * message structure for these message types. We could restrict this
1796  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1797  */
1798 static bool
1799 lnet_msg_discovery(struct lnet_msg *msg)
1800 {
1801         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1802 }
1803
1804 /* Is the specified message an LNet ping?
1805  */
1806 static bool
1807 lnet_msg_is_ping(struct lnet_msg *msg)
1808 {
1809         if (msg->msg_type == LNET_MSG_GET &&
1810             msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1811                 return true;
1812
1813         return false;
1814 }
1815
1816 #define SRC_SPEC        0x0001
1817 #define SRC_ANY         0x0002
1818 #define LOCAL_DST       0x0004
1819 #define REMOTE_DST      0x0008
1820 #define MR_DST          0x0010
1821 #define NMR_DST         0x0020
1822 #define SND_RESP        0x0040
1823
1824 /* The following two defines are used for return codes */
1825 #define REPEAT_SEND     0x1000
1826 #define PASS_THROUGH    0x2000
1827
1828 /* The different cases lnet_select pathway needs to handle */
1829 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1830 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1831 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1832 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1833 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1834 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1835 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1836 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
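
/* Worked example (editorial, values follow from the defines above): a send
 * with no source NID specified, to a destination on a local network, where
 * the peer is multi-rail, is classified as
 *
 *	SRC_ANY | LOCAL_DST | MR_DST == SRC_ANY_LOCAL_MR_DST == 0x0016
 *
 * Routed responses additionally carry SND_RESP, which is why checks such as
 * (send_case & REMOTE_DST && send_case & SND_RESP) select the reply path.
 */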
1837
1838 static int
1839 lnet_handle_lo_send(struct lnet_send_data *sd)
1840 {
1841         struct lnet_msg *msg = sd->sd_msg;
1842         int cpt = sd->sd_cpt;
1843
1844         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1845                 return -ESHUTDOWN;
1846
1847         /* No send credit hassles with LOLND */
1848         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1849         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1850         if (!msg->msg_routing)
1851                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1852         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1853         lnet_msg_commit(msg, cpt);
1854         msg->msg_txni = the_lnet.ln_loni;
1855
1856         return LNET_CREDIT_OK;
1857 }
1858
1859 static int
1860 lnet_handle_send(struct lnet_send_data *sd)
1861 {
1862         struct lnet_ni *best_ni = sd->sd_best_ni;
1863         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1864         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1865         struct lnet_msg *msg = sd->sd_msg;
1866         int cpt2;
1867         __u32 send_case = sd->sd_send_case;
1868         int rc;
1869         __u32 routing = send_case & REMOTE_DST;
1870         struct lnet_rsp_tracker *rspt;
1871
1872         /* Increment sequence number of the selected peer, peer net,
1873          * local ni and local net so that we pick the next ones
1874          * in Round Robin.
1875          */
1876         best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1877         best_lpni->lpni_peer_net->lpn_seq =
1878                 best_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1879         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1880         the_lnet.ln_net_seq++;
1881         best_ni->ni_net->net_seq = the_lnet.ln_net_seq;
1882         best_ni->ni_seq = best_ni->ni_net->net_seq;
1883
1884         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1885                libcfs_nidstr(&best_ni->ni_nid),
1886                best_ni->ni_seq, best_ni->ni_net->net_seq,
1887                atomic_read(&best_ni->ni_tx_credits),
1888                best_ni->ni_sel_priority,
1889                libcfs_nidstr(&best_lpni->lpni_nid),
1890                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1891                best_lpni->lpni_txcredits,
1892                best_lpni->lpni_sel_priority);
1893
1894         /*
1895          * grab a reference on the peer_ni so it sticks around even if
1896          * we need to drop and relock the lnet_net_lock below.
1897          */
1898         lnet_peer_ni_addref_locked(best_lpni);
1899
1900         /*
1901          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1902          * message. This ensures that we get a CPT that is correct for
1903          * the NI when the NI has been restricted to a subset of all CPTs.
1904          * If the selected CPT differs from the one currently locked, we
1905          * must unlock and relock the lnet_net_lock(), and then check whether
1906          * the configuration has changed. We don't have a hold on the best_ni
1907          * yet, and it may have vanished.
1908          */
1909         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1910         if (sd->sd_cpt != cpt2) {
1911                 __u32 seq = lnet_get_dlc_seq_locked();
1912                 lnet_net_unlock(sd->sd_cpt);
1913                 sd->sd_cpt = cpt2;
1914                 lnet_net_lock(sd->sd_cpt);
1915                 if (seq != lnet_get_dlc_seq_locked()) {
1916                         lnet_peer_ni_decref_locked(best_lpni);
1917                         return REPEAT_SEND;
1918                 }
1919         }
1920
1921         /*
1922          * store the best_lpni in the message right away to avoid having
1923          * to do the same operation under different conditions
1924          */
1925         msg->msg_txpeer = best_lpni;
1926         msg->msg_txni = best_ni;
1927
1928         /*
1929          * grab a reference for the best_ni since now it's in use in this
1930          * send. The reference will be dropped in lnet_finalize()
1931          */
1932         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1933
1934         /*
1935          * Always set the target.nid to the best peer picked. Either the
1936          * NID will be one of the peer NIDs selected, or the same NID as
1937          * what was originally set in the target or it will be the NID of
1938          * a router if this message should be routed
1939          */
1940         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1941
1942         /*
1943          * lnet_msg_commit assigns the correct cpt to the message, which
1944          * is used to decrement the correct refcount on the ni when it's
1945          * time to return the credits
1946          */
1947         lnet_msg_commit(msg, sd->sd_cpt);
1948
1949         /*
1950          * If we are routing the message then we keep the src_nid that was
1951          * set by the originator. If we are not routing then we are the
1952          * originator and set it here.
1953          */
1954         if (!msg->msg_routing)
1955                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1956
1957         if (routing) {
1958                 msg->msg_target_is_router = 1;
1959                 msg->msg_target.pid = LNET_PID_LUSTRE;
1960                 /*
1961                  * since we're routing we want to ensure that the
1962                  * msg_hdr.dest_nid is set to the final destination. When
1963                  * the router receives this message it knows how to route
1964                  * it.
1965                  *
1966                  * final_dst_lpni is set at the beginning of the
1967                  * lnet_select_pathway() function and is never changed.
1968                  * It's safe to use it here.
1969                  */
1970                 final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1971                 final_dst_lpni->lpni_peer_net->lpn_seq =
1972                         final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1973                 final_dst_lpni->lpni_seq =
1974                         final_dst_lpni->lpni_peer_net->lpn_seq;
1975                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1976         } else {
1977                 /*
1978                  * if we're not routing set the dest_nid to the best peer
1979                  * ni NID that we picked earlier in the algorithm.
1980                  */
1981                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1982         }
1983
1984         /*
1985          * if we have a response tracker block, update it with the next
1986          * hop nid
1987          */
1988         if (msg->msg_md) {
1989                 rspt = msg->msg_md->md_rspt_ptr;
1990                 if (rspt) {
1991                         rspt->rspt_next_hop_nid =
1992                                 msg->msg_txpeer->lpni_nid;
1993                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1994                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1995                 }
1996         }
1997
1998         rc = lnet_post_send_locked(msg, 0);
1999
2000         if (!rc)
2001                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
2002                        libcfs_nidstr(&msg->msg_hdr.src_nid),
2003                        libcfs_nidstr(&msg->msg_txni->ni_nid),
2004                        libcfs_nidstr(&sd->sd_src_nid),
2005                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
2006                        libcfs_nidstr(&sd->sd_dst_nid),
2007                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
2008                        libcfs_nidstr(&sd->sd_rtr_nid),
2009                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
2010
2011         return rc;
2012 }
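
/* Editorial note on the REPEAT_SEND path above: if committing the message
 * required switching to a different CPT and the DLC configuration sequence
 * changed while the net lock was dropped, the previously selected best_ni
 * may no longer be valid, so the caller is expected to re-run the selection
 * rather than send on a possibly vanished NI.
 */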
2013
2014 static inline void
2015 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
2016                          struct lnet_msg *msg)
2017 {
2018         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
2019             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
2020                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
2021                        libcfs_nidstr(&lni->ni_nid),
2022                        libcfs_nidstr(&lpni->lpni_nid));
2023                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
2024         }
2025 }
2026
2027 /*
2028  * Source Specified
2029  * Local Destination
2030  * non-mr peer
2031  *
2032  * use the source and destination NIDs as the pathway
2033  */
2034 static int
2035 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2036 {
2037         /* the destination lpni is set before we get here. */
2038
2039         /* find local NI */
2040         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2041         if (!sd->sd_best_ni) {
2042                 CERROR("Can't send to %s: src %s is not a local nid\n",
2043                        libcfs_nidstr(&sd->sd_dst_nid),
2044                        libcfs_nidstr(&sd->sd_src_nid));
2045                 return -EINVAL;
2046         }
2047
2048         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2049
2050         return lnet_handle_send(sd);
2051 }
2052
2053 /*
2054  * Source Specified
2055  * Local Destination
2056  * MR Peer
2057  *
2058  * Don't run the selection algorithm on the peer NIs. By specifying the
2059  * local NID, we're also saying that we should always use the destination NID
2060  * provided. This handles the case where we should be using the same
2061  * destination NID for all the messages which belong to the same RPC
2062  * request.
2063  */
2064 static int
2065 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2066 {
2067         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2068         if (!sd->sd_best_ni) {
2069                 CERROR("Can't send to %s: src %s is not a local nid\n",
2070                        libcfs_nidstr(&sd->sd_dst_nid),
2071                        libcfs_nidstr(&sd->sd_src_nid));
2072                 return -EINVAL;
2073         }
2074
2075         if (sd->sd_best_lpni &&
2076             nid_same(&sd->sd_best_lpni->lpni_nid,
2077                       &the_lnet.ln_loni->ni_nid))
2078                 return lnet_handle_lo_send(sd);
2079         else if (sd->sd_best_lpni)
2080                 return lnet_handle_send(sd);
2081
2082         CERROR("can't send to %s. no NI on %s\n",
2083                libcfs_nidstr(&sd->sd_dst_nid),
2084                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2085
2086         return -EHOSTUNREACH;
2087 }
2088
2089 struct lnet_ni *
2090 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2091                               struct lnet_peer *peer,
2092                               struct lnet_peer_net *peer_net,
2093                               struct lnet_msg *msg,
2094                               int cpt)
2095 {
2096         struct lnet_net *local_net;
2097         struct lnet_ni *best_ni;
2098
2099         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2100         if (!local_net)
2101                 return NULL;
2102
2103         /*
2104          * Iterate through the NIs in this local Net and select
2105          * the NI to send from. The selection is determined by
2106          * these criteria in the following priority:
2107          *      1. NI health and selection policy priority
2108          *      2. Device (DMA) priority and NUMA/CPT distance
2109          *      3. NI available credits, then Round Robin
2110          */
2111         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2112                                    peer, peer_net, msg, cpt);
2113
2114         return best_ni;
2115 }
2116
2117 static int
2118 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2119                              int cpt)
2120 {
2121         struct lnet_peer *peer;
2122         struct lnet_peer_ni *new_lpni;
2123         int rc;
2124
2125         lnet_peer_ni_addref_locked(lpni);
2126
2127         peer = lpni->lpni_peer_net->lpn_peer;
2128
2129         if (lnet_peer_gw_discovery(peer)) {
2130                 lnet_peer_ni_decref_locked(lpni);
2131                 return 0;
2132         }
2133
2134         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2135                 lnet_peer_ni_decref_locked(lpni);
2136                 return 0;
2137         }
2138
2139         rc = lnet_discover_peer_locked(lpni, cpt, false);
2140         if (rc) {
2141                 lnet_peer_ni_decref_locked(lpni);
2142                 return rc;
2143         }
2144
2145         new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid);
2146         if (!new_lpni) {
2147                 lnet_peer_ni_decref_locked(lpni);
2148                 return -ENOENT;
2149         }
2150
2151         peer = new_lpni->lpni_peer_net->lpn_peer;
2152         spin_lock(&peer->lp_lock);
2153         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2154                 /* The peer NI did not change and the peer is up to date.
2155                  * Nothing more to do.
2156                  */
2157                 spin_unlock(&peer->lp_lock);
2158                 lnet_peer_ni_decref_locked(lpni);
2159                 lnet_peer_ni_decref_locked(new_lpni);
2160                 return 0;
2161         }
2162         spin_unlock(&peer->lp_lock);
2163
2164         /* Either the peer NI changed during discovery, or the peer isn't up
2165          * to date. In both cases we want to queue the message on the
2166          * (possibly new) peer's pending queue and queue the peer for discovery
2167          */
2168         msg->msg_sending = 0;
2169         msg->msg_txpeer = NULL;
2170         lnet_net_unlock(cpt);
2171         lnet_peer_queue_message(peer, msg);
2172         lnet_net_lock(cpt);
2173
2174         lnet_peer_ni_decref_locked(lpni);
2175         lnet_peer_ni_decref_locked(new_lpni);
2176
2177         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2178                msg, libcfs_nidstr(&peer->lp_primary_nid));
2179
2180         return LNET_DC_WAIT;
2181 }
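
/* Return value sketch (editorial): 0 means the send can proceed now (gateway
 * discovery already active, this message type does not trigger discovery, or
 * the peer is already up to date); LNET_DC_WAIT means the message has been
 * queued on the peer and will be sent once discovery completes; a negative
 * errno (e.g. -ENOENT if the peer NI vanished) fails the send.
 */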
2182
2183 static int
2184 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2185                              struct lnet_nid *dst_nid,
2186                              struct lnet_peer_ni **gw_lpni,
2187                              struct lnet_peer **gw_peer)
2188 {
2189         int rc;
2190         struct lnet_peer *gw;
2191         struct lnet_peer *lp;
2192         struct lnet_peer_net *lpn;
2193         struct lnet_peer_net *best_lpn = NULL;
2194         struct lnet_remotenet *rnet, *best_rnet = NULL;
2195         struct lnet_route *best_route = NULL;
2196         struct lnet_route *last_route = NULL;
2197         struct lnet_peer_ni *lpni = NULL;
2198         struct lnet_peer_ni *gwni = NULL;
2199         bool route_found = false;
2200         struct lnet_nid *src_nid =
2201                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2202                 ? &sd->sd_src_nid
2203                 : &sd->sd_best_ni->ni_nid;
2204         int best_lpn_healthv = 0;
2205         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2206
2207         CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n",
2208                LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified",
2209                libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid),
2210                libcfs_nidstr(&sd->sd_dst_nid));
2211
2212         /* If a router nid was specified then we are replying to a GET or
2213          * sending an ACK. In this case we use the gateway associated with the
2214          * specified router nid.
2215          */
2216         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2217                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2218                 if (gwni) {
2219                         gw = gwni->lpni_peer_net->lpn_peer;
2220                         lnet_peer_ni_decref_locked(gwni);
2221                         if (gw->lp_rtr_refcount)
2222                                 route_found = true;
2223                 } else {
2224                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2225                               libcfs_nidstr(&sd->sd_rtr_nid));
2226                 }
2227         }
2228
2229         if (!route_found) {
2230                 if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) {
2231                         /* If I'm routing this message then I need to find the
2232                          * next hop based on the destination NID
2233                          *
2234                          * We also find next hop based on the destination NID
2235                          * if the source NI was specified
2236                          */
2237                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2238                         if (!best_rnet) {
2239                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2240                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2241                                                 "any local NI" :
2242                                                 libcfs_nidstr(src_nid),
2243                                        libcfs_nidstr(&sd->sd_dst_nid));
2244                                 return -EHOSTUNREACH;
2245                         }
2246                         CDEBUG(D_NET, "best_rnet %s\n",
2247                                libcfs_net2str(best_rnet->lrn_net));
2248                 } else {
2249                         /* we've already looked up the initial lpni using
2250                          * dst_nid
2251                          */
2252                         lpni = sd->sd_best_lpni;
2253                         /* the peer tree must be in existence */
2254                         LASSERT(lpni && lpni->lpni_peer_net &&
2255                                 lpni->lpni_peer_net->lpn_peer);
2256                         lp = lpni->lpni_peer_net->lpn_peer;
2257
2258                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2259                                 /* is this remote network reachable?  */
2260                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2261                                 if (!rnet)
2262                                         continue;
2263
2264                                 if (!best_lpn)
2265                                         goto use_lpn;
2266                                 else
2267                                         CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n",
2268                                                libcfs_net2str(lpn->lpn_net_id),
2269                                                libcfs_net2str(best_lpn->lpn_net_id),
2270                                                lpn->lpn_healthv,
2271                                                best_lpn->lpn_healthv,
2272                                                lpn->lpn_sel_priority,
2273                                                best_lpn->lpn_sel_priority,
2274                                                lpn->lpn_seq,
2275                                                best_lpn->lpn_seq);
2276
2277                                 /* select the preferred peer net */
2278                                 if (best_lpn_healthv > lpn->lpn_healthv)
2279                                         continue;
2280                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2281                                         goto use_lpn;
2282
2283                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2284                                         continue;
2285                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2286                                         goto use_lpn;
2287
2288                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2289                                         continue;
2290 use_lpn:
2291                                 best_lpn_healthv = lpn->lpn_healthv;
2292                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2293                                 best_lpn = lpn;
2294                                 best_rnet = rnet;
2295                         }
2296
2297                         if (!best_lpn) {
2298                                 CERROR("peer %s has no available nets\n",
2299                                        libcfs_nidstr(&sd->sd_dst_nid));
2300                                 return -EHOSTUNREACH;
2301                         }
2302
2303                         CDEBUG(D_NET, "selected best_lpn %s\n",
2304                                libcfs_net2str(best_lpn->lpn_net_id));
2305
2306                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2307                                                                &sd->sd_dst_nid,
2308                                                                lp,
2309                                                                best_lpn->lpn_net_id);
2310                         if (!sd->sd_best_lpni) {
2311                                 CERROR("peer %s is unreachable\n",
2312                                        libcfs_nidstr(&sd->sd_dst_nid));
2313                                 return -EHOSTUNREACH;
2314                         }
2315
2316                         /* We're attempting to round robin over the remote peer
2317                          * NIs, so update the final destination we selected
2318                          */
2319                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2320                 }
2321
2322                 /*
2323                  * find the best route. Restrict the selection on the net of the
2324                  * local NI if we've already picked the local NI to send from.
2325                  * Otherwise, let's pick any route we can find and then find
2326                  * a local NI we can reach the route's gateway on. Any route we
2327                  * select will be reachable by virtue of the restriction we have
2328                  * when adding a route.
2329                  */
2330                 best_route = lnet_find_route_locked(best_rnet,
2331                                                     LNET_NID_NET(src_nid),
2332                                                     sd->sd_best_lpni,
2333                                                     &last_route, &gwni);
2334
2335                 if (!best_route) {
2336                         CERROR("no route to %s from %s\n",
2337                                libcfs_nidstr(dst_nid),
2338                                libcfs_nidstr(src_nid));
2339                         return -EHOSTUNREACH;
2340                 }
2341
2342                 if (!gwni) {
2343                         CERROR("Internal Error. Route expected to %s from %s\n",
2344                                libcfs_nidstr(dst_nid),
2345                                libcfs_nidstr(src_nid));
2346                         return -EFAULT;
2347                 }
2348
2349                 gw = best_route->lr_gateway;
2350                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2351         }
2352
2353         /*
2354          * If the router checker is not active then discover the gateway here.
2355          * This ensures we are able to take advantage of multi-rail routing, but
2356          * if the router checker is active then we do not unnecessarily delay
2357          * messages while the gateway is being checked by the dedicated monitor
2358          * thread.
2359          *
2360          * NB: We're only checking the alive_router_check_interval here, rather
2361          * than calling lnet_router_checker_active(), because the other
2362          * conditions that are checked by that function are either
2363          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2364          * is not empty)
2365          */
2366         if (alive_router_check_interval <= 0) {
2367                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2368                 if (rc)
2369                         return rc;
2370         }
2371
2372         if (!sd->sd_best_ni) {
2373                 lpn = gwni->lpni_peer_net;
2374                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2375                                                                sd->sd_msg,
2376                                                                sd->sd_md_cpt);
2377                 if (!sd->sd_best_ni) {
2378                        CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2379                                libcfs_net2str(lpn->lpn_net_id),
2380                                libcfs_nidstr(&sd->sd_src_nid));
2381                         return -EFAULT;
2382                 }
2383         }
2384
2385         *gw_lpni = gwni;
2386         *gw_peer = gw;
2387
2388         /*
2389          * increment the sequence number since now we're sure we're
2390          * going to use this route
2391          */
2392         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2393                 LASSERT(best_route && last_route);
2394                 best_route->lr_seq = last_route->lr_seq + 1;
2395         }
2396
2397         return 0;
2398 }
2399
2400 /*
2401  * Handle two cases:
2402  *
2403  * Case 1:
2404  *  Source specified
2405  *  Remote destination
2406  *  Non-MR destination
2407  *
2408  * Case 2:
2409  *  Source specified
2410  *  Remote destination
2411  *  MR destination
2412  *
2413  * The handling of these two cases is similar. Even though the destination
2414  * can be MR or non-MR, we'll deal directly with the router.
2415  */
2416 static int
2417 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2418 {
2419         int rc;
2420         struct lnet_peer_ni *gw_lpni = NULL;
2421         struct lnet_peer *gw_peer = NULL;
2422
2423         /* find local NI */
2424         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2425         if (!sd->sd_best_ni) {
2426                 CERROR("Can't send to %s: src %s is not a local nid\n",
2427                        libcfs_nidstr(&sd->sd_dst_nid),
2428                        libcfs_nidstr(&sd->sd_src_nid));
2429                 return -EINVAL;
2430         }
2431
2432         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2433                                           &gw_lpni, &gw_peer);
2434         if (rc)
2435                 return rc;
2436
2437         if (sd->sd_send_case & NMR_DST)
2438                 /*
2439                  * since the final destination is non-MR let's set its preferred
2440                  * NID before we send
2441                  */
2442                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2443                                          sd->sd_msg);
2444
2445         /*
2446          * We're going to send to the gw found so let's set its
2447          * info
2448          */
2449         sd->sd_peer = gw_peer;
2450         sd->sd_best_lpni = gw_lpni;
2451
2452         return lnet_handle_send(sd);
2453 }
2454
2455 struct lnet_ni *
2456 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2457                                struct lnet_msg *msg, bool discovery)
2458 {
2459         struct lnet_peer_net *lpn = NULL;
2460         struct lnet_peer_net *best_lpn = NULL;
2461         struct lnet_net *net = NULL;
2462         struct lnet_net *best_net = NULL;
2463         struct lnet_ni *best_ni = NULL;
2464         int best_lpn_healthv = 0;
2465         int best_net_healthv = 0;
2466         int net_healthv;
2467         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2468         __u32 lpn_sel_prio;
2469         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2470         __u32 net_sel_prio;
2471
2472         /* If lp_disc_net_id is set, this peer is a router undergoing
2473          * discovery, and this message is an LNet ping, then this may be a
2474          * discovery message and we need to select an NI on the peer net
2475          * specified by lp_disc_net_id
2476          */
2477         if (peer->lp_disc_net_id &&
2478             (peer->lp_state & LNET_PEER_RTR_DISCOVERY) &&
2479             lnet_msg_is_ping(msg)) {
2480                 best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id);
2481                 if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id))
2482                         goto select_best_ni;
2483         }
2484
2485         /*
2486          * The peer can have multiple interfaces, some of them can be on
2487          * the local network and others on a routed network. We should
2488          * prefer the local network. However if the local network is not
2489          * available then we need to try the routed network
2490          */
2491
2492         /* go through all the peer nets and find the best_ni */
2493         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2494                 /*
2495                  * The peer's list of nets can contain non-local nets. We
2496                  * want to only examine the local ones.
2497                  */
2498                 net = lnet_get_net_locked(lpn->lpn_net_id);
2499                 if (!net)
2500                         continue;
2501
2502                 lpn_sel_prio = lpn->lpn_sel_priority;
2503                 net_healthv = lnet_get_net_healthv_locked(net);
2504                 net_sel_prio = net->net_sel_priority;
2505
2506                 if (!best_lpn)
2507                         goto select_lpn;
2508                 else
2509                         CDEBUG(D_NET,
2510                                "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n",
2511                                libcfs_net2str(lpn->lpn_net_id),
2512                                libcfs_net2str(best_lpn->lpn_net_id),
2513                                lpn->lpn_healthv,
2514                                best_lpn_healthv,
2515                                lpn_sel_prio,
2516                                best_lpn_sel_prio,
2517                                net_healthv,
2518                                best_net_healthv,
2519                                net_sel_prio,
2520                                best_net_sel_prio,
2521                                lpn->lpn_seq,
2522                                best_lpn->lpn_seq,
2523                                net->net_seq,
2524                                best_net->net_seq);
2525
2526                 /* always select the lpn with the best health */
2527                 if (best_lpn_healthv > lpn->lpn_healthv)
2528                         continue;
2529                 else if (best_lpn_healthv < lpn->lpn_healthv)
2530                         goto select_lpn;
2531
2532                 /* select the preferred peer and local nets */
2533                 if (best_lpn_sel_prio < lpn_sel_prio)
2534                         continue;
2535                 else if (best_lpn_sel_prio > lpn_sel_prio)
2536                         goto select_lpn;
2537
2538                 if (best_net_healthv > net_healthv)
2539                         continue;
2540                 else if (best_net_healthv < net_healthv)
2541                         goto select_lpn;
2542
2543                 if (best_net_sel_prio < net_sel_prio)
2544                         continue;
2545                 else if (best_net_sel_prio > net_sel_prio)
2546                         goto select_lpn;
2547
2548                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2549                         continue;
2550                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2551                         goto select_lpn;
2552
2553                 /* round robin over the local networks */
2554                 if (best_net->net_seq <= net->net_seq)
2555                         continue;
2556
2557 select_lpn:
2558                 best_net_healthv = net_healthv;
2559                 best_net_sel_prio = net_sel_prio;
2560                 best_lpn_healthv = lpn->lpn_healthv;
2561                 best_lpn_sel_prio = lpn_sel_prio;
2562                 best_lpn = lpn;
2563                 best_net = net;
2564         }
2565
2566         if (best_lpn) {
2567                 /* Select the best NI on the same net as best_lpn chosen
2568                  * above
2569                  */
2570 select_best_ni:
2571                 CDEBUG(D_NET, "selected best_lpn %s\n",
2572                        libcfs_net2str(best_lpn->lpn_net_id));
2573                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2574                                                         msg, md_cpt);
2575         }
2576
2577         return best_ni;
2578 }
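
/* Editorial summary of the cascade above (aside from the lp_disc_net_id fast
 * path for router discovery pings): among the peer's nets that are also
 * local, the healthiest peer net wins, then peer net selection priority,
 * then local net health, then local net selection priority, with lpn_seq and
 * net_seq providing the final Round Robin tie-break; the best NI is then
 * chosen on that net by lnet_get_best_ni() via
 * lnet_find_best_ni_on_spec_net().
 */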
2579
2580 static struct lnet_ni *
2581 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2582 {
2583         struct lnet_ni *best_ni = NULL;
2584         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2585         struct lnet_peer_ni *lpni_entry;
2586
2587         /*
2588          * We must use a consistent source address when sending to a
2589          * non-MR peer. However, a non-MR peer can have multiple NIDs
2590          * on multiple networks, and we may even need to talk to this
2591          * peer on multiple networks -- certain types of
2592          * load-balancing configuration do this.
2593          *
2594          * So we need to pick the NI the peer prefers for this
2595          * particular network.
2596          */
2597         LASSERT(peer_net);
2598         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2599                             lpni_peer_nis) {
2600                 if (lpni_entry->lpni_pref_nnids == 0)
2601                         continue;
2602                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2603                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2604                                                 cpt);
2605                 break;
2606         }
2607
2608         return best_ni;
2609 }
2610
2611 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2612 static int
2613 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2614 {
2615         struct lnet_ni *best_ni = NULL;
2616
2617         /*
2618          * We must use a consistent source address when sending to a
2619          * non-MR peer. However, a non-MR peer can have multiple NIDs
2620          * on multiple networks, and we may even need to talk to this
2621          * peer on multiple networks -- certain types of
2622          * load-balancing configuration do this.
2623          *
2624          * So we need to pick the NI the peer prefers for this
2625          * particular network.
2626          *
2627          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2628          * traffic doesn't care which source NI is used, and we don't actually
2629          * want to restrict local recovery pings to a single source NI.
2630          */
2631         if (!lnet_reserved_msg(sd->sd_msg))
2632                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2633                                                                sd->sd_cpt);
2634
2635         if (!best_ni)
2636                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2637                                                 sd->sd_best_lpni->lpni_peer_net,
2638                                                 sd->sd_msg,
2639                                                 sd->sd_md_cpt);
2640
2641         /* If there is no best_ni we don't have a route */
2642         if (!best_ni) {
2643                 CERROR("no path to %s from net %s\n",
2644                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2645                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2646                 return -EHOSTUNREACH;
2647         }
2648
2649         sd->sd_best_ni = best_ni;
2650
2651         /* Set preferred NI if necessary. */
2652         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2653
2654         return 0;
2655 }
2656
2657
2658 /*
2659  * Source not specified
2660  * Local destination
2661  * Non-MR Peer
2662  *
2663  * always use the same source NID for NMR peers
2664  * If we've talked to that peer before then we already have a preferred
2665  * source NI associated with it. Otherwise, we select a preferred local NI
2666  * and store it in the peer
2667  */
2668 static int
2669 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2670 {
2671         int rc = 0;
2672
2673         /* sd->sd_best_lpni is already set to the final destination */
2674
2675         /*
2676          * At this point we should've created the peer ni and peer. If we
2677          * can't find it, then something went wrong. Instead of asserting,
2678          * output a relevant message and fail the send
2679          */
2680         if (!sd->sd_best_lpni) {
2681                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2682                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2683                        libcfs_nidstr(&sd->sd_dst_nid));
2684                 return -EFAULT;
2685         }
2686
2687         if (sd->sd_msg->msg_routing) {
2688                 /* If I'm forwarding this message then I can choose any NI
2689                  * on the destination peer net
2690                  */
2691                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2692                                                                sd->sd_peer,
2693                                                                sd->sd_best_lpni->lpni_peer_net,
2694                                                                sd->sd_msg,
2695                                                                sd->sd_md_cpt);
2696                 if (!sd->sd_best_ni) {
2697                         CERROR("Unable to forward message to %s. No local NI available\n",
2698                                libcfs_nidstr(&sd->sd_dst_nid));
2699                         rc = -EHOSTUNREACH;
2700                 }
2701         } else
2702                 rc = lnet_select_preferred_best_ni(sd);
2703
2704         if (!rc)
2705                 rc = lnet_handle_send(sd);
2706
2707         return rc;
2708 }
2709
2710 static int
2711 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2712 {
2713         /*
2714          * NOTE we've already handled the remote peer case. So we only
2715          * need to worry about the local case here.
2716          *
2717          * if we're sending a response, ACK or reply, we need to send it
2718          * to the destination NID given to us. At this point we already
2719          * have the peer_ni we're supposed to send to, so just find the
2720          * best_ni on the peer net and use that. Since we're sending to an
2721          * MR peer then we can just run the selection algorithm on our
2722          * local NIs and pick the best one.
2723          */
2724         if (sd->sd_send_case & SND_RESP) {
2725                 sd->sd_best_ni =
2726                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2727                                                 sd->sd_best_lpni->lpni_peer_net,
2728                                                 sd->sd_msg,
2729                                                 sd->sd_md_cpt);
2730
2731                 if (!sd->sd_best_ni) {
2732                         /*
2733                          * We're not going to deal with not able to send
2734                          * a response to the provided final destination
2735                          */
2736                         CERROR("Can't send response to %s. No local NI available\n",
2737                                 libcfs_nidstr(&sd->sd_dst_nid));
2738                         return -EHOSTUNREACH;
2739                 }
2740
2741                 return lnet_handle_send(sd);
2742         }
2743
2744         /*
2745          * If we get here that means we're sending a fresh request, PUT or
2746          * GET, so we need to run our standard selection algorithm.
2747          * First find the best local interface that's on any of the peer's
2748          * networks.
2749          */
2750         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2751                                         sd->sd_md_cpt,
2752                                         sd->sd_msg,
2753                                         lnet_msg_discovery(sd->sd_msg));
2754         if (sd->sd_best_ni) {
2755                 sd->sd_best_lpni =
2756                   lnet_find_best_lpni(sd->sd_best_ni, &sd->sd_dst_nid,
2757                                       sd->sd_peer,
2758                                       sd->sd_best_ni->ni_net->net_id);
2759
2760                 /*
2761                  * if we're successful in selecting a peer_ni on the local
2762                  * network, then send to it. Otherwise fall through and
2763                  * try and see if we can reach it over another routed
2764                  * network
2765                  */
2766                 if (sd->sd_best_lpni &&
2767                     nid_same(&sd->sd_best_lpni->lpni_nid,
2768                              &the_lnet.ln_loni->ni_nid)) {
2769                         /*
2770                          * in case we initially started with a routed
2771                          * destination, let's reset to local
2772                          */
2773                         sd->sd_send_case &= ~REMOTE_DST;
2774                         sd->sd_send_case |= LOCAL_DST;
2775                         return lnet_handle_lo_send(sd);
2776                 } else if (sd->sd_best_lpni) {
2777                         /*
2778                          * in case we initially started with a routed
2779                          * destination, let's reset to local
2780                          */
2781                         sd->sd_send_case &= ~REMOTE_DST;
2782                         sd->sd_send_case |= LOCAL_DST;
2783                         return lnet_handle_send(sd);
2784                 }
2785
2786                 CERROR("Internal Error. Expected to have a best_lpni: "
2787                        "%s -> %s\n",
2788                        libcfs_nidstr(&sd->sd_src_nid),
2789                        libcfs_nidstr(&sd->sd_dst_nid));
2790
2791                 return -EFAULT;
2792         }
2793
2794         /*
2795          * Peer doesn't have a local network. Let's see if there is
2796          * a remote network we can reach it on.
2797          */
2798         return PASS_THROUGH;
2799 }
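
/* A PASS_THROUGH return tells the caller (lnet_handle_any_mr_dst() below)
 * that no usable pathway over a local network was found and that it should
 * fall back to selecting a routed path.
 */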
2800
2801 /*
2802  * Case 1:
2803  *      Source NID not specified
2804  *      Local destination
2805  *      MR peer
2806  *
2807  * Case 2:
2808  *      Source NID not specified
2809  *      Remote destination
2810  *      MR peer
2811  *
2812  * In both of these cases if we're sending a response, ACK or REPLY, then
2813  * we need to send to the destination NID provided.
2814  *
2815  * In the remote case let's deal with MR routers.
2816  *
2817  */
2818
2819 static int
2820 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2821 {
2822         int rc = 0;
2823         struct lnet_peer *gw_peer = NULL;
2824         struct lnet_peer_ni *gw_lpni = NULL;
2825
2826         /*
2827          * handle sending a response to a remote peer here so we don't
2828          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2829          */
2830         if (sd->sd_send_case & REMOTE_DST &&
2831             sd->sd_send_case & SND_RESP) {
2832                 struct lnet_peer_ni *gw;
2833                 struct lnet_peer *gw_peer;
2834
2835                 rc = lnet_handle_find_routed_path(
2836                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2837                 if (rc < 0) {
2838                         CERROR("Can't send response to %s. No route available\n",
2839                                libcfs_nidstr(&sd->sd_dst_nid));
2840                         return -EHOSTUNREACH;
2841                 } else if (rc > 0) {
2842                         return rc;
2843                 }
2844
2845                 sd->sd_best_lpni = gw;
2846                 sd->sd_peer = gw_peer;
2847
2848                 return lnet_handle_send(sd);
2849         }
2850
2851         /*
2852          * Even though the NID for the peer might not be on a local network,
2853          * since the peer is MR there could be other interfaces on the
2854          * local network. In that case we'd still like to prefer the local
2855          * network over the routed network. If we're unable to do that
2856          * then we select the best router among the different routed networks,
2857          * and if the router is MR then we can deal with it as such.
2858          */
2859         rc = lnet_handle_any_mr_dsta(sd);
2860         if (rc != PASS_THROUGH)
2861                 return rc;
2862
2863         /*
2864          * Now that we must route to the destination, we must consider the
2865          * MR case, where the destination has multiple interfaces, some of
2866          * which we can route to and others we cannot. For this reason we
2867          * need to select the destination which we can route to and, if
2868          * there are multiple, round robin among them.
2869          */
2870         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2871                                           &gw_lpni, &gw_peer);
2872         if (rc)
2873                 return rc;
2874
2875         sd->sd_send_case &= ~LOCAL_DST;
2876         sd->sd_send_case |= REMOTE_DST;
2877
2878         sd->sd_peer = gw_peer;
2879         sd->sd_best_lpni = gw_lpni;
2880
2881         return lnet_handle_send(sd);
2882 }
2883
2884 /*
2885  * Source not specified
2886  * Remote destination
2887  * Non-MR peer
2888  *
2889  * Must send to the specified peer NID using the same source NID that
2890  * we've used before. If this is the first time we talk to that peer,
2891  * find the source NI and assign it as preferred to that peer.
2892  */
2893 static int
2894 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2895 {
2896         int rc;
2897         struct lnet_peer_ni *gw_lpni = NULL;
2898         struct lnet_peer *gw_peer = NULL;
2899
2900         /*
2901          * Let's see if we have a preferred NI to talk to this NMR peer
2902          */
2903         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2904                                                               sd->sd_cpt);
2905
2906         /*
2907          * find the router and that'll find the best NI if we didn't find
2908          * it already.
2909          */
2910         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2911                                           &gw_peer);
2912         if (rc)
2913                 return rc;
2914
2915         /*
2916          * set the best_ni we've chosen as the preferred one for
2917          * this peer
2918          */
2919         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2920
2921         /* we'll be sending to the gw */
2922         sd->sd_best_lpni = gw_lpni;
2923         sd->sd_peer = gw_peer;
2924
2925         return lnet_handle_send(sd);
2926 }
2927
2928 static int
2929 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2930 {
2931         /*
2932          * turn off the SND_RESP bit.
2933          * It will be checked in the case handling
2934          */
2935         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2936
2937         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2938                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2939                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2940                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2941                 libcfs_nidstr(&sd->sd_dst_nid),
2942                 (send_case & LOCAL_DST) ? "local" : "routed");
2943
2944         switch (send_case) {
2945         /*
2946          * For all cases where the source is specified, we should always
2947          * use the destination NID, whether it's an MR destination or not,
2948          * since we're continuing a series of related messages for the
2949          * same RPC
2950          */
2951         case SRC_SPEC_LOCAL_NMR_DST:
2952                 return lnet_handle_spec_local_nmr_dst(sd);
2953         case SRC_SPEC_LOCAL_MR_DST:
2954                 return lnet_handle_spec_local_mr_dst(sd);
2955         case SRC_SPEC_ROUTER_NMR_DST:
2956         case SRC_SPEC_ROUTER_MR_DST:
2957                 return lnet_handle_spec_router_dst(sd);
2958         case SRC_ANY_LOCAL_NMR_DST:
2959                 return lnet_handle_any_local_nmr_dst(sd);
2960         case SRC_ANY_LOCAL_MR_DST:
2961         case SRC_ANY_ROUTER_MR_DST:
2962                 return lnet_handle_any_mr_dst(sd);
2963         case SRC_ANY_ROUTER_NMR_DST:
2964                 return lnet_handle_any_router_nmr_dst(sd);
2965         default:
2966                 CERROR("Unknown send case\n");
2967                 return -1;
2968         }
2969 }
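
/*
 * Illustrative sketch of how a send case is composed, assuming the combined
 * case macros (e.g. SRC_ANY_LOCAL_MR_DST) are plain OR combinations of the
 * individual flags, which matches how lnet_select_pathway() below builds
 * send_case:
 *
 *	send_case  = 0;
 *	send_case |= SRC_ANY;		no source NID was specified
 *	send_case |= LOCAL_DST;		destination net is configured locally
 *	send_case |= MR_DST;		peer is multi-rail capable
 *
 * This combination matches SRC_ANY_LOCAL_MR_DST in the switch above and is
 * handled by lnet_handle_any_mr_dst().
 */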
2970
2971 static int
2972 lnet_select_pathway(struct lnet_nid *src_nid,
2973                     struct lnet_nid *dst_nid,
2974                     struct lnet_msg *msg,
2975                     struct lnet_nid *rtr_nid)
2976 {
2977         struct lnet_peer_ni *lpni;
2978         struct lnet_peer *peer;
2979         struct lnet_send_data send_data;
2980         int cpt, rc;
2981         int md_cpt;
2982         __u32 send_case = 0;
2983         bool final_hop;
2984         bool mr_forwarding_allowed;
2985
2986         memset(&send_data, 0, sizeof(send_data));
2987
2988         /*
2989          * get an initial CPT to use for locking. The idea here is not to
2990          * serialize the calls to select_pathway, so that as many
2991          * operations can run concurrently as possible. To do that we use
2992          * the CPT where this call is being executed. Later on when we
2993          * determine the CPT to use in lnet_message_commit, we switch the
2994          * lock and check if there was any configuration change.  If none,
2995          * then we proceed, if there is, then we restart the operation.
2996          */
2997         cpt = lnet_net_lock_current();
2998
2999         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
3000         if (md_cpt == CFS_CPT_ANY)
3001                 md_cpt = cpt;
3002
3003 again:
3004
3005         /*
3006          * If we're being asked to send to the loopback interface, there
3007          * is no need to go through any selection. We can just shortcut
3008          * the entire process and send over lolnd
3009          */
3010         send_data.sd_msg = msg;
3011         send_data.sd_cpt = cpt;
3012         if (nid_is_lo0(dst_nid)) {
3013                 rc = lnet_handle_lo_send(&send_data);
3014                 lnet_net_unlock(cpt);
3015                 return rc;
3016         }
3017
3018         /*
3019          * find an existing peer_ni, or create one and mark it as having been
3020          * created due to network traffic. This call will create the
3021          * peer->peer_net->peer_ni tree.
3022          */
3023         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
3024         if (IS_ERR(lpni)) {
3025                 lnet_net_unlock(cpt);
3026                 return PTR_ERR(lpni);
3027         }
3028
3029         /*
3030          * Cache the original src_nid and rtr_nid. If we need to resend the
3031          * message then we'll need to know whether the src_nid was originally
3032          * specified for this message. If it was originally specified,
3033          * then we need to keep using the same src_nid since it's
3034          * continuing the same sequence of messages. Similarly, rtr_nid will
3035          * affect our choice of next hop.
3036          */
3037         if (src_nid)
3038                 msg->msg_src_nid_param = *src_nid;
3039         else
3040                 msg->msg_src_nid_param = LNET_ANY_NID;
3041         if (rtr_nid)
3042                 msg->msg_rtr_nid_param = *rtr_nid;
3043         else
3044                 msg->msg_rtr_nid_param = LNET_ANY_NID;
3045
3046         /*
3047          * If necessary, perform discovery on the peer that owns this peer_ni.
3048          * Note, this can result in the ownership of this peer_ni changing
3049          * to another peer object.
3050          */
3051         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
3052         if (rc) {
3053                 lnet_peer_ni_decref_locked(lpni);
3054                 lnet_net_unlock(cpt);
3055                 return rc;
3056         }
3057         lnet_peer_ni_decref_locked(lpni);
3058
3059         peer = lpni->lpni_peer_net->lpn_peer;
3060
3061         /*
3062          * Identify the different send cases
3063          */
3064         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3065                 send_case |= SRC_ANY;
3066                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3067                         send_case |= LOCAL_DST;
3068                 else
3069                         send_case |= REMOTE_DST;
3070         } else {
3071                 send_case |= SRC_SPEC;
3072                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3073                         send_case |= LOCAL_DST;
3074                 else
3075                         send_case |= REMOTE_DST;
3076         }
3077
3078         final_hop = false;
3079         if (msg->msg_routing && (send_case & LOCAL_DST))
3080                 final_hop = true;
3081
3082         /* Determine whether to allow MR forwarding for this message.
3083          * NB: MR forwarding is allowed if the message originator and the
3084          * destination are both MR capable, and the destination lpni that was
3085          * originally chosen by the originator is unhealthy or down.
3086          * We check the MR capability of the destination further below
3087          */
3088         mr_forwarding_allowed = false;
3089         if (final_hop) {
3090                 struct lnet_peer *src_lp;
3091                 struct lnet_peer_ni *src_lpni;
3092
3093                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3094                                                    NULL, cpt);
3095                 /* We don't fail the send if we hit any errors here. We'll just
3096                  * try to send it via non-multi-rail criteria
3097                  */
3098                 if (!IS_ERR(src_lpni)) {
3099                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3100                         lnet_peer_ni_decref_locked(src_lpni);
3101                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3102                         if (lnet_peer_is_multi_rail(src_lp) &&
3103                             !lnet_is_peer_ni_alive(lpni))
3104                                 mr_forwarding_allowed = true;
3105
3106                 }
3107                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3108                        mr_forwarding_allowed ? "allowed" : "not allowed");
3109         }
3110
3111         /*
3112          * Deal with the peer as NMR in the following cases:
3113          * 1. the peer is NMR
3114          * 2. We're trying to recover a specific peer NI
3115          * 3. I'm a router sending to the final destination and MR forwarding is
3116          *    not allowed for this message (as determined above).
3117          *    In this case the source of the message would've
3118          *    already selected the final destination so my job
3119          *    is to honor the selection.
3120          */
3121         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3122             (final_hop && !mr_forwarding_allowed))
3123                 send_case |= NMR_DST;
3124         else
3125                 send_case |= MR_DST;
3126
3127         if (lnet_msg_is_response(msg))
3128                 send_case |= SND_RESP;
3129
3130         /* assign parameters to the send_data */
3131         if (rtr_nid)
3132                 send_data.sd_rtr_nid = *rtr_nid;
3133         else
3134                 send_data.sd_rtr_nid = LNET_ANY_NID;
3135         if (src_nid)
3136                 send_data.sd_src_nid = *src_nid;
3137         else
3138                 send_data.sd_src_nid = LNET_ANY_NID;
3139         send_data.sd_dst_nid = *dst_nid;
3140         send_data.sd_best_lpni = lpni;
3141         /*
3142          * keep a pointer to the final destination in case we're going to
3143          * route, so we'll need to access it later
3144          */
3145         send_data.sd_final_dst_lpni = lpni;
3146         send_data.sd_peer = peer;
3147         send_data.sd_md_cpt = md_cpt;
3148         send_data.sd_send_case = send_case;
3149
3150         rc = lnet_handle_send_case_locked(&send_data);
3151
3152         /*
3153          * Update the local cpt since send_data.sd_cpt might've been
3154          * updated as a result of calling lnet_handle_send_case_locked().
3155          */
3156         cpt = send_data.sd_cpt;
3157
3158         if (rc == REPEAT_SEND)
3159                 goto again;
3160
3161         lnet_net_unlock(cpt);
3162
3163         return rc;
3164 }
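
/* NB: if lnet_handle_send_case_locked() comes back with REPEAT_SEND, the
 * selection is restarted from the "again" label above using the updated
 * send_data.sd_cpt; per the comment at the top of this function, this covers
 * the case where the CPT lock had to be switched and the configuration
 * changed underneath us.
 */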
3165
3166 int
3167 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3168           struct lnet_nid *rtr_nid)
3169 {
3170         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3171         int rc;
3172
3173         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3174         LASSERT(msg->msg_txpeer == NULL);
3175         LASSERT(msg->msg_txni == NULL);
3176         LASSERT(!msg->msg_sending);
3177         LASSERT(!msg->msg_target_is_router);
3178         LASSERT(!msg->msg_receiving);
3179
3180         msg->msg_sending = 1;
3181
3182         LASSERT(!msg->msg_tx_committed);
3183
3184         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3185         if (rc < 0) {
3186                 if (rc == -EHOSTUNREACH)
3187                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3188                 else
3189                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3190                 return rc;
3191         }
3192
3193         if (rc == LNET_CREDIT_OK)
3194                 lnet_ni_send(msg->msg_txni, msg);
3195
3196         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3197         return 0;
3198 }
3199
3200 enum lnet_mt_event_type {
3201         MT_TYPE_LOCAL_NI = 0,
3202         MT_TYPE_PEER_NI
3203 };
3204
3205 struct lnet_mt_event_info {
3206         enum lnet_mt_event_type mt_type;
3207         struct lnet_nid mt_nid;
3208 };
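
/* One lnet_mt_event_info is allocated per recovery ping (see
 * lnet_recover_local_nis() and lnet_recover_peer_nis()), handed to
 * lnet_send_ping() as the MD user data, and freed in lnet_mt_event_handler()
 * once the ping MD has been unlinked.
 */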
3209
3210 /* called with res_lock held */
3211 void
3212 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3213 {
3214         struct lnet_rsp_tracker *rspt;
3215
3216         /*
3217          * msg has a refcount on the MD so the MD is not going away.
3218          * The rspt queue for the cpt is protected by
3219          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3220          */
3221         if (!md->md_rspt_ptr)
3222                 return;
3223
3224         rspt = md->md_rspt_ptr;
3225
3226         /* debug code */
3227         LASSERT(rspt->rspt_cpt == cpt);
3228
3229         md->md_rspt_ptr = NULL;
3230
3231         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3232                 /*
3233                  * The monitor thread has invalidated this handle because the
3234                  * response timed out, but it failed to lookup the MD. That
3235                  * means this response tracker is on the zombie list. We can
3236                  * safely remove it under the resource lock (held by caller) and
3237                  * free the response tracker block.
3238                  */
3239                 list_del(&rspt->rspt_on_list);
3240                 lnet_rspt_free(rspt, cpt);
3241         } else {
3242                 /*
3243                  * invalidate the handle to indicate that a response has been
3244                  * received, which will then lead the monitor thread to clean up
3245                  * the rspt block.
3246                  */
3247                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3248         }
3249 }
3250
3251 void
3252 lnet_clean_zombie_rstqs(void)
3253 {
3254         struct lnet_rsp_tracker *rspt, *tmp;
3255         int i;
3256
3257         cfs_cpt_for_each(i, lnet_cpt_table()) {
3258                 list_for_each_entry_safe(rspt, tmp,
3259                                          the_lnet.ln_mt_zombie_rstqs[i],
3260                                          rspt_on_list) {
3261                         list_del(&rspt->rspt_on_list);
3262                         lnet_rspt_free(rspt, i);
3263                 }
3264         }
3265
3266         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3267 }
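
/* NB: this frees any response trackers that lnet_finalize_expired_responses()
 * below moved to the per-CPT zombie lists; per the comments there, it is only
 * safe to do so after LNet has shut down and all LND nets are gone.
 */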
3268
3269 static void
3270 lnet_finalize_expired_responses(void)
3271 {
3272         struct lnet_libmd *md;
3273         struct lnet_rsp_tracker *rspt, *tmp;
3274         ktime_t now;
3275         int i;
3276
3277         if (the_lnet.ln_mt_rstq == NULL)
3278                 return;
3279
3280         cfs_cpt_for_each(i, lnet_cpt_table()) {
3281                 LIST_HEAD(local_queue);
3282
3283                 lnet_net_lock(i);
3284                 if (!the_lnet.ln_mt_rstq[i]) {
3285                         lnet_net_unlock(i);
3286                         continue;
3287                 }
3288                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3289                 lnet_net_unlock(i);
3290
3291                 now = ktime_get();
3292
3293                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3294                         /*
3295                          * is received or whenever we want to discard the
3296                          * block; the monitor thread will walk the queue
3297                          * and clean up any rspts with an invalid mdh.
3298                          * and clean up any rsts with an invalid mdh.
3299                          * The monitor thread will walk the queue until
3300                          * the first unexpired rspt block. This means that
3301                          * some rspt blocks which received their
3302                          * corresponding responses will linger in the
3303                          * queue until they are cleaned up eventually.
3304                          */
3305                         lnet_res_lock(i);
3306                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3307                                 lnet_res_unlock(i);
3308                                 list_del(&rspt->rspt_on_list);
3309                                 lnet_rspt_free(rspt, i);
3310                                 continue;
3311                         }
3312
3313                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3314                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3315                                 struct lnet_peer_ni *lpni;
3316                                 struct lnet_nid nid;
3317
3318                                 md = lnet_handle2md(&rspt->rspt_mdh);
3319                                 if (!md) {
3320                                         /* MD has been queued for unlink, but
3321                                          * rspt hasn't been detached (Note we've
3322                                          * checked above that the rspt_mdh is
3323                                          * valid). Since we cannot lookup the MD
3324                                          * we're unable to detach the rspt
3325                                          * ourselves. Thus, move the rspt to the
3326                                          * zombie list where we'll wait for
3327                                          * either:
3328                                          *   1. The remaining operations on the
3329                                          *   MD to complete. In this case the
3330                                          *   final operation will result in
3331                                          *   lnet_msg_detach_md()->
3332                                          *   lnet_detach_rsp_tracker() where
3333                                          *   we will clean up this response
3334                                          *   tracker.
3335                                          *   2. LNet to shutdown. In this case
3336                                          *   we'll wait until after all LND Nets
3337                                          *   have shutdown and then we can
3338                                          *   safely free any remaining response
3339                                          *   tracker blocks on the zombie list.
3340                                          * Note: We need to hold the resource
3341                                          * lock when adding to the zombie list
3342                                          * because we may have concurrent access
3343                                          * with lnet_detach_rsp_tracker().
3344                                          */
3345                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3346                                         list_move(&rspt->rspt_on_list,
3347                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3348                                         lnet_res_unlock(i);
3349                                         continue;
3350                                 }
3351                                 LASSERT(md->md_rspt_ptr == rspt);
3352                                 md->md_rspt_ptr = NULL;
3353                                 lnet_res_unlock(i);
3354
3355                                 LNetMDUnlink(rspt->rspt_mdh);
3356
3357                                 nid = rspt->rspt_next_hop_nid;
3358
3359                                 list_del(&rspt->rspt_on_list);
3360                                 lnet_rspt_free(rspt, i);
3361
3362                                 /* If we're shutting down we just want to clean
3363                                  * up the rspt blocks
3364                                  */
3365                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3366                                         continue;
3367
3368                                 lnet_net_lock(i);
3369                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3370                                 lnet_net_unlock(i);
3371
3372                                 CDEBUG(D_NET,
3373                                        "Response timeout: md = %p: nid = %s\n",
3374                                        md, libcfs_nidstr(&nid));
3375
3376                                 /*
3377                                  * If there is a timeout on the response
3378                                  * from the next hop decrement its health
3379                                  * value so that we don't use it
3380                                  */
3381                                 lnet_net_lock(0);
3382                                 lpni = lnet_peer_ni_find_locked(&nid);
3383                                 if (lpni) {
3384                                         lnet_handle_remote_failure_locked(lpni);
3385                                         lnet_peer_ni_decref_locked(lpni);
3386                                 }
3387                                 lnet_net_unlock(0);
3388                         } else {
3389                                 lnet_res_unlock(i);
3390                                 break;
3391                         }
3392                 }
3393
3394                 if (!list_empty(&local_queue)) {
3395                         lnet_net_lock(i);
3396                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3397                         lnet_net_unlock(i);
3398                 }
3399         }
3400 }
3401
3402 static void
3403 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3404 {
3405         struct lnet_msg *msg;
3406
3407         while (!list_empty(resendq)) {
3408                 struct lnet_peer_ni *lpni;
3409
3410                 msg = list_entry(resendq->next, struct lnet_msg,
3411                                  msg_list);
3412
3413                 list_del_init(&msg->msg_list);
3414
3415                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3416                 if (!lpni) {
3417                         lnet_net_unlock(cpt);
3418                         CERROR("Expected that a peer is already created for %s\n",
3419                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3420                         msg->msg_no_resend = true;
3421                         lnet_finalize(msg, -EFAULT);
3422                         lnet_net_lock(cpt);
3423                 } else {
3424                         int rc;
3425
3426                         lnet_peer_ni_decref_locked(lpni);
3427
3428                         lnet_net_unlock(cpt);
3429                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3430                                libcfs_nidstr(&msg->msg_src_nid_param),
3431                                libcfs_idstr(&msg->msg_target),
3432                                lnet_msgtyp2str(msg->msg_type),
3433                                msg->msg_recovery,
3434                                msg->msg_retry_count);
3435                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3436                                        &msg->msg_rtr_nid_param);
3437                         if (rc) {
3438                                 CERROR("Error sending %s to %s: %d\n",
3439                                        lnet_msgtyp2str(msg->msg_type),
3440                                        libcfs_idstr(&msg->msg_target), rc);
3441                                 msg->msg_no_resend = true;
3442                                 lnet_finalize(msg, rc);
3443                         }
3444                         lnet_net_lock(cpt);
3445                         if (!rc)
3446                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3447                 }
3448         }
3449 }
3450
3451 static void
3452 lnet_resend_pending_msgs(void)
3453 {
3454         int i;
3455
3456         cfs_cpt_for_each(i, lnet_cpt_table()) {
3457                 lnet_net_lock(i);
3458                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3459                 lnet_net_unlock(i);
3460         }
3461 }
3462
3463 /* called with cpt and ni_lock held */
3464 static void
3465 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3466 {
3467         struct lnet_handle_md recovery_mdh;
3468
3469         LNetInvalidateMDHandle(&recovery_mdh);
3470
3471         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3472             force) {
3473                 recovery_mdh = ni->ni_ping_mdh;
3474                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3475         }
3476         lnet_ni_unlock(ni);
3477         lnet_net_unlock(cpt);
3478         if (!LNetMDHandleIsInvalid(recovery_mdh))
3479                 LNetMDUnlink(recovery_mdh);
3480         lnet_net_lock(cpt);
3481         lnet_ni_lock(ni);
3482 }
3483
3484 static void
3485 lnet_recover_local_nis(void)
3486 {
3487         struct lnet_mt_event_info *ev_info;
3488         LIST_HEAD(processed_list);
3489         LIST_HEAD(local_queue);
3490         struct lnet_handle_md mdh;
3491         struct lnet_ni *tmp;
3492         struct lnet_ni *ni;
3493         struct lnet_nid nid;
3494         int healthv;
3495         int rc;
3496         time64_t now;
3497
3498         /*
3499          * splice the recovery queue on a local queue. We will iterate
3500          * through the local queue and update it as needed. Once we're
3501          * done with the traversal, we'll splice the local queue back on
3502          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3503          * will be traversed in the next iteration.
3504          */
3505         lnet_net_lock(0);
3506         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3507                          &local_queue);
3508         lnet_net_unlock(0);
3509
3510         now = ktime_get_seconds();
3511
3512         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3513                 /*
3514                  * if an NI is being deleted or it is now healthy, there
3515                  * is no need to keep it around in the recovery queue.
3516                  * The monitor thread is the only thread responsible for
3517                  * removing the NI from the recovery queue.
3518                  * Multiple threads can be adding NIs to the recovery
3519                  * queue.
3520                  */
3521                 healthv = atomic_read(&ni->ni_healthv);
3522
3523                 lnet_net_lock(0);
3524                 lnet_ni_lock(ni);
3525                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3526                     healthv == LNET_MAX_HEALTH_VALUE) {
3527                         list_del_init(&ni->ni_recovery);
3528                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3529                         lnet_ni_unlock(ni);
3530                         lnet_ni_decref_locked(ni, 0);
3531                         lnet_net_unlock(0);
3532                         continue;
3533                 }
3534
3535                 /*
3536                  * if the local NI failed recovery we must unlink the md.
3537                  * But we want to keep the local_ni on the recovery queue
3538                  * so we can continue the attempts to recover it.
3539                  */
3540                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3541                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3542                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3543                 }
3544
3545
3546                 lnet_ni_unlock(ni);
3547
3548                 if (now < ni->ni_next_ping) {
3549                         lnet_net_unlock(0);
3550                         continue;
3551                 }
3552
3553                 lnet_net_unlock(0);
3554
3555                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3556                        libcfs_nidstr(&ni->ni_nid));
3557
3558                 lnet_ni_lock(ni);
3559                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3560                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3561                         lnet_ni_unlock(ni);
3562
3563                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3564                         if (!ev_info) {
3565                                 CERROR("out of memory. Can't recover %s\n",
3566                                        libcfs_nidstr(&ni->ni_nid));
3567                                 lnet_ni_lock(ni);
3568                                 ni->ni_recovery_state &=
3569                                   ~LNET_NI_RECOVERY_PENDING;
3570                                 lnet_ni_unlock(ni);
3571                                 continue;
3572                         }
3573
3574                         mdh = ni->ni_ping_mdh;
3575                         /*
3576                          * Invalidate the ni mdh in case it's deleted.
3577                          * We'll unlink the mdh in this case below.
3578                          */
3579                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3580                         nid = ni->ni_nid;
3581
3582                         /*
3583                          * remove the NI from the local queue and drop the
3584                          * reference count to it while we're recovering
3585                          * it. The reason for that is that the NI could
3586                          * be deleted, and the way the code is structured
3587                          * is if we don't drop the NI, then the deletion
3588                          * code will enter a loop waiting for the
3589                          * reference count to be removed while holding the
3590                          * ln_mutex_lock(). When we look up the peer to
3591                          * send to in lnet_select_pathway() we will try to
3592                          * lock the ln_mutex_lock() as well, leading to
3593                          * a deadlock. By dropping the refcount and
3594                          * removing it from the list, we allow for the NI
3595                          * to be removed, then we use the cached NID to
3596                          * look it up again. If it's gone, then we just
3597                          * continue examining the rest of the queue.
3598                          */
3599                         lnet_net_lock(0);
3600                         list_del_init(&ni->ni_recovery);
3601                         lnet_ni_decref_locked(ni, 0);
3602                         lnet_net_unlock(0);
3603
3604                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3605                         ev_info->mt_nid = nid;
3606                         rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
3607                                             ev_info, the_lnet.ln_mt_handler,
3608                                             true);
3609                         /* lookup the nid again */
3610                         lnet_net_lock(0);
3611                         ni = lnet_nid_to_ni_locked(&nid, 0);
3612                         if (!ni) {
3613                                 /*
3614                                  * the NI has been deleted when we dropped
3615                                  * the ref count
3616                                  */
3617                                 lnet_net_unlock(0);
3618                                 LNetMDUnlink(mdh);
3619                                 continue;
3620                         }
3621                         ni->ni_ping_count++;
3622
3623                         ni->ni_ping_mdh = mdh;
3624                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3625                                                         now);
3626
3627                         if (rc) {
3628                                 lnet_ni_lock(ni);
3629                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3630                                 lnet_ni_unlock(ni);
3631                         }
3632                         lnet_net_unlock(0);
3633                 } else
3634                         lnet_ni_unlock(ni);
3635         }
3636
3637         /*
3638          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3639          * reexamined in the next iteration.
3640          */
3641         list_splice_init(&processed_list, &local_queue);
3642         lnet_net_lock(0);
3643         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3644         lnet_net_unlock(0);
3645 }
3646
3647 static int
3648 lnet_resendqs_create(void)
3649 {
3650         struct list_head **resendqs;
3651         resendqs = lnet_create_array_of_queues();
3652
3653         if (!resendqs)
3654                 return -ENOMEM;
3655
3656         lnet_net_lock(LNET_LOCK_EX);
3657         the_lnet.ln_mt_resendqs = resendqs;
3658         lnet_net_unlock(LNET_LOCK_EX);
3659
3660         return 0;
3661 }
3662
3663 static void
3664 lnet_clean_local_ni_recoveryq(void)
3665 {
3666         struct lnet_ni *ni;
3667
3668         /* This is only called when the monitor thread has stopped */
3669         lnet_net_lock(0);
3670
3671         while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
3672                                               struct lnet_ni,
3673                                               ni_recovery)) != NULL) {
3674                 list_del_init(&ni->ni_recovery);
3675                 lnet_ni_lock(ni);
3676                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3677                 lnet_ni_unlock(ni);
3678                 lnet_ni_decref_locked(ni, 0);
3679         }
3680
3681         lnet_net_unlock(0);
3682 }
3683
3684 static void
3685 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3686                                      bool force)
3687 {
3688         struct lnet_handle_md recovery_mdh;
3689
3690         LNetInvalidateMDHandle(&recovery_mdh);
3691
3692         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3693                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3694                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3695         }
3696         spin_unlock(&lpni->lpni_lock);
3697         lnet_net_unlock(cpt);
3698         if (!LNetMDHandleIsInvalid(recovery_mdh))
3699                 LNetMDUnlink(recovery_mdh);
3700         lnet_net_lock(cpt);
3701         spin_lock(&lpni->lpni_lock);
3702 }
3703
3704 static void
3705 lnet_clean_peer_ni_recoveryq(void)
3706 {
3707         struct lnet_peer_ni *lpni, *tmp;
3708
3709         lnet_net_lock(LNET_LOCK_EX);
3710
3711         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3712                                  lpni_recovery) {
3713                 list_del_init(&lpni->lpni_recovery);
3714                 spin_lock(&lpni->lpni_lock);
3715                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3716                 spin_unlock(&lpni->lpni_lock);
3717                 lnet_peer_ni_decref_locked(lpni);
3718         }
3719
3720         lnet_net_unlock(LNET_LOCK_EX);
3721 }
3722
3723 static void
3724 lnet_clean_resendqs(void)
3725 {
3726         struct lnet_msg *msg, *tmp;
3727         LIST_HEAD(msgs);
3728         int i;
3729
3730         cfs_cpt_for_each(i, lnet_cpt_table()) {
3731                 lnet_net_lock(i);
3732                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3733                 lnet_net_unlock(i);
3734                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3735                         list_del_init(&msg->msg_list);
3736                         msg->msg_no_resend = true;
3737                         lnet_finalize(msg, -ESHUTDOWN);
3738                 }
3739         }
3740
3741         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3742 }
3743
3744 static void
3745 lnet_recover_peer_nis(void)
3746 {
3747         struct lnet_mt_event_info *ev_info;
3748         LIST_HEAD(processed_list);
3749         LIST_HEAD(local_queue);
3750         struct lnet_handle_md mdh;
3751         struct lnet_peer_ni *lpni;
3752         struct lnet_peer_ni *tmp;
3753         struct lnet_nid nid;
3754         int healthv;
3755         int rc;
3756         time64_t now;
3757
3758         /*
3759          * Always use cpt 0 for locking across all interactions with
3760          * ln_mt_peerNIRecovq
3761          */
3762         lnet_net_lock(0);
3763         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3764                          &local_queue);
3765         lnet_net_unlock(0);
3766
3767         now = ktime_get_seconds();
3768
3769         list_for_each_entry_safe(lpni, tmp, &local_queue,
3770                                  lpni_recovery) {
3771                 /*
3772                          * The same protection strategy is used here as in the
3773                  * local recovery case.
3774                  */
3775                 lnet_net_lock(0);
3776                 healthv = atomic_read(&lpni->lpni_healthv);
3777                 spin_lock(&lpni->lpni_lock);
3778                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3779                     healthv == LNET_MAX_HEALTH_VALUE) {
3780                         list_del_init(&lpni->lpni_recovery);
3781                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3782                         spin_unlock(&lpni->lpni_lock);
3783                         lnet_peer_ni_decref_locked(lpni);
3784                         lnet_net_unlock(0);
3785                         continue;
3786                 }
3787
3788                 /*
3789                  * If the peer NI has failed recovery we must unlink the
3790                  * md. But we want to keep the peer ni on the recovery
3791                  * queue so we can try to continue recovering it
3792                  */
3793                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3794                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3795                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3796                 }
3797
3798                 spin_unlock(&lpni->lpni_lock);
3799
3800                 if (now < lpni->lpni_next_ping) {
3801                         lnet_net_unlock(0);
3802                         continue;
3803                 }
3804
3805                 lnet_net_unlock(0);
3806
3807                 /*
3808                  * NOTE: we're racing with peer deletion from user space.
3809                  * It's possible that a peer is deleted after we check its
3810                  * state. In this case the recovery can create a new peer
3811                  */
3812                 spin_lock(&lpni->lpni_lock);
3813                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3814                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3815                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3816                         spin_unlock(&lpni->lpni_lock);
3817
3818                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3819                         if (!ev_info) {
3820                                 CERROR("out of memory. Can't recover %s\n",
3821                                        libcfs_nidstr(&lpni->lpni_nid));
3822                                 spin_lock(&lpni->lpni_lock);
3823                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3824                                 spin_unlock(&lpni->lpni_lock);
3825                                 continue;
3826                         }
3827
3828                         /* look at the comments in lnet_recover_local_nis() */
3829                         mdh = lpni->lpni_recovery_ping_mdh;
3830                         nid = lpni->lpni_nid;
3831                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3832                         lnet_net_lock(0);
3833                         list_del_init(&lpni->lpni_recovery);
3834                         lnet_peer_ni_decref_locked(lpni);
3835                         lnet_net_unlock(0);
3836
3837                         ev_info->mt_type = MT_TYPE_PEER_NI;
3838                         ev_info->mt_nid = nid;
3839                         rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
3840                                             ev_info, the_lnet.ln_mt_handler,
3841                                             true);
3842                         lnet_net_lock(0);
3843                         /*
3844                          * lnet_peer_ni_find_locked() grabs a refcount for
3845                          * us. No need to take it explicitly.
3846                          */
3847                         lpni = lnet_peer_ni_find_locked(&nid);
3848                         if (!lpni) {
3849                                 lnet_net_unlock(0);
3850                                 LNetMDUnlink(mdh);
3851                                 continue;
3852                         }
3853
3854                         lpni->lpni_ping_count++;
3855
3856                         lpni->lpni_recovery_ping_mdh = mdh;
3857
3858                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3859                                                              &processed_list,
3860                                                              now);
3861                         if (rc) {
3862                                 spin_lock(&lpni->lpni_lock);
3863                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3864                                 spin_unlock(&lpni->lpni_lock);
3865                         }
3866
3867                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3868                         lnet_peer_ni_decref_locked(lpni);
3869                         lnet_net_unlock(0);
3870                 } else
3871                         spin_unlock(&lpni->lpni_lock);
3872         }
3873
3874         list_splice_init(&processed_list, &local_queue);
3875         lnet_net_lock(0);
3876         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3877         lnet_net_unlock(0);
3878 }
3879
3880 static int
3881 lnet_monitor_thread(void *arg)
3882 {
3883         time64_t rsp_timeout = 0;
3884         time64_t now;
3885
3886         wait_for_completion(&the_lnet.ln_started);
3887         /*
3888          * The monitor thread takes care of the following:
3889          *  1. Checks the aliveness of routers.
3890          *  2. Checks if there are messages on the resend queue and
3891          *     resends them.
3892          *  3. Checks if there are any local NIs on the recovery queue
3893          *     and pings them.
3894          *  4. Checks if there are any peer NIs on the remote recovery
3895          *     queue and pings them.
3896          */
3897         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3898                 now = ktime_get_real_seconds();
3899
3900                 if (lnet_router_checker_active())
3901                         lnet_check_routers();
3902
3903                 lnet_resend_pending_msgs();
3904
3905                 if (now >= rsp_timeout) {
3906                         lnet_finalize_expired_responses();
3907                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3908                 }
3909
3910                 lnet_recover_local_nis();
3911                 lnet_recover_peer_nis();
3912
3913                 /*
3914                  * TODO do we need to check if we should sleep without
3915                  * timeout?  Technically, an active system will always
3916                  * have messages in flight so this check will always
3917                  * evaluate to false. And on an idle system do we care
3918                  * if we wake up every 1 second? Although, we've seen
3919                  * cases where we get a complaint that an idle thread
3920                  * is waking up unnecessarily.
3921                  */
3922                 wait_for_completion_interruptible_timeout(
3923                         &the_lnet.ln_mt_wait_complete,
3924                         cfs_time_seconds(1));
3925                 /* Must re-init the completion before testing anything,
3926                  * including ln_mt_state.
3927                  */
3928                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3929         }
3930
3931         /* Shutting down */
3932         lnet_net_lock(LNET_LOCK_EX);
3933         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3934         lnet_net_unlock(LNET_LOCK_EX);
3935
3936         /* signal that the monitor thread is exiting */
3937         up(&the_lnet.ln_mt_signal);
3938
3939         return 0;
3940 }
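
/*
 * Illustrative note (an assumption about callers elsewhere in LNet, not
 * something shown in this file): code that queues work for the monitor
 * thread, e.g. a message placed on a resend queue, could wake it before the
 * one-second timeout above expires by completing the wait object:
 *
 *	complete(&the_lnet.ln_mt_wait_complete);
 *
 * The thread re-initializes the completion after each wait and before
 * re-checking ln_mt_state, as noted in the loop above.
 */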
3941
3942 /*
3943  * lnet_send_ping
3944  * Sends a ping.
3945  * Returns 0 on success
3946  * Returns > 0 if LNetMDBind() or an earlier step fails
3947  * Returns < 0 if LNetGet() fails
3948  */
3949 int
3950 lnet_send_ping(struct lnet_nid *dest_nid,
3951                struct lnet_handle_md *mdh, int bytes,
3952                void *user_data, lnet_handler_t handler, bool recovery)
3953 {
3954         struct lnet_md md = { NULL };
3955         struct lnet_processid id;
3956         struct lnet_ping_buffer *pbuf;
3957         int rc;
3958
3959         if (LNET_NID_IS_ANY(dest_nid)) {
3960                 rc = -EHOSTUNREACH;
3961                 goto fail_error;
3962         }
3963
3964         pbuf = lnet_ping_buffer_alloc(bytes, GFP_NOFS);
3965         if (!pbuf) {
3966                 rc = ENOMEM;
3967                 goto fail_error;
3968         }
3969
3970         /* initialize md content */
3971         md.start     = &pbuf->pb_info;
3972         md.length    = bytes;
3973         md.threshold = 2; /* GET/REPLY */
3974         md.max_size  = 0;
3975         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3976         md.user_ptr  = user_data;
3977         md.handler   = handler;
3978
3979         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3980         if (rc) {
3981                 lnet_ping_buffer_decref(pbuf);
3982                 CERROR("Can't bind MD: %d\n", rc);
3983                 rc = -rc; /* change the rc to positive */
3984                 goto fail_error;
3985         }
3986         id.pid = LNET_PID_LUSTRE;
3987         id.nid = *dest_nid;
3988
3989         rc = LNetGet(NULL, *mdh, &id,
3990                      LNET_RESERVED_PORTAL,
3991                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3992
3993         if (rc)
3994                 goto fail_unlink_md;
3995
3996         return 0;
3997
3998 fail_unlink_md:
3999         LNetMDUnlink(*mdh);
4000         LNetInvalidateMDHandle(mdh);
4001 fail_error:
4002         return rc;
4003 }
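
/*
 * Illustrative only (kept out of the build): a minimal recovery-style caller
 * of lnet_send_ping(), mirroring the call sites in lnet_recover_local_nis()
 * and lnet_recover_peer_nis() above. The helper name is hypothetical.
 */
#if 0
static int
lnet_example_recovery_ping(struct lnet_nid *nid, void *ev_info)
{
	struct lnet_handle_md mdh;
	int rc;

	/* LNetMDBind() inside lnet_send_ping() overwrites mdh on success */
	LNetInvalidateMDHandle(&mdh);
	rc = lnet_send_ping(nid, &mdh, LNET_PING_INFO_MIN_SIZE, ev_info,
			    the_lnet.ln_mt_handler, true);
	if (rc) /* > 0: MD bind (or earlier) failed, < 0: LNetGet() failed */
		CERROR("example ping to %s failed: %d\n",
		       libcfs_nidstr(nid), rc);
	return rc;
}
#endif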
4004
4005 static void
4006 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
4007                            int status, bool send, bool unlink_event)
4008 {
4009         struct lnet_nid *nid = &ev_info->mt_nid;
4010
4011         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
4012                 struct lnet_ni *ni;
4013
4014                 lnet_net_lock(0);
4015                 ni = lnet_nid_to_ni_locked(nid, 0);
4016                 if (!ni) {
4017                         lnet_net_unlock(0);
4018                         return;
4019                 }
4020                 lnet_ni_lock(ni);
4021                 if (!send || (send && status != 0))
4022                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
4023                 if (status)
4024                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
4025                 lnet_ni_unlock(ni);
4026                 lnet_net_unlock(0);
4027
4028                 if (status != 0) {
4029                         CERROR("local NI (%s) recovery failed with %d\n",
4030                                libcfs_nidstr(nid), status);
4031                         return;
4032                 }
4033                 /*
4034                  * need to increment healthv for the ni here, because in
4035                  * the lnet_finalize() path we don't have access to this
4036                  * NI. And in order to get access to it, we would need to
4037                  * carry forward too much information.
4038                  * In the peer case, it is incremented naturally.
4039                  */
4040                 if (!unlink_event)
4041                         lnet_inc_healthv(&ni->ni_healthv,
4042                                          lnet_health_sensitivity);
4043         } else {
4044                 struct lnet_peer_ni *lpni;
4045                 int cpt;
4046
4047                 cpt = lnet_net_lock_current();
4048                 lpni = lnet_peer_ni_find_locked(nid);
4049                 if (!lpni) {
4050                         lnet_net_unlock(cpt);
4051                         return;
4052                 }
4053                 spin_lock(&lpni->lpni_lock);
4054                 if (!send || (send && status != 0))
4055                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
4056                 if (status)
4057                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
4058                 spin_unlock(&lpni->lpni_lock);
4059                 lnet_peer_ni_decref_locked(lpni);
4060                 lnet_net_unlock(cpt);
4061
4062                 if (status != 0)
4063                         CERROR("peer NI (%s) recovery failed with %d\n",
4064                                libcfs_nidstr(nid), status);
4065         }
4066 }
4067
4068 void
4069 lnet_mt_event_handler(struct lnet_event *event)
4070 {
4071         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4072         struct lnet_ping_buffer *pbuf;
4073
4074         /* TODO: remove assert */
4075         LASSERT(event->type == LNET_EVENT_REPLY ||
4076                 event->type == LNET_EVENT_SEND ||
4077                 event->type == LNET_EVENT_UNLINK);
4078
4079         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4080                event->status);
4081
4082         switch (event->type) {
4083         case LNET_EVENT_UNLINK:
4084                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4085                        libcfs_nidstr(&ev_info->mt_nid));
4086                 fallthrough;
4087         case LNET_EVENT_REPLY:
4088                 lnet_handle_recovery_reply(ev_info, event->status, false,
4089                                            event->type == LNET_EVENT_UNLINK);
4090                 break;
4091         case LNET_EVENT_SEND:
4092                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4093                                libcfs_nidstr(&ev_info->mt_nid),
4094                                (event->status) ? "unsuccessfully" :
4095                                "successfully", event->status);
4096                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4097                 break;
4098         default:
4099                 CERROR("Unexpected event: %d\n", event->type);
4100                 break;
4101         }
4102         if (event->unlinked) {
4103                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4104                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4105                 lnet_ping_buffer_decref(pbuf);
4106         }
4107 }
4108
4109 static int
4110 lnet_rsp_tracker_create(void)
4111 {
4112         struct list_head **rstqs;
4113         rstqs = lnet_create_array_of_queues();
4114
4115         if (!rstqs)
4116                 return -ENOMEM;
4117
4118         the_lnet.ln_mt_rstq = rstqs;
4119
4120         return 0;
4121 }
4122
4123 static void
4124 lnet_rsp_tracker_clean(void)
4125 {
4126         lnet_finalize_expired_responses();
4127
4128         cfs_percpt_free(the_lnet.ln_mt_rstq);
4129         the_lnet.ln_mt_rstq = NULL;
4130 }
4131
4132 int lnet_monitor_thr_start(void)
4133 {
4134         int rc = 0;
4135         struct task_struct *task;
4136
4137         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4138                 return -EALREADY;
4139
4140         rc = lnet_resendqs_create();
4141         if (rc)
4142                 return rc;
4143
4144         rc = lnet_rsp_tracker_create();
4145         if (rc)
4146                 goto clean_queues;
4147
4148         sema_init(&the_lnet.ln_mt_signal, 0);
4149
4150         lnet_net_lock(LNET_LOCK_EX);
4151         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4152         lnet_net_unlock(LNET_LOCK_EX);
4153         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4154         if (IS_ERR(task)) {
4155                 rc = PTR_ERR(task);
4156                 CERROR("Can't start monitor thread: %d\n", rc);
4157                 goto clean_thread;
4158         }
4159
4160         return 0;
4161
4162 clean_thread:
4163         lnet_net_lock(LNET_LOCK_EX);
4164         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4165         lnet_net_unlock(LNET_LOCK_EX);
4166         /* block until event callback signals exit */
4167         down(&the_lnet.ln_mt_signal);
4168         /* clean up */
4169         lnet_net_lock(LNET_LOCK_EX);
4170         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4171         lnet_net_unlock(LNET_LOCK_EX);
4172         lnet_rsp_tracker_clean();
4173         lnet_clean_local_ni_recoveryq();
4174         lnet_clean_peer_ni_recoveryq();
4175         lnet_clean_resendqs();
4176         the_lnet.ln_mt_handler = NULL;
4177         return rc;
4178 clean_queues:
4179         lnet_rsp_tracker_clean();
4180         lnet_clean_local_ni_recoveryq();
4181         lnet_clean_peer_ni_recoveryq();
4182         lnet_clean_resendqs();
4183         return rc;
4184 }
4185
4186 void lnet_monitor_thr_stop(void)
4187 {
4188         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4189                 return;
4190
4191         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4192         lnet_net_lock(LNET_LOCK_EX);
4193         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4194         lnet_net_unlock(LNET_LOCK_EX);
4195
4196         /* tell the monitor thread that we're shutting down */
4197         complete(&the_lnet.ln_mt_wait_complete);
4198
4199         /* block until monitor thread signals that it's done */
4200         mutex_unlock(&the_lnet.ln_api_mutex);
4201         down(&the_lnet.ln_mt_signal);
4202         mutex_lock(&the_lnet.ln_api_mutex);
4203         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4204
4205         /* perform cleanup tasks */
4206         lnet_rsp_tracker_clean();
4207         lnet_clean_local_ni_recoveryq();
4208         lnet_clean_peer_ni_recoveryq();
4209         lnet_clean_resendqs();
4210 }
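
/*
 * Conceptual sketch, not part of the original source: the shutdown handshake
 * assumed by lnet_monitor_thr_start()/lnet_monitor_thr_stop() above.  The
 * real lnet_monitor_thread() is defined earlier in this file; this fragment
 * (with a hypothetical name) only shows the thread-side obligations implied
 * here: once LNET_MT_STATE_STOPPING is observed, move the state to
 * LNET_MT_STATE_SHUTDOWN and up() ln_mt_signal so the stopper's down()
 * returns.
 */
static void __maybe_unused
lnet_example_monitor_exit(void)
{
        /* ... the monitoring loop has noticed ln_mt_state != RUNNING ... */

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
        lnet_net_unlock(LNET_LOCK_EX);

        /* wake whoever is blocked in down(&the_lnet.ln_mt_signal) */
        up(&the_lnet.ln_mt_signal);
}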
4211
4212 void
4213 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4214                   __u32 msg_type)
4215 {
4216         lnet_net_lock(cpt);
4217         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4218         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4219         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4220         lnet_net_unlock(cpt);
4221
4222         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4223 }
4224
4225 static void
4226 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4227 {
4228         struct lnet_hdr *hdr = &msg->msg_hdr;
4229
4230         if (msg->msg_wanted != 0)
4231                 lnet_setpayloadbuffer(msg);
4232
4233         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4234
4235         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4236          * it back into the ACK during lnet_finalize() */
4237         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4238                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4239
4240         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4241                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4242 }
4243
4244 static int
4245 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4246 {
4247         struct lnet_hdr         *hdr = &msg->msg_hdr;
4248         struct lnet_match_info  info;
4249         int                     rc;
4250         bool                    ready_delay;
4251
4252         /* Convert put fields to host byte order */
4253         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4254         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4255         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4256
4257         /* Primary peer NID. */
4258         info.mi_id.nid = msg->msg_initiator;
4259         info.mi_id.pid  = hdr->src_pid;
4260         info.mi_opc     = LNET_MD_OP_PUT;
4261         info.mi_portal  = hdr->msg.put.ptl_index;
4262         info.mi_rlength = hdr->payload_length;
4263         info.mi_roffset = hdr->msg.put.offset;
4264         info.mi_mbits   = hdr->msg.put.match_bits;
4265         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4266
4267         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4268         ready_delay = msg->msg_rx_ready_delay;
4269
4270  again:
4271         rc = lnet_ptl_match_md(&info, msg);
4272         switch (rc) {
4273         default:
4274                 LBUG();
4275
4276         case LNET_MATCHMD_OK:
4277                 lnet_recv_put(ni, msg);
4278                 return 0;
4279
4280         case LNET_MATCHMD_NONE:
4281                 if (ready_delay)
4282                         /* no eager_recv, or it has already been called;
4283                          * msg should be on the delayed list by now */
4284                         return 0;
4285
4286                 rc = lnet_ni_eager_recv(ni, msg);
4287                 if (rc == 0) {
4288                         ready_delay = true;
4289                         goto again;
4290                 }
4291                 fallthrough;
4292
4293         case LNET_MATCHMD_DROP:
4294                 CNETERR("Dropping PUT from %s portal %d match %llu"
4295                         " offset %d length %d: %d\n",
4296                         libcfs_idstr(&info.mi_id), info.mi_portal,
4297                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4298
4299                 return -ENOENT; /* -ve: OK but no match */
4300         }
4301 }
4302
4303 static int
4304 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4305 {
4306         struct lnet_match_info info;
4307         struct lnet_hdr *hdr = &msg->msg_hdr;
4308         struct lnet_processid source_id;
4309         struct lnet_handle_wire reply_wmd;
4310         int rc;
4311
4312         /* Convert get fields to host byte order */
4313         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4314         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4315         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4316         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4317
4318         source_id.nid = hdr->src_nid;
4319         source_id.pid = hdr->src_pid;
4320         /* Primary peer NID */
4321         info.mi_id.nid  = msg->msg_initiator;
4322         info.mi_id.pid  = hdr->src_pid;
4323         info.mi_opc     = LNET_MD_OP_GET;
4324         info.mi_portal  = hdr->msg.get.ptl_index;
4325         info.mi_rlength = hdr->msg.get.sink_length;
4326         info.mi_roffset = hdr->msg.get.src_offset;
4327         info.mi_mbits   = hdr->msg.get.match_bits;
4328         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4329
4330         rc = lnet_ptl_match_md(&info, msg);
4331         if (rc == LNET_MATCHMD_DROP) {
4332                 CNETERR("Dropping GET from %s portal %d match %llu"
4333                         " offset %d length %d\n",
4334                         libcfs_idstr(&info.mi_id), info.mi_portal,
4335                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4336                 return -ENOENT; /* -ve: OK but no match */
4337         }
4338
4339         LASSERT(rc == LNET_MATCHMD_OK);
4340
4341         lnet_build_msg_event(msg, LNET_EVENT_GET);
4342
4343         reply_wmd = hdr->msg.get.return_wmd;
4344
4345         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4346                        msg->msg_offset, msg->msg_wanted);
4347
4348         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4349
4350         if (rdma_get) {
4351                 /* The LND completes the REPLY from its recv procedure */
4352                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4353                              msg->msg_offset, msg->msg_len, msg->msg_len);
4354                 return 0;
4355         }
4356
4357         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4358         msg->msg_receiving = 0;
4359
4360         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4361         if (rc < 0) {
4362                 /* didn't get as far as lnet_ni_send() */
4363                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4364                        libcfs_nidstr(&ni->ni_nid),
4365                        libcfs_idstr(&info.mi_id), rc);
4366
4367                 lnet_finalize(msg, rc);
4368         }
4369
4370         return 0;
4371 }
4372
4373 static int
4374 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4375 {
4376         void *private = msg->msg_private;
4377         struct lnet_hdr *hdr = &msg->msg_hdr;
4378         struct lnet_processid src = {};
4379         struct lnet_libmd *md;
4380         unsigned int rlength;
4381         unsigned int mlength;
4382         int cpt;
4383
4384         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4385         lnet_res_lock(cpt);
4386
4387         src.nid = hdr->src_nid;
4388         src.pid = hdr->src_pid;
4389
4390         /* NB handles only looked up by creator (no flips) */
4391         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4392         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4393                 CNETERR("%s: Dropping REPLY from %s for %s "
4394                         "MD %#llx.%#llx\n",
4395                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4396                         (md == NULL) ? "invalid" : "inactive",
4397                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4398                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4399                 if (md != NULL && md->md_me != NULL)
4400                         CERROR("REPLY MD also attached to portal %d\n",
4401                                md->md_me->me_portal);
4402
4403                 lnet_res_unlock(cpt);
4404                 return -ENOENT; /* -ve: OK but no match */
4405         }
4406
4407         LASSERT(md->md_offset == 0);
4408
4409         rlength = hdr->payload_length;
4410         mlength = min(rlength, md->md_length);
4411
4412         if (mlength < rlength &&
4413             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4414                 CNETERR("%s: Dropping REPLY from %s length %d "
4415                         "for MD %#llx would overflow (%d)\n",
4416                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4417                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4418                         mlength);
4419                 lnet_res_unlock(cpt);
4420                 return -ENOENT; /* -ve: OK but no match */
4421         }
4422
4423         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4424                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4425                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4426
4427         lnet_msg_attach_md(msg, md, 0, mlength);
4428
4429         if (mlength != 0)
4430                 lnet_setpayloadbuffer(msg);
4431
4432         lnet_res_unlock(cpt);
4433
4434         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4435
4436         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4437         return 0;
4438 }
4439
4440 static int
4441 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4442 {
4443         struct lnet_hdr *hdr = &msg->msg_hdr;
4444         struct lnet_processid src = {};
4445         struct lnet_libmd *md;
4446         int cpt;
4447
4448         src.nid = hdr->src_nid;
4449         src.pid = hdr->src_pid;
4450
4451         /* Convert ack fields to host byte order */
4452         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4453         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4454
4455         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4456         lnet_res_lock(cpt);
4457
4458         /* NB handles only looked up by creator (no flips) */
4459         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4460         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4461                 /* Don't moan; this is expected */
4462                 CDEBUG(D_NET,
4463                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4464                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4465                        (md == NULL) ? "invalid" : "inactive",
4466                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4467                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4468                 if (md != NULL && md->md_me != NULL)
4469                         CERROR("Source MD also attached to portal %d\n",
4470                                md->md_me->me_portal);
4471
4472                 lnet_res_unlock(cpt);
4473                 return -ENOENT;                  /* -ve! */
4474         }
4475
4476         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4477                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4478                hdr->msg.ack.dst_wmd.wh_object_cookie);
4479
4480         lnet_msg_attach_md(msg, md, 0, 0);
4481
4482         lnet_res_unlock(cpt);
4483
4484         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4485
4486         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4487         return 0;
4488 }
4489
4490 /**
4491  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4492  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4493  * \retval -ve                  error code
4494  */
4495 int
4496 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4497 {
4498         int     rc = 0;
4499
4500         if (!the_lnet.ln_routing)
4501                 return -ECANCELED;
4502
4503         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4504             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4505                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4506                         msg->msg_rx_ready_delay = 1;
4507                 } else {
4508                         lnet_net_unlock(msg->msg_rx_cpt);
4509                         rc = lnet_ni_eager_recv(ni, msg);
4510                         lnet_net_lock(msg->msg_rx_cpt);
4511                 }
4512         }
4513
4514         if (rc == 0)
4515                 rc = lnet_post_routed_recv_locked(msg, 0);
4516         return rc;
4517 }
4518
4519 int
4520 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4521 {
4522         int     rc;
4523
4524         switch (msg->msg_type) {
4525         case LNET_MSG_ACK:
4526                 rc = lnet_parse_ack(ni, msg);
4527                 break;
4528         case LNET_MSG_PUT:
4529                 rc = lnet_parse_put(ni, msg);
4530                 break;
4531         case LNET_MSG_GET:
4532                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4533                 break;
4534         case LNET_MSG_REPLY:
4535                 rc = lnet_parse_reply(ni, msg);
4536                 break;
4537         default: /* prevent an unused label if !kernel */
4538                 LASSERT(0);
4539                 return -EPROTO;
4540         }
4541
4542         LASSERT(rc == 0 || rc == -ENOENT);
4543         return rc;
4544 }
4545
4546 char *
4547 lnet_msgtyp2str(int type)
4548 {
4549         switch (type) {
4550         case LNET_MSG_ACK:
4551                 return ("ACK");
4552         case LNET_MSG_PUT:
4553                 return ("PUT");
4554         case LNET_MSG_GET:
4555                 return ("GET");
4556         case LNET_MSG_REPLY:
4557                 return ("REPLY");
4558         case LNET_MSG_HELLO:
4559                 return ("HELLO");
4560         default:
4561                 return ("<UNKNOWN>");
4562         }
4563 }
4564 EXPORT_SYMBOL(lnet_msgtyp2str);
4565
4566 int
4567 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4568            struct lnet_nid *from_nid, void *private, int rdma_req)
4569 {
4570         struct lnet_peer_ni *lpni;
4571         struct lnet_msg *msg;
4572         __u32 payload_length;
4573         lnet_pid_t dest_pid;
4574         struct lnet_nid dest_nid;
4575         struct lnet_nid src_nid;
4576         bool push = false;
4577         int for_me;
4578         __u32 type;
4579         int rc = 0;
4580         int cpt;
4581         time64_t now = ktime_get_seconds();
4582
4583         LASSERT(!in_interrupt());
4584
4585         type = hdr->type;
4586         src_nid = hdr->src_nid;
4587         dest_nid = hdr->dest_nid;
4588         dest_pid = hdr->dest_pid;
4589         payload_length = hdr->payload_length;
4590
4591         for_me = nid_same(&ni->ni_nid, &dest_nid);
4592         cpt = lnet_nid2cpt(from_nid, ni);
4593
4594         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4595                 libcfs_nidstr(&dest_nid),
4596                 libcfs_nidstr(&ni->ni_nid),
4597                 libcfs_nidstr(&src_nid),
4598                 lnet_msgtyp2str(type),
4599                 (for_me) ? "for me" : "routed");
4600
4601         switch (type) {
4602         case LNET_MSG_ACK:
4603         case LNET_MSG_GET:
4604                 if (payload_length > 0) {
4605                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4606                                libcfs_nidstr(from_nid),
4607                                libcfs_nidstr(&src_nid),
4608                                lnet_msgtyp2str(type), payload_length);
4609                         return -EPROTO;
4610                 }
4611                 break;
4612
4613         case LNET_MSG_PUT:
4614         case LNET_MSG_REPLY:
4615                 if (payload_length >
4616                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4617                         CERROR("%s, src %s: bad %s payload %d "
4618                                "(%d max expected)\n",
4619                                libcfs_nidstr(from_nid),
4620                                libcfs_nidstr(&src_nid),
4621                                lnet_msgtyp2str(type),
4622                                payload_length,
4623                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4624                         return -EPROTO;
4625                 }
4626                 break;
4627
4628         default:
4629                 CERROR("%s, src %s: Bad message type 0x%x\n",
4630                        libcfs_nidstr(from_nid),
4631                        libcfs_nidstr(&src_nid), type);
4632                 return -EPROTO;
4633         }
4634
4635         /* Only update net_last_alive for incoming GETs on the reserved portal
4636          * (i.e. incoming lnet/discovery pings).
4637          * This avoids situations where the router's own traffic results in NI
4638          * status changes
4639          */
4640         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4641             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4642             !lnet_islocalnid(&src_nid) &&
4643             ni->ni_net->net_last_alive != now) {
4644                 lnet_ni_lock(ni);
4645                 spin_lock(&ni->ni_net->net_lock);
4646                 ni->ni_net->net_last_alive = now;
4647                 spin_unlock(&ni->ni_net->net_lock);
4648                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4649                 lnet_ni_unlock(ni);
4650         }
4651
4652         if (push)
4653                 lnet_push_update_to_peers(1);
4654
4655         /* Regard a bad destination NID as a protocol error.  Senders should
4656          * know what they're doing; if they don't they're misconfigured, buggy
4657          * or malicious so we chop them off at the knees :) */
4658
4659         if (!for_me) {
4660                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4661                         /* should have gone direct */
4662                         CERROR("%s, src %s: Bad dest nid %s "
4663                                "(should have been sent direct)\n",
4664                                 libcfs_nidstr(from_nid),
4665                                 libcfs_nidstr(&src_nid),
4666                                 libcfs_nidstr(&dest_nid));
4667                         return -EPROTO;
4668                 }
4669
4670                 if (lnet_islocalnid(&dest_nid)) {
4671                         /* dest is another local NI; sender should have used
4672                          * this node's NID on its own network */
4673                         CERROR("%s, src %s: Bad dest nid %s "
4674                                "(it's my nid but on a different network)\n",
4675                                 libcfs_nidstr(from_nid),
4676                                 libcfs_nidstr(&src_nid),
4677                                 libcfs_nidstr(&dest_nid));
4678                         return -EPROTO;
4679                 }
4680
4681                 if (rdma_req && type == LNET_MSG_GET) {
4682                         CERROR("%s, src %s: Bad optimized GET for %s "
4683                                "(final destination must be me)\n",
4684                                 libcfs_nidstr(from_nid),
4685                                 libcfs_nidstr(&src_nid),
4686                                 libcfs_nidstr(&dest_nid));
4687                         return -EPROTO;
4688                 }
4689
4690                 if (!the_lnet.ln_routing) {
4691                         CERROR("%s, src %s: Dropping message for %s "
4692                                "(routing not enabled)\n",
4693                                 libcfs_nidstr(from_nid),
4694                                 libcfs_nidstr(&src_nid),
4695                                 libcfs_nidstr(&dest_nid));
4696                         goto drop;
4697                 }
4698         }
4699
4700         /* Message looks OK; we're not going to return an error, so we MUST
4701          * call back lnd_recv() come what may... */
4702
4703         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4704             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4705                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4706                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4707                        lnet_msgtyp2str(type));
4708                 goto drop;
4709         }
4710
4711         if (!list_empty(&the_lnet.ln_drop_rules) &&
4712             lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) {
4713                 CDEBUG(D_NET,
4714                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4715                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4716                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4717                 goto drop;
4718         }
4719
4720         msg = lnet_msg_alloc();
4721         if (msg == NULL) {
4722                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4723                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4724                        lnet_msgtyp2str(type));
4725                 goto drop;
4726         }
4727
4728         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4729          * pointers NULL etc */
4730
4731         msg->msg_type = type;
4732         msg->msg_private = private;
4733         msg->msg_receiving = 1;
4734         msg->msg_rdma_get = rdma_req;
4735         msg->msg_len = msg->msg_wanted = payload_length;
4736         msg->msg_offset = 0;
4737         msg->msg_hdr = *hdr;
4738         /* for building message event */
4739         msg->msg_from = *from_nid;
4740         if (!for_me) {
4741                 msg->msg_target.pid = dest_pid;
4742                 msg->msg_target.nid = dest_nid;
4743                 msg->msg_routing = 1;
4744         }
4745
4746         lnet_net_lock(cpt);
4747         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4748         if (IS_ERR(lpni)) {
4749                 lnet_net_unlock(cpt);
4750                 rc = PTR_ERR(lpni);
4751                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4752                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4753                        lnet_msgtyp2str(type), rc);
4754                 lnet_msg_free(msg);
4755                 if (rc == -ESHUTDOWN)
4756                         /* We are shutting down.  Don't do anything more */
4757                         return rc;
4758                 goto drop;
4759         }
4760
4761         /* If this message was forwarded to us from a router then we may need
4762          * to update router aliveness or check for an asymmetrical route
4763          * (or both)
4764          */
4765         if (((lnet_drop_asym_route && for_me) ||
4766              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4767             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4768                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4769                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4770                 struct lnet_route *route;
4771                 bool found = false;
4772
4773                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4774                         if (route->lr_net == src_net_id) {
4775                                 found = true;
4776                                 /* If we're transitioning the gateway from
4777                                  * dead -> alive, and discovery is disabled
4778                                  * locally or on the gateway, then we need to
4779                                  * update the cached route aliveness for each
4780                                  * route to the src_nid's net.
4781                                  *
4782                                  * Otherwise, we're only checking for a
4783                                  * symmetrical route and can break out of
4784                                  * the loop.
4785                                  */
4786                                 if (!gw->lp_alive &&
4787                                     lnet_is_discovery_disabled(gw))
4788                                         lnet_set_route_aliveness(route, true);
4789                                 else
4790                                         break;
4791                         }
4792                 }
4793                 if (lnet_drop_asym_route && for_me && !found) {
4794                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4795                         lnet_peer_ni_decref_locked(lpni);
4796                         lnet_net_unlock(cpt);
4797                         /* we would not use from_nid to route a message to
4798                          * src_nid
4799                          * => asymmetric routing detected but forbidden
4800                          */
4801                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4802                                libcfs_nidstr(from_nid),
4803                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4804                         lnet_msg_free(msg);
4805                         goto drop;
4806                 }
4807                 if (!gw->lp_alive) {
4808                         struct lnet_peer_net *lpn;
4809                         struct lnet_peer_ni *lpni2;
4810
4811                         gw->lp_alive = true;
4812                         /* Mark all remote NIs on src_nid's net UP */
4813                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4814                         if (lpn)
4815                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4816                                                     lpni_peer_nis)
4817                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4818                 }
4819         }
4820
4821         lpni->lpni_last_alive = now;
4822
4823         msg->msg_rxpeer = lpni;
4824         msg->msg_rxni = ni;
4825         lnet_ni_addref_locked(ni, cpt);
4826         /* Multi-Rail: Primary NID of source. */
4827         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
4828
4829         /*
4830          * mark the status of this lpni as UP since we received a message
4831          * from it. The ping response reports back the ns_status which is
4832          * marked on the remote as up or down and we cache it here.
4833          */
4834         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4835
4836         lnet_msg_commit(msg, cpt);
4837
4838         /* message delay simulation */
4839         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4840                      lnet_delay_rule_match_locked(hdr, msg))) {
4841                 lnet_net_unlock(cpt);
4842                 return 0;
4843         }
4844
4845         if (!for_me) {
4846                 rc = lnet_parse_forward_locked(ni, msg);
4847                 lnet_net_unlock(cpt);
4848
4849                 if (rc < 0)
4850                         goto free_drop;
4851
4852                 if (rc == LNET_CREDIT_OK) {
4853                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4854                                      0, payload_length, payload_length);
4855                 }
4856                 return 0;
4857         }
4858
4859         lnet_net_unlock(cpt);
4860
4861         rc = lnet_parse_local(ni, msg);
4862         if (rc != 0)
4863                 goto free_drop;
4864         return 0;
4865
4866  free_drop:
4867         LASSERT(msg->msg_md == NULL);
4868         lnet_finalize(msg, rc);
4869
4870  drop:
4871         lnet_drop_message(ni, cpt, private, payload_length, type);
4872         return 0;
4873 }
4874 EXPORT_SYMBOL(lnet_parse);
4875
4876 void
4877 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4878 {
4879         struct lnet_msg *msg;
4880
4881         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4882                                                msg_list)) != NULL) {
4883                 struct lnet_processid id = {};
4884
4885                 list_del(&msg->msg_list);
4886
4887                 id.nid = msg->msg_hdr.src_nid;
4888                 id.pid = msg->msg_hdr.src_pid;
4889
4890                 LASSERT(msg->msg_md == NULL);
4891                 LASSERT(msg->msg_rx_delayed);
4892                 LASSERT(msg->msg_rxpeer != NULL);
4893                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4894
4895                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4896                       " offset %d length %d: %s\n",
4897                       libcfs_idstr(&id),
4898                       msg->msg_hdr.msg.put.ptl_index,
4899                       msg->msg_hdr.msg.put.match_bits,
4900                       msg->msg_hdr.msg.put.offset,
4901                       msg->msg_hdr.payload_length, reason);
4902
4903                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4904                  * called lnet_drop_message(), so I just hang onto msg as well
4905                  * until that's done */
4906
4907                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4908                                   msg->msg_private, msg->msg_len,
4909                                   msg->msg_type);
4910
4911                 msg->msg_no_resend = true;
4912                 /*
4913                  * NB: the message generates no event since it has no attached
4914                  * MD, but we still pass an error code so lnet_msg_decommit()
4915                  * can skip counter operations and other checks.
4916                  */
4917                 lnet_finalize(msg, -ENOENT);
4918         }
4919 }
4920
4921 void
4922 lnet_recv_delayed_msg_list(struct list_head *head)
4923 {
4924         struct lnet_msg *msg;
4925
4926         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4927                                                msg_list)) != NULL) {
4928                 struct lnet_processid id;
4929
4930                 list_del(&msg->msg_list);
4931
4932                 /* md won't disappear under me, since each msg
4933                  * holds a ref on it */
4934
4935                 id.nid = msg->msg_hdr.src_nid;
4936                 id.pid = msg->msg_hdr.src_pid;
4937
4938                 LASSERT(msg->msg_rx_delayed);
4939                 LASSERT(msg->msg_md != NULL);
4940                 LASSERT(msg->msg_rxpeer != NULL);
4941                 LASSERT(msg->msg_rxni != NULL);
4942                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4943
4944                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4945                        "match %llu offset %d length %d.\n",
4946                         libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index,
4947                         msg->msg_hdr.msg.put.match_bits,
4948                         msg->msg_hdr.msg.put.offset,
4949                         msg->msg_hdr.payload_length);
4950
4951                 lnet_recv_put(msg->msg_rxni, msg);
4952         }
4953 }
4954
4955 static void
4956 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4957                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4958 {
4959         s64 timeout_ns;
4960         struct lnet_rsp_tracker *local_rspt;
4961
4962         /*
4963          * MD has a refcount taken by message so it's not going away.
4964          * The MD however can be looked up. We need to secure the access
4965          * to the md_rspt_ptr by taking the res_lock.
4966          * The rspt can be accessed without protection up to when it gets
4967          * added to the list.
4968          */
4969
4970         lnet_res_lock(cpt);
4971         local_rspt = md->md_rspt_ptr;
4972         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4973         if (local_rspt != NULL) {
4974                 /*
4975                  * we already have an rspt attached to the md, so we'll
4976                  * update the deadline on that one.
4977                  */
4978                 lnet_rspt_free(rspt, cpt);
4979         } else {
4980                 /* new md */
4981                 rspt->rspt_mdh = mdh;
4982                 rspt->rspt_cpt = cpt;
4983                 /* store the rspt so we can access it when we get the REPLY */
4984                 md->md_rspt_ptr = rspt;
4985                 local_rspt = rspt;
4986         }
4987         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4988
4989         /*
4990          * add to the list of tracked responses. It's added to tail of the
4991          * list in order to expire all the older entries first.
4992          */
4993         lnet_net_lock(cpt);
4994         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4995         lnet_net_unlock(cpt);
4996         lnet_res_unlock(cpt);
4997 }
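
/*
 * Conceptual sketch, not part of the original source: because trackers are
 * queued at the tail above, an expiry pass can walk a per-CPT queue
 * (the_lnet.ln_mt_rstq[cpt]) from the head and stop at the first entry whose
 * deadline has not yet passed.  This only illustrates the ordering argument;
 * the real work (including locking and timing out the attached MD) is done
 * by lnet_finalize_expired_responses(), and the function name below is
 * hypothetical.
 */
static void __maybe_unused
lnet_example_expire_rspts(struct list_head *q, ktime_t now)
{
        struct lnet_rsp_tracker *rspt;
        struct lnet_rsp_tracker *tmp;

        list_for_each_entry_safe(rspt, tmp, q, rspt_on_list) {
                if (ktime_compare(rspt->rspt_deadline, now) > 0)
                        break;  /* all later entries are newer still */
                list_del_init(&rspt->rspt_on_list);
                /* ... time out the MD referenced by rspt->rspt_mdh ... */
        }
}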
4998
4999 /**
5000  * Initiate an asynchronous PUT operation.
5001  *
5002  * There are several events associated with a PUT: completion of the send on
5003  * the initiator node (LNET_EVENT_SEND), and when the send completes
5004  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
5005  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
5006  * used at the target node to indicate the completion of incoming data
5007  * delivery.
5008  *
5009  * The local events will be logged in the EQ associated with the MD pointed to
5010  * by the \a mdh handle. Using an MD without an associated EQ results in these
5011  * events being discarded. In this case, the caller must have another
5012  * mechanism (e.g., a higher level protocol) for determining when it is safe
5013  * to modify the memory region associated with the MD.
5014  *
5015  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
5016  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
5017  *
5018  * \param self Indicates the NID of a local interface through which to send
5019  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
5020  * \param mdh A handle for the MD that describes the memory to be sent. The MD
5021  * must be "free floating" (See LNetMDBind()).
5022  * \param ack Controls whether an acknowledgment is requested.
5023  * Acknowledgments are only sent when they are requested by the initiating
5024  * process and the target MD enables them.
5025  * \param target A process identifier for the target process.
5026  * \param portal The index in the \a target's portal table.
5027  * \param match_bits The match bits to use for MD selection at the target
5028  * process.
5029  * \param offset The offset into the target MD (only used when the target
5030  * MD has the LNET_MD_MANAGE_REMOTE option set).
5031  * \param hdr_data 64 bits of user data that can be included in the message
5032  * header. This data is written to an event queue entry at the target if an
5033  * EQ is present on the matching MD.
5034  *
5035  * \retval  0      Success, and only in this case events will be generated
5036  * and logged to EQ (if it exists).
5037  * \retval -EIO    Simulated failure.
5038  * \retval -ENOMEM Memory allocation failure.
5039  * \retval -ENOENT Invalid MD object.
5040  *
5041  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
5042  */
5043 int
5044 LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
5045         struct lnet_processid *target, unsigned int portal,
5046         __u64 match_bits, unsigned int offset,
5047         __u64 hdr_data)
5048 {
5049         struct lnet_msg *msg;
5050         struct lnet_libmd *md;
5051         int cpt;
5052         int rc;
5053         struct lnet_rsp_tracker *rspt = NULL;
5054
5055         LASSERT(the_lnet.ln_refcount > 0);
5056
5057         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5058             fail_peer(&target->nid, 1)) {               /* shall we now? */
5059                 CERROR("Dropping PUT to %s: simulated failure\n",
5060                        libcfs_idstr(target));
5061                 return -EIO;
5062         }
5063
5064         msg = lnet_msg_alloc();
5065         if (msg == NULL) {
5066                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5067                        libcfs_idstr(target));
5068                 return -ENOMEM;
5069         }
5070         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5071
5072         cpt = lnet_cpt_of_cookie(mdh.cookie);
5073
5074         if (ack == LNET_ACK_REQ) {
5075                 rspt = lnet_rspt_alloc(cpt);
5076                 if (!rspt) {
5077                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5078                                 libcfs_idstr(target));
5079                         lnet_msg_free(msg);
                             return -ENOMEM;
5080                 }
5081                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5082         }
5083
5084         lnet_res_lock(cpt);
5085
5086         md = lnet_handle2md(&mdh);
5087         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5088                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5089                        match_bits, portal, libcfs_idstr(target),
5090                        md == NULL ? -1 : md->md_threshold);
5091                 if (md != NULL && md->md_me != NULL)
5092                         CERROR("Source MD also attached to portal %d\n",
5093                                md->md_me->me_portal);
5094                 lnet_res_unlock(cpt);
5095
5096                 if (rspt)
5097                         lnet_rspt_free(rspt, cpt);
5098
5099                 lnet_msg_free(msg);
5100                 return -ENOENT;
5101         }
5102
5103         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5104
5105         lnet_msg_attach_md(msg, md, 0, 0);
5106
5107         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5108
5109         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5110         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5111         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5112         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5113
5114         /* NB handles only looked up by creator (no flips) */
5115         if (ack == LNET_ACK_REQ) {
5116                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5117                         the_lnet.ln_interface_cookie;
5118                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5119                         md->md_lh.lh_cookie;
5120         } else {
5121                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5122                         LNET_WIRE_HANDLE_COOKIE_NONE;
5123                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5124                         LNET_WIRE_HANDLE_COOKIE_NONE;
5125         }
5126
5127         lnet_res_unlock(cpt);
5128
5129         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5130
5131         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5132                                                    md->md_options))
5133                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5134         else if (rspt)
5135                 lnet_rspt_free(rspt, cpt);
5136
5137         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5138                                  CFS_FAIL_ONCE))
5139                 rc = -EIO;
5140         else
5141                 rc = lnet_send(self, msg, NULL);
5142
5143         if (rc != 0) {
5144                 CNETERR("Error sending PUT to %s: %d\n",
5145                         libcfs_idstr(target), rc);
5146                 msg->msg_no_resend = true;
5147                 lnet_finalize(msg, rc);
5148         }
5149
5150         /* completion will be signalled by an event */
5151         return 0;
5152 }
5153 EXPORT_SYMBOL(LNetPut);
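
/*
 * Illustrative sketch, not part of the original source: a minimal caller of
 * LNetPut().  The MD handle, source NID and target are assumed to have been
 * prepared elsewhere (e.g. the MD via LNetMDBind()); the portal index and
 * match bits below are arbitrary example values, and the function name is
 * hypothetical.
 */
static int __maybe_unused
lnet_example_put(struct lnet_nid *self, struct lnet_handle_md mdh,
                 struct lnet_processid *target)
{
        int rc;

        /* Request an ACK so completion of the transaction can be observed
         * via LNET_EVENT_ACK (and tracked, if PUT response tracking is on). */
        rc = LNetPut(self, mdh, LNET_ACK_REQ, target,
                     12 /* example portal */, 0xcafe /* example match bits */,
                     0 /* offset */, 0 /* hdr_data */);
        if (rc != 0)
                CERROR("example PUT to %s failed: %d\n",
                       libcfs_idstr(target), rc);

        /* On success the result is reported asynchronously through
         * LNET_EVENT_SEND/LNET_EVENT_ACK events on the MD's handler. */
        return rc;
}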
5154
5155 /*
5156  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5157  * returns a msg for the LND to pass to lnet_finalize() when the sink
5158  * data has been received.
5159  *
5160  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5161  * lnet_finalize() is called on it, so the LND must call this first
5162  */
5163 struct lnet_msg *
5164 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5165 {
5166         struct lnet_msg *msg = lnet_msg_alloc();
5167         struct lnet_libmd *getmd = getmsg->msg_md;
5168         struct lnet_processid *peer_id = &getmsg->msg_target;
5169         int cpt;
5170
5171         LASSERT(!getmsg->msg_target_is_router);
5172         LASSERT(!getmsg->msg_routing);
5173
5174         if (msg == NULL) {
5175                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5176                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5177                 goto drop;
5178         }
5179
5180         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5181         lnet_res_lock(cpt);
5182
5183         LASSERT(getmd->md_refcount > 0);
5184
5185         if (getmd->md_threshold == 0) {
5186                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5187                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5188                         getmd);
5189                 lnet_res_unlock(cpt);
5190                 goto drop;
5191         }
5192
5193         LASSERT(getmd->md_offset == 0);
5194
5195         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5196                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5197
5198         /* setup information for lnet_build_msg_event */
5199         msg->msg_initiator =
5200                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5201         msg->msg_from = peer_id->nid;
5202         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5203         msg->msg_hdr.src_nid = peer_id->nid;
5204         msg->msg_hdr.payload_length = getmd->md_length;
5205         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5206
5207         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5208         lnet_res_unlock(cpt);
5209
5210         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5211
5212         lnet_net_lock(cpt);
5213         lnet_msg_commit(msg, cpt);
5214         lnet_net_unlock(cpt);
5215
5216         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5217
5218         return msg;
5219
5220  drop:
5221         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5222
5223         lnet_net_lock(cpt);
5224         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5225         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5226         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5227                 getmd->md_length;
5228         lnet_net_unlock(cpt);
5229
5230         if (msg != NULL)
5231                 lnet_msg_free(msg);
5232
5233         return NULL;
5234 }
5235 EXPORT_SYMBOL(lnet_create_reply_msg);
5236
5237 void
5238 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5239                        unsigned int len)
5240 {
5241         /* Set the REPLY length, now that the RDMA which elides the REPLY
5242          * message has completed and the length is known. */
5243         LASSERT(reply != NULL);
5244         LASSERT(reply->msg_type == LNET_MSG_GET);
5245         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5246
5247         /* NB I trusted my peer to RDMA.  If it claims to have written beyond
5248          * the end of my buffer, I might as well be dead. */
5249         LASSERT(len <= reply->msg_ev.mlength);
5250
5251         reply->msg_ev.mlength = len;
5252 }
5253 EXPORT_SYMBOL(lnet_set_reply_msg_len);
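
/*
 * Illustrative sketch, not part of the original source: roughly how an LND
 * that elides the REPLY (receiving the GET sink data by RDMA) might use
 * lnet_create_reply_msg() and lnet_set_reply_msg_len(), based only on the
 * comments above.  The function names and the split into two steps are
 * hypothetical; real LNDs drive this from their own completion handlers.
 */
static __maybe_unused struct lnet_msg *
lnd_example_get_sent(struct lnet_ni *ni, struct lnet_msg *getmsg)
{
        /* Must be called while getmsg is still valid, i.e. before
         * lnet_finalize() is called on the original GET. */
        return lnet_create_reply_msg(ni, getmsg);
}

static void __maybe_unused
lnd_example_sink_data_received(struct lnet_ni *ni, struct lnet_msg *reply,
                               unsigned int nob, int status)
{
        /* The RDMA that replaced the REPLY has completed and the received
         * length is known; record it, then let LNet raise the REPLY event. */
        if (status == 0)
                lnet_set_reply_msg_len(ni, reply, nob);
        lnet_finalize(reply, status);
}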
5254
5255 /**
5256  * Initiate an asynchronous GET operation.
5257  *
5258  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5259  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5260  * the target node in the REPLY has been written to local MD.
5261  *
5262  * On the target node, an LNET_EVENT_GET is logged when the GET request
5263  * arrives and is accepted into a MD.
5264  *
5265  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5266  * \param mdh A handle for the MD that describes the memory into which the
5267  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5268  *
5269  * \retval  0      Success, and only in this case events will be generated
5270  * and logged to EQ (if it exists) of the MD.
5271  * \retval -EIO    Simulated failure.
5272  * \retval -ENOMEM Memory allocation failure.
5273  * \retval -ENOENT Invalid MD object.
5274  */
5275 int
5276 LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh,
5277         struct lnet_processid *target, unsigned int portal,
5278         __u64 match_bits, unsigned int offset, bool recovery)
5279 {
5280         struct lnet_msg *msg;
5281         struct lnet_libmd *md;
5282         struct lnet_rsp_tracker *rspt;
5283         int cpt;
5284         int rc;
5285
5286         LASSERT(the_lnet.ln_refcount > 0);
5287
5288         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5289             fail_peer(&target->nid, 1)) {       /* shall we now? */
5291                 CERROR("Dropping GET to %s: simulated failure\n",
5292                        libcfs_idstr(target));
5293                 return -EIO;
5294         }
5295
5296         msg = lnet_msg_alloc();
5297         if (!msg) {
5298                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5299                        libcfs_idstr(target));
5300                 return -ENOMEM;
5301         }
5302
5303         cpt = lnet_cpt_of_cookie(mdh.cookie);
5304
5305         rspt = lnet_rspt_alloc(cpt);
5306         if (!rspt) {
5307                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5308                        libcfs_idstr(target));
5309                 lnet_msg_free(msg);
                     return -ENOMEM;
5310         }
5311         INIT_LIST_HEAD(&rspt->rspt_on_list);
5312
5313         msg->msg_recovery = recovery;
5314
5315         lnet_res_lock(cpt);
5316
5317         md = lnet_handle2md(&mdh);
5318         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5319                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5320                        match_bits, portal, libcfs_idstr(target),
5321                        md == NULL ? -1 : md->md_threshold);
5322                 if (md != NULL && md->md_me != NULL)
5323                         CERROR("REPLY MD also attached to portal %d\n",
5324                                md->md_me->me_portal);
5325
5326                 lnet_res_unlock(cpt);
5327
5328                 lnet_msg_free(msg);
5329                 lnet_rspt_free(rspt, cpt);
5330                 return -ENOENT;
5331         }
5332
5333         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5334
5335         lnet_msg_attach_md(msg, md, 0, 0);
5336
5337         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5338
5339         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5340         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5341         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5342         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5343
5344         /* NB handles only looked up by creator (no flips) */
5345         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5346                 the_lnet.ln_interface_cookie;
5347         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5348                 md->md_lh.lh_cookie;
5349
5350         lnet_res_unlock(cpt);
5351
5352         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5353
5354         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5355                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5356         else
5357                 lnet_rspt_free(rspt, cpt);
5358
5359         rc = lnet_send(self, msg, NULL);
5360         if (rc < 0) {
5361                 CNETERR("Error sending GET to %s: %d\n",
5362                         libcfs_idstr(target), rc);
5363                 msg->msg_no_resend = true;
5364                 lnet_finalize(msg, rc);
5365         }
5366
5367         /* completion will be signalled by an event */
5368         return 0;
5369 }
5370 EXPORT_SYMBOL(LNetGet);
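
/*
 * Illustrative sketch, not part of the original source: a minimal caller of
 * LNetGet().  As in the LNetPut() sketch above, the MD handle and target are
 * assumed to be prepared by the caller; the portal and match bits are example
 * values and the function name is hypothetical.
 */
static int __maybe_unused
lnet_example_get(struct lnet_nid *self, struct lnet_handle_md mdh,
                 struct lnet_processid *target)
{
        int rc;

        rc = LNetGet(self, mdh, target, 12 /* example portal */,
                     0xcafe /* example match bits */, 0 /* offset */,
                     false /* not a recovery ping */);
        if (rc != 0)
                CERROR("example GET to %s failed: %d\n",
                       libcfs_idstr(target), rc);

        /* Arrival of the requested data is signalled by LNET_EVENT_REPLY on
         * the MD; LNET_EVENT_SEND only means the GET request left this node. */
        return rc;
}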
5371
5372 /**
5373  * Calculate distance to node at \a dstnid.
5374  *
5375  * \param dstnid Target NID.
5376  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5377  * is saved here.
5378  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5379  * here.
5380  *
5381  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5382  * local_nid_dist_zero is set, which is the default.
5383  * \retval positive Distance to the target NID, i.e. the number of hops plus one.
5384  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5385  */
5386 int
5387 LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp)
5388 {
5389         struct lnet_ni *ni = NULL;
5390         struct lnet_remotenet *rnet;
5391         __u32 dstnet = LNET_NID_NET(dstnid);
5392         int hops;
5393         int cpt;
5394         __u32 order = 2;
5395         struct list_head *rn_list;
5396         struct lnet_ni *matched_dstnet = NULL;
5397
5398         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5399          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5400          * keep order 0 free for 0@lo and order 1 free for a local NID
5401          * match
5402          * WARNING: dstnid and srcnid might point to same place.
5403          * Don't set *srcnid until late.
5404          */
5405
5406         LASSERT(the_lnet.ln_refcount > 0);
5407
5408         cpt = lnet_net_lock_current();
5409
5410         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5411                 if (nid_same(&ni->ni_nid, dstnid)) {
5412                         if (orderp != NULL) {
5413                                 if (nid_is_lo0(dstnid))
5414                                         *orderp = 0;
5415                                 else
5416                                         *orderp = 1;
5417                         }
5418                         if (srcnid)
5419                                 *srcnid = *dstnid;
5420                         lnet_net_unlock(cpt);
5421
5422                         return local_nid_dist_zero ? 0 : 1;
5423                 }
5424
5425                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5426                         matched_dstnet = ni;
5427                         /* We matched the destination net, but we may have
5428                          * additional local NIs to inspect.
5429                          *
5430                          * We record the order as appropriate, but it may be
5431                          * overwritten if a later NI matches dstnid exactly.
5432                          */
5433
5434                         if (orderp) {
5435                                 /* Check if ni was originally created in the
5436                                  * current net namespace.
5437                                  * If not, bias the order by 0xffff0000 so
5438                                  * that this ni is not preferred.
5439                                  */
5440                                 if (current->nsproxy &&
5441                                     !net_eq(ni->ni_net_ns,
5442                                             current->nsproxy->net_ns))
5443                                         *orderp = order + 0xffff0000;
5444                                 else
5445                                         *orderp = order;
5446                         }
5447                 }
5448
5449                 order++;
5450         }
5451
5452         if (matched_dstnet) {
5453                 if (srcnid)
5454                         *srcnid = matched_dstnet->ni_nid;
5455                 lnet_net_unlock(cpt);
5456                 return 1;
5457         }
5458
5459         rn_list = lnet_net2rnethash(dstnet);
5460         list_for_each_entry(rnet, rn_list, lrn_list) {
5461                 if (rnet->lrn_net == dstnet) {
5462                         struct lnet_route *route;
5463                         struct lnet_route *shortest = NULL;
5464                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5465                         __u32 route_hops;
5466
5467                         LASSERT(!list_empty(&rnet->lrn_routes));
5468
5469                         list_for_each_entry(route, &rnet->lrn_routes,
5470                                             lr_list) {
5471                                 route_hops = route->lr_hops;
5472                                 if (route_hops == LNET_UNDEFINED_HOPS)
5473                                         route_hops = 1;
5474                                 if (shortest == NULL ||
5475                                     route_hops < shortest_hops) {
5476                                         shortest = route;
5477                                         shortest_hops = route_hops;
5478                                 }
5479                         }
5480
5481                         LASSERT(shortest != NULL);
5482                         hops = shortest_hops;
5483                         if (srcnid) {
5484                                 struct lnet_net *net;
5485                                 net = lnet_get_net_locked(shortest->lr_lnet);
5486                                 LASSERT(net);
5487                                 ni = lnet_get_next_ni_locked(net, NULL);
5488                                 *srcnid = ni->ni_nid;
5489                         }
5490                         if (orderp != NULL)
5491                                 *orderp = order;
5492                         lnet_net_unlock(cpt);
5493                         return hops + 1;
5494                 }
5495                 order++;
5496         }
5497
5498         lnet_net_unlock(cpt);
5499         return -EHOSTUNREACH;
5500 }
5501 EXPORT_SYMBOL(LNetDist);
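
/*
 * Illustrative sketch, not part of the original source: interpreting the
 * LNetDist() return value as documented above.  0 means dstnid is a local
 * NID (with local_nid_dist_zero set), 1 means local or same-net, larger
 * values are hop count plus one, and -EHOSTUNREACH means no interface or
 * route can reach it.  The function name is hypothetical.
 */
static void __maybe_unused
lnet_example_dist(struct lnet_nid *dstnid)
{
        struct lnet_nid src;
        __u32 order = 0;
        int dist;

        dist = LNetDist(dstnid, &src, &order);
        if (dist < 0)
                CDEBUG(D_NET, "%s unreachable: %d\n",
                       libcfs_nidstr(dstnid), dist);
        else
                CDEBUG(D_NET, "%s reachable via %s: distance %d order %u\n",
                       libcfs_nidstr(dstnid), libcfs_nidstr(&src),
                       dist, order);
}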