/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <lnet/lnet_rdma.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        struct lnet_nid sd_dst_nid;
        struct lnet_nid sd_src_nid;
        struct lnet_nid sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline bool
lnet_msg_is_response(struct lnet_msg *msg)
{
        return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
}

static inline bool
lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
{
        if (md_options & LNET_MD_NO_TRACK_RESPONSE)
                /* Explicitly disabled in MD options */
                return false;

        if (md_options & LNET_MD_TRACK_RESPONSE)
                /* Explicitly enabled in MD options */
                return true;

        if (lnet_response_tracking == 3)
                /* Enabled for all message types */
                return true;

        if (msg_type == LNET_MSG_PUT)
                return lnet_response_tracking == 2;

        if (msg_type == LNET_MSG_GET)
                return lnet_response_tracking == 1;

        return false;
}
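
/*
 * Illustrative sketch (not part of the original file): how the MD option
 * flags interact with the lnet_response_tracking module parameter.  The
 * function below is hypothetical and assumes lnet_response_tracking == 1
 * (track GET responses only).
 */
#if 0	/* example only; never compiled */
static void response_tracking_example(void)
{
        bool t;

        /* an explicit MD flag always wins over the module parameter */
        t = lnet_response_tracking_enabled(LNET_MSG_GET,
                                           LNET_MD_NO_TRACK_RESPONSE);
        /* t == false */

        /* with no MD flags set, the parameter decides per message type:
         * lnet_response_tracking == 1 tracks GET responses only */
        t = lnet_response_tracking_enabled(LNET_MSG_GET, 0);
        /* t == true */
        t = lnet_response_tracking_enabled(LNET_MSG_PUT, 0);
        /* t == false */
}
#endif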

static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}
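
/*
 * Usage sketch (illustrative, not from the original file): bump the send
 * counter of an NI's per-element stats for an outgoing PUT.  "ni" is a
 * hypothetical struct lnet_ni pointer.
 */
#if 0	/* example only; never compiled */
static void incr_stats_example(struct lnet_ni *ni)
{
        lnet_incr_stats(&ni->ni_stats, LNET_MSG_PUT, LNET_STATS_TYPE_SEND);
}
#endif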

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

int
lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct lnet_nid nid;
        LIST_HEAD(cull);

        lnet_nid4_to_nid(nid4, &nid);
        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
                    nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while ((tp = list_first_entry_or_null(&cull,
                                              struct lnet_test_peer,
                                              tp_list)) != NULL) {
                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}
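
/*
 * Usage sketch (hypothetical, not part of the original file): a non-zero
 * threshold adds a test-peer entry that makes the next 'threshold'
 * messages involving that NID fail; a zero threshold culls matching
 * entries, and LNET_NID_ANY matches them all.
 */
#if 0	/* example only; never compiled */
static void fail_nid_example(lnet_nid_t nid4)
{
        lnet_fail_nid(nid4, 3);          /* fail the next 3 messages */
        lnet_fail_nid(LNET_NID_ANY, 0);  /* remove all test-peer entries */
}
#endif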

static int
fail_peer(struct lnet_nid *nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);
        int fail = 0;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
                    nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while ((tp = list_first_entry_or_null(&cull,
                                              struct lnet_test_peer,
                                              tp_list)) != NULL) {
                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);
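
/*
 * Usage sketch (hypothetical buffers): copy 'nob' bytes between two
 * scatter-gather lists, starting at a byte offset into each; the copy
 * routine walks both kvec arrays fragment by fragment.
 */
#if 0	/* example only; never compiled */
static void copy_iov2iov_example(char *a, char *b, char *c)
{
        struct kvec src[2] = {
                { .iov_base = a, .iov_len = 256 },
                { .iov_base = b, .iov_len = 256 },
        };
        struct kvec dst[1] = {
                { .iov_base = c, .iov_len = 512 },
        };

        /* copy 200 bytes starting 100 bytes into 'src' to offset 0 of
         * 'dst'; the source span crosses the src[0]/src[1] boundary */
        lnet_copy_iov2iov(1, dst, 0, 2, src, 100, 200);
}
#endif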

unsigned int
lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
        unsigned int  nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->bv_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
                    unsigned int doffset,
                    unsigned int nsiov, struct bio_vec *siov,
                    unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int    this_nob;
        char           *daddr = NULL;
        char           *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->bv_len) {
                doffset -= diov->bv_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->bv_len) {
                soffset -= siov->bv_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->bv_len - doffset,
                                siov->bv_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->bv_page)) +
                                diov->bv_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->bv_page)) +
                                siov->bv_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->bv_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->bv_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->bv_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->bv_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->bv_page);
        if (saddr != NULL)
                kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                    unsigned int nkiov, struct bio_vec *kiov,
                    unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->bv_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
                  int src_niov, struct bio_vec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->bv_len) {      /* skip initial frags */
                offset -= src->bv_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->bv_len - offset;
                dst->bv_page = src->bv_page;
                dst->bv_offset = src->bv_offset + offset;

                if (len <= frag_len) {
                        dst->bv_len = len;
                        LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
                        return niov;
                }

                dst->bv_len = frag_len;
                LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);
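
/*
 * Usage sketch (hypothetical sizes): describe bytes [offset, offset+len)
 * of an existing bio_vec array as a new sub-vector without copying any
 * data; LNET_MAX_IOV bounds the destination array, as elsewhere in LNet.
 * 'src' must cover at least offset+len bytes.
 */
#if 0	/* example only; never compiled */
static void extract_kiov_example(struct bio_vec *src, int src_niov)
{
        struct bio_vec sub[LNET_MAX_IOV];
        int n;

        /* 'sub' ends up with 'n' fragments covering exactly 4096 bytes,
         * starting 512 bytes into the data described by 'src' */
        n = lnet_extract_kiov(LNET_MAX_IOV, sub, src_niov, src, 512, 4096);
}
#endif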

void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        struct bio_vec  *kiov = NULL;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        msg->msg_kiov = md->md_kiov;
}

void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = *target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = type;
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid = target->nid;
        msg->msg_hdr.dest_pid = target->pid;
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = the_lnet.ln_pid;
        msg->msg_hdr.payload_length = len;
}

void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(nid_is_lo0(&ni->ni_nid) ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int     rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                  &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
                       libcfs_idstr(&msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

static bool
lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
{
        time64_t deadline;

        deadline = lpni->lpni_last_alive +
                   lpni->lpni_net->net_tunables.lct_peer_timeout;

        /*
         * assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (deadline > now)
                return false;

        return true;
}
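
/*
 * Worked example (hypothetical numbers): with lpni_last_alive = 1000 s and
 * lct_peer_timeout = 180 s, the deadline is 1180 s.  A query at now = 1100
 * returns false (still within the timeout window); at now = 1200 it
 * returns true (deadline passed).
 */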

/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
{
        time64_t now = ktime_get_seconds();

        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;

        /*
         * If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 1;

        /* try to send recovery messages regardless */
        if (msg->msg_recovery)
                return 1;

        /* always send any responses */
        if (lnet_msg_is_response(msg))
                return 1;

        if (!lnet_is_peer_deadline_passed(lpni, now))
                return 1;

        return lnet_is_peer_ni_alive(lpni);
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni     *lp = msg->msg_txpeer;
        struct lnet_ni          *ni = msg->msg_txni;
        int                     cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* can't get here if we're sending to the loopback interface */
        if (the_lnet.ln_loni)
                LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp, msg) == 0) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);
                if (msg->msg_txni)
                        lnet_incr_stats(&msg->msg_txni->ni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_idstr(&msg->msg_target));
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                if (do_send)
                        lnet_finalize(msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_idstr(&msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_idstr(&msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
            lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
                msg->msg_tx_delayed = 1;
                return LNET_CREDIT_WAIT;
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}


static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool  *rbp;
        int                     cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        spin_unlock(&lpni->lpni_lock);
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        /* lp_lock protects the lp_rtrq */
                        spin_lock(&lp->lp_lock);
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
        struct lnet_ni          *txni = msg->msg_txni;
        struct lnet_msg         *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_first_entry(&tq->tq_delayed,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -=  msg->msg_len +
                                        sizeof(struct lnet_hdr_nid4);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_first_entry(&txpeer->lpni_txq,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_first_entry(&rbp->rbp_msgs,
                               struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits.  */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeerni->lpni_lock);
                rxpeerni->lpni_rtrcredits++;
                spin_unlock(&rxpeerni->lpni_lock);

                lp = rxpeerni->lpni_peer_net->lpn_peer;
                spin_lock(&lp->lp_lock);

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);
                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_first_entry(&lp->lp_rtrq,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_ni *best_lpni,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
                INT_MIN;
        int best_lpni_healthv = (best_lpni) ?
                atomic_read(&best_lpni->lpni_healthv) : 0;
        bool best_lpni_is_preferred = false;
        bool lpni_is_preferred;
        int lpni_healthv;
        __u32 lpni_sel_prio;
        __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        lpni_is_preferred = lnet_peer_is_pref_nid_locked(
                                lpni, &best_ni->ni_nid);
                        CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
                               libcfs_nidstr(&best_ni->ni_nid),
                               lpni_is_preferred);
                } else {
                        lpni_is_preferred = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);
                lpni_sel_prio = lpni->lpni_sel_priority;

                if (best_lpni)
                        CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
                                libcfs_nidstr(&lpni->lpni_nid),
                                libcfs_nidstr(&best_lpni->lpni_nid),
                                lpni_healthv, best_lpni_healthv,
                                lpni_sel_prio, best_sel_prio,
                                lpni->lpni_txcredits, best_lpni_credits,
                                lpni->lpni_seq, best_lpni->lpni_seq);
                else
                        goto select_lpni;

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv)
                        continue;
                else if (lpni_healthv > best_lpni_healthv) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                if (lpni_sel_prio > best_sel_prio)
                        continue;
                else if (lpni_sel_prio < best_sel_prio) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                /* if this is a preferred peer use it */
                if (!best_lpni_is_preferred && lpni_is_preferred) {
                        best_lpni_is_preferred = true;
                        goto select_lpni;
                } else if (best_lpni_is_preferred && !lpni_is_preferred) {
                        /* this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                }

                if (lpni->lpni_txcredits < best_lpni_credits)
                        /* We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                else if (lpni->lpni_txcredits > best_lpni_credits)
                        goto select_lpni;

                /* The best peer found so far and the current peer
                 * have the same number of available credits let's
                 * make sure to select between them using Round Robin
                 */
                if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
                        continue;
select_lpni:
                best_lpni_is_preferred = lpni_is_preferred;
                best_lpni_healthv = lpni_healthv;
                best_sel_prio = lpni_sel_prio;
                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);
                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                                libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nidstr(&best_lpni->lpni_nid));

        return best_lpni;
}

/*
 * Prerequisite: the best_ni should already be set in the sd
 * Find the best lpni.
 * If the net id is provided then restrict lpni selection on
 * that particular net.
 * Otherwise find any reachable lpni. When dealing with an MR
 * gateway that has multiple usable lpnis, select the best one
 * from the list of reachable ones.
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /* find the best_lpni on any local network */
        if (net_id == LNET_NET_ANY) {
                struct lnet_peer_ni *best_lpni = NULL;
                struct lnet_peer_net *lpn;
                list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
                        /* no net specified find any reachable peer ni */
                        if (!lnet_islocalnet_locked(lpn->lpn_net_id))
                                continue;
                        best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
                                                        best_lpni, lpn);
                }

                return best_lpni;
        }
        /* restrict on the specified net */
        peer_net = lnet_peer_get_net_locked(peer, net_id);
        if (peer_net)
                return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);

        return NULL;
}

static int
lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
{
        if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
                return 1;

        if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
                return -1;

        if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
                return 1;

        if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
                return -1;

        return 0;
}
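
/*
 * Illustration (not from the original file): lnet_compare_gw_lpnis()
 * prefers the gateway NI with less queued data (smaller lpni_txqnob) and,
 * on a tie, the one with more tx credits; a return of 1 favours lpni1,
 * -1 favours lpni2, and 0 leaves the choice to the caller.
 */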
1472
1473 /* Compare route priorities and hop counts */
1474 static int
1475 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1476 {
1477         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1478         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1479
1480         if (r1->lr_priority < r2->lr_priority)
1481                 return 1;
1482
1483         if (r1->lr_priority > r2->lr_priority)
1484                 return -1;
1485
1486         if (r1_hops < r2_hops)
1487                 return 1;
1488
1489         if (r1_hops > r2_hops)
1490                 return -1;
1491
1492         return 0;
1493 }
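
/*
 * Worked example for the comparator above: priority is compared first
 * and lower values win, so a low-priority route beats a shorter
 * high-priority one; hop counts only break priority ties, and
 * LNET_UNDEFINED_HOPS is treated as a single hop:
 *
 *	r1.lr_priority = 0;	r1.lr_hops = 3;
 *	r2.lr_priority = 1;	r2.lr_hops = 1;
 *	lnet_compare_routes(&r1, &r2);	// returns 1: r1 preferred
 */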
1494
1495 static struct lnet_route *
1496 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1497                        struct lnet_peer_ni *remote_lpni,
1498                        struct lnet_route **prev_route,
1499                        struct lnet_peer_ni **gwni)
1500 {
1501         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1502         struct lnet_route *best_route;
1503         struct lnet_route *last_route;
1504         struct lnet_route *route;
1505         int rc;
1506         bool best_rte_is_preferred = false;
1507         struct lnet_nid *gw_pnid;
1508
1509         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1510                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1511
1512         best_route = last_route = NULL;
1513         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1514                 if (!lnet_is_route_alive(route))
1515                         continue;
1516                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1517
1518                 /* no locking protects the fields below, but it's harmless */
1519                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1520                         last_route = route;
1521
1522                 /* if the best route found is in the preferred list then
1523                  * tag it as preferred and use it later on. But if we
1524                  * didn't find any routes which are on the preferred list
1525                  * then just use the best route possible.
1526                  */
1527                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1528
1529                 if (!best_route || (rc && !best_rte_is_preferred)) {
1530                         /* Restrict the selection of the router NI on the
1531                          * src_net provided. If the src_net is LNET_NET_ANY,
1532                          * then select the best interface available.
1533                          */
1534                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1535                                                    route->lr_gateway,
1536                                                    src_net);
1537                         if (!lpni) {
1538                                 CDEBUG(D_NET,
1539                                        "Gateway %s does not have a peer NI on net %s\n",
1540                                        libcfs_nidstr(gw_pnid),
1541                                        libcfs_net2str(src_net));
1542                                 continue;
1543                         }
1544                 }
1545
1546                 if (rc && !best_rte_is_preferred) {
1547                         /* This is the first preferred route we found,
1548                          * so it beats any route found previously
1549                          */
1550                         best_route = route;
1551                         if (!last_route)
1552                                 last_route = route;
1553                         best_gw_ni = lpni;
1554                         best_rte_is_preferred = true;
1555                         CDEBUG(D_NET, "preferred gw = %s\n",
1556                                libcfs_nidstr(gw_pnid));
1557                         continue;
1558                 } else if ((!rc) && best_rte_is_preferred)
1559                         /* The best route we found so far is in the preferred
1560                          * list, so it beats any non-preferred route
1561                          */
1562                         continue;
1563
1564                 if (!best_route) {
1565                         best_route = last_route = route;
1566                         best_gw_ni = lpni;
1567                         continue;
1568                 }
1569
1570                 rc = lnet_compare_routes(route, best_route);
1571                 if (rc == -1)
1572                         continue;
1573
1574                 /* Restrict the selection of the router NI on the
1575                          * src_net provided. If the src_net is LNET_NET_ANY,
1576                  * then select the best interface available.
1577                  */
1578                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1579                                            route->lr_gateway,
1580                                            src_net);
1581                 if (!lpni) {
1582                         CDEBUG(D_NET,
1583                                "Gateway %s does not have a peer NI on net %s\n",
1584                                libcfs_nidstr(gw_pnid),
1585                                libcfs_net2str(src_net));
1586                         continue;
1587                 }
1588
1589                 if (rc == 1) {
1590                         best_route = route;
1591                         best_gw_ni = lpni;
1592                         continue;
1593                 }
1594
1595                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1596                 if (rc == -1)
1597                         continue;
1598
1599                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1600                         best_route = route;
1601                         best_gw_ni = lpni;
1602                         continue;
1603                 }
1604         }
1605
1606         *prev_route = last_route;
1607         *gwni = best_gw_ni;
1608
1609         return best_route;
1610 }
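
/*
 * Condensed reading of the precedence implemented in the loop above
 * (a summary of the code, not a separate algorithm):
 *
 *	for each alive route:
 *		1. a gateway on the peer's preferred-router list wins;
 *		2. else lnet_compare_routes(): priority, then hop count;
 *		3. else lnet_compare_gw_lpnis(): queued bytes, tx credits;
 *		4. else round robin on lr_seq among the remaining ties.
 */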
1611
1612 static inline unsigned int
1613 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1614 {
1615         if (dev_idx == UINT_MAX)
1616                 return UINT_MAX;
1617
1618         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1619             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1620                 return UINT_MAX;
1621
1622         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1623 }
1624
1625 static struct lnet_ni *
1626 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1627                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1628                  struct lnet_msg *msg, int md_cpt)
1629 {
1630         struct lnet_libmd *md = msg->msg_md;
1631         unsigned int offset = msg->msg_offset;
1632         unsigned int shortest_distance;
1633         struct lnet_ni *ni = NULL;
1634         int best_credits;
1635         int best_healthv;
1636         __u32 best_sel_prio;
1637         unsigned int best_dev_prio;
1638         unsigned int dev_idx = UINT_MAX;
1639         bool gpu = md ? (md->md_flags & LNET_MD_FLAG_GPU) : false;
1640
1641         if (gpu) {
1642                 struct page *page = lnet_get_first_page(md, offset);
1643
1644                 dev_idx = lnet_get_dev_idx(page);
1645         }
1646
1647         /*
1648          * If there is no peer_ni that we can send to on this network,
1649          * then there is no point in looking for a new best_ni here.
1650          */
1651         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1652                 return best_ni;
1653
1654         if (best_ni == NULL) {
1655                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1656                 shortest_distance = UINT_MAX;
1657                 best_dev_prio = UINT_MAX;
1658                 best_credits = INT_MIN;
1659                 best_healthv = 0;
1660         } else {
1661                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1662                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1663                                                      best_ni->ni_dev_cpt);
1664                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1665                 best_healthv = atomic_read(&best_ni->ni_healthv);
1666                 best_sel_prio = best_ni->ni_sel_priority;
1667         }
1668
1669         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1670                 unsigned int distance;
1671                 int ni_credits;
1672                 int ni_healthv;
1673                 int ni_fatal;
1674                 __u32 ni_sel_prio;
1675                 unsigned int ni_dev_prio;
1676
1677                 ni_credits = atomic_read(&ni->ni_tx_credits);
1678                 ni_healthv = atomic_read(&ni->ni_healthv);
1679                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1680                 ni_sel_prio = ni->ni_sel_priority;
1681
1682                 /*
1683                  * calculate the distance from the CPT on which
1684                  * the message memory is allocated to the CPT of
1685                  * the NI's physical device
1686                  */
1687                 distance = cfs_cpt_distance(lnet_cpt_table(),
1688                                             md_cpt,
1689                                             ni->ni_dev_cpt);
1690
1691                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1692
1693                 /*
1694                  * All distances smaller than the NUMA range
1695                  * are treated equally.
1696                  */
1697                 if (!gpu && distance < lnet_numa_range)
1698                         distance = lnet_numa_range;
1699
1700                 /*
1701                  * Select on health, selection policy, direct dma prio,
1702                  * shorter distance, available credits, then round-robin.
1703                  */
1704                 if (ni_fatal)
1705                         continue;
1706
1707                 if (best_ni)
1708                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1709                                libcfs_nidstr(&ni->ni_nid), ni_credits, distance,
1710                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1711                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1712                                : "not selected", best_credits, shortest_distance,
1713                                (best_ni) ? best_ni->ni_seq : 0,
1714                                best_sel_prio, best_dev_prio, best_healthv);
1715                 else
1716                         goto select_ni;
1717
1718                 if (ni_healthv < best_healthv)
1719                         continue;
1720                 else if (ni_healthv > best_healthv)
1721                         goto select_ni;
1722
1723                 if (ni_sel_prio > best_sel_prio)
1724                         continue;
1725                 else if (ni_sel_prio < best_sel_prio)
1726                         goto select_ni;
1727
1728                 if (ni_dev_prio > best_dev_prio)
1729                         continue;
1730                 else if (ni_dev_prio < best_dev_prio)
1731                         goto select_ni;
1732
1733                 if (distance > shortest_distance)
1734                         continue;
1735                 else if (distance < shortest_distance)
1736                         goto select_ni;
1737
1738                 if (ni_credits < best_credits)
1739                         continue;
1740                 else if (ni_credits > best_credits)
1741                         goto select_ni;
1742
1743                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1744                         continue;
1745
1746 select_ni:
1747                 best_sel_prio = ni_sel_prio;
1748                 best_dev_prio = ni_dev_prio;
1749                 shortest_distance = distance;
1750                 best_healthv = ni_healthv;
1751                 best_ni = ni;
1752                 best_credits = ni_credits;
1753         }
1754
1755         CDEBUG(D_NET, "selected best_ni %s\n",
1756                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1757
1758         return best_ni;
1759 }
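
/*
 * The tie-break cascade above amounts to a single lexicographic
 * comparison; a hedged restatement, with ni_fatal candidates skipped
 * outright and the larger key winning:
 *
 *	key(ni) = { ni_healthv, -ni_sel_prio, -ni_dev_prio,
 *		    -distance, ni_credits }
 *
 * When two keys are fully equal the NI with the lower ni_seq is
 * picked, which rotates the selection round robin.
 */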
1760
1761 static bool
1762 lnet_reserved_msg(struct lnet_msg *msg)
1763 {
1764         if (msg->msg_type == LNET_MSG_PUT) {
1765                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1766                         return true;
1767         } else if (msg->msg_type == LNET_MSG_GET) {
1768                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1769                         return true;
1770         }
1771         return false;
1772 }
1773
1774 /*
1775  * Traffic to the LNET_RESERVED_PORTAL must not trigger peer discovery,
1776  * because such traffic is required to perform discovery. We therefore
1777  * exclude all GET and PUT on that portal. We also exclude all ACK and
1778  * REPLY traffic, but that is because the portal is not tracked in the
1779  * message structure for these message types. We could restrict this
1780  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1781  */
1782 static bool
1783 lnet_msg_discovery(struct lnet_msg *msg)
1784 {
1785         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1786 }
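
/*
 * Concrete reading of the rule above: PUTs and GETs aimed at
 * LNET_RESERVED_PORTAL, and all ACK/REPLY traffic, make this helper
 * return false, so only ordinary PUT and GET messages can kick off
 * discovery.
 */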
1787
1788 #define SRC_SPEC        0x0001
1789 #define SRC_ANY         0x0002
1790 #define LOCAL_DST       0x0004
1791 #define REMOTE_DST      0x0008
1792 #define MR_DST          0x0010
1793 #define NMR_DST         0x0020
1794 #define SND_RESP        0x0040
1795
1796 /* The following two defines are used for return codes */
1797 #define REPEAT_SEND     0x1000
1798 #define PASS_THROUGH    0x2000
1799
1800 /* The different cases lnet_select pathway needs to handle */
1801 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1802 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1803 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1804 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1805 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1806 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1807 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1808 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
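
/*
 * Example of how the case bits compose: a fresh PUT with no source NID
 * specified, destined for a multi-rail peer behind a router, is
 * classified as
 *
 *	send_case = SRC_ANY | REMOTE_DST | MR_DST;
 *	// == SRC_ANY_ROUTER_MR_DST (0x001a)
 *
 * SND_RESP is OR'd in on top of any of these when the message being
 * sent is an ACK or REPLY.
 */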
1809
1810 static int
1811 lnet_handle_lo_send(struct lnet_send_data *sd)
1812 {
1813         struct lnet_msg *msg = sd->sd_msg;
1814         int cpt = sd->sd_cpt;
1815
1816         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1817                 return -ESHUTDOWN;
1818
1819         /* No send credit hassles with LOLND */
1820         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1821         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1822         if (!msg->msg_routing)
1823                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1824         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1825         lnet_msg_commit(msg, cpt);
1826         msg->msg_txni = the_lnet.ln_loni;
1827
1828         return LNET_CREDIT_OK;
1829 }
1830
1831 static int
1832 lnet_handle_send(struct lnet_send_data *sd)
1833 {
1834         struct lnet_ni *best_ni = sd->sd_best_ni;
1835         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1836         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1837         struct lnet_msg *msg = sd->sd_msg;
1838         int cpt2;
1839         __u32 send_case = sd->sd_send_case;
1840         int rc;
1841         __u32 routing = send_case & REMOTE_DST;
1842         struct lnet_rsp_tracker *rspt;
1843
1844         /* Increment sequence number of the selected peer, peer net,
1845          * local ni and local net so that we pick the next ones
1846          * in Round Robin.
1847          */
1848         best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1849         best_lpni->lpni_peer_net->lpn_seq =
1850                 best_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1851         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1852         the_lnet.ln_net_seq++;
1853         best_ni->ni_net->net_seq = the_lnet.ln_net_seq;
1854         best_ni->ni_seq = best_ni->ni_net->net_seq;
1855
1856         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1857                libcfs_nidstr(&best_ni->ni_nid),
1858                best_ni->ni_seq, best_ni->ni_net->net_seq,
1859                atomic_read(&best_ni->ni_tx_credits),
1860                best_ni->ni_sel_priority,
1861                libcfs_nidstr(&best_lpni->lpni_nid),
1862                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1863                best_lpni->lpni_txcredits,
1864                best_lpni->lpni_sel_priority);
1865
1866         /*
1867          * grab a reference on the peer_ni so it sticks around even if
1868          * we need to drop and relock the lnet_net_lock below.
1869          */
1870         lnet_peer_ni_addref_locked(best_lpni);
1871
1872         /*
1873          * Use lnet_cpt_of_nid_locked() to determine the CPT used to commit the
1874          * message. This ensures that we get a CPT that is correct for
1875          * the NI when the NI has been restricted to a subset of all CPTs.
1876          * If the selected CPT differs from the one currently locked, we
1877          * must unlock and relock the lnet_net_lock(), and then check whether
1878          * the configuration has changed. We don't have a hold on the best_ni
1879          * yet, and it may have vanished.
1880          */
1881         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1882         if (sd->sd_cpt != cpt2) {
1883                 __u32 seq = lnet_get_dlc_seq_locked();
1884                 lnet_net_unlock(sd->sd_cpt);
1885                 sd->sd_cpt = cpt2;
1886                 lnet_net_lock(sd->sd_cpt);
1887                 if (seq != lnet_get_dlc_seq_locked()) {
1888                         lnet_peer_ni_decref_locked(best_lpni);
1889                         return REPEAT_SEND;
1890                 }
1891         }
1892
1893         /*
1894          * store the best_lpni in the message right away to avoid having
1895          * to do the same operation under different conditions
1896          */
1897         msg->msg_txpeer = best_lpni;
1898         msg->msg_txni = best_ni;
1899
1900         /*
1901          * grab a reference for the best_ni since now it's in use in this
1902          * send. The reference will be dropped in lnet_finalize()
1903          */
1904         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1905
1906         /*
1907          * Always set the target.nid to the best peer picked. The NID
1908          * will be one of the peer NIDs selected, the same NID that was
1909          * originally set in the target, or the NID of a router if this
1910          * message should be routed
1911          */
1912         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1913
1914         /*
1915          * lnet_msg_commit assigns the correct cpt to the message, which
1916          * is used to decrement the correct refcount on the ni when it's
1917          * time to return the credits
1918          */
1919         lnet_msg_commit(msg, sd->sd_cpt);
1920
1921         /*
1922          * If we are routing the message then we keep the src_nid that was
1923          * set by the originator. If we are not routing then we are the
1924          * originator and set it here.
1925          */
1926         if (!msg->msg_routing)
1927                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1928
1929         if (routing) {
1930                 msg->msg_target_is_router = 1;
1931                 msg->msg_target.pid = LNET_PID_LUSTRE;
1932                 /*
1933                  * since we're routing we want to ensure that the
1934                  * msg_hdr.dest_nid is set to the final destination. When
1935                  * the router receives this message it knows how to route
1936                  * it.
1937                  *
1938                  * final_dst_lpni is set at the beginning of the
1939                  * lnet_select_pathway() function and is never changed.
1940                  * It's safe to use it here.
1941                  */
1942                 final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1943                 final_dst_lpni->lpni_peer_net->lpn_seq =
1944                         final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1945                 final_dst_lpni->lpni_seq =
1946                         final_dst_lpni->lpni_peer_net->lpn_seq;
1947                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1948         } else {
1949                 /*
1950                  * if we're not routing set the dest_nid to the best peer
1951                  * ni NID that we picked earlier in the algorithm.
1952                  */
1953                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1954         }
1955
1956         /*
1957          * if we have a response tracker block, update it with the
1958          * next hop nid
1959          */
1960         if (msg->msg_md) {
1961                 rspt = msg->msg_md->md_rspt_ptr;
1962                 if (rspt) {
1963                         rspt->rspt_next_hop_nid =
1964                                 msg->msg_txpeer->lpni_nid;
1965                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1966                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1967                 }
1968         }
1969
1970         rc = lnet_post_send_locked(msg, 0);
1971
1972         if (!rc)
1973                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1974                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1975                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1976                        libcfs_nidstr(&sd->sd_src_nid),
1977                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1978                        libcfs_nidstr(&sd->sd_dst_nid),
1979                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1980                        libcfs_nidstr(&sd->sd_rtr_nid),
1981                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1982
1983         return rc;
1984 }
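
/*
 * The CPT switch in lnet_handle_send() above follows a common LNet
 * locking pattern; a minimal sketch, using only the calls seen above:
 *
 *	seq = lnet_get_dlc_seq_locked();
 *	lnet_net_unlock(old_cpt);
 *	lnet_net_lock(new_cpt);
 *	if (seq != lnet_get_dlc_seq_locked())
 *		return REPEAT_SEND;	// config changed under us
 *
 * The DLC sequence number advances on configuration changes, so a
 * mismatch after relocking means cached selections such as best_ni may
 * be stale and the send path must be rerun.
 */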
1985
1986 static inline void
1987 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1988                          struct lnet_msg *msg)
1989 {
1990         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1991             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1992                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1993                        libcfs_nidstr(&lni->ni_nid),
1994                        libcfs_nidstr(&lpni->lpni_nid));
1995                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
1996         }
1997 }
1998
1999 /*
2000  * Source Specified
2001  * Local Destination
2002  * non-mr peer
2003  *
2004  * use the source and destination NIDs as the pathway
2005  */
2006 static int
2007 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2008 {
2009         /* the destination lpni is set before we get here. */
2010
2011         /* find local NI */
2012         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2013         if (!sd->sd_best_ni) {
2014                 CERROR("Can't send to %s: src %s is not a local nid\n",
2015                        libcfs_nidstr(&sd->sd_dst_nid),
2016                        libcfs_nidstr(&sd->sd_src_nid));
2017                 return -EINVAL;
2018         }
2019
2020         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2021
2022         return lnet_handle_send(sd);
2023 }
2024
2025 /*
2026  * Source Specified
2027  * Local Destination
2028  * MR Peer
2029  *
2030  * Don't run the selection algorithm on the peer NIs. By specifying the
2031  * local NID, we're also saying that we should always use the destination NID
2032  * provided. This handles the case where we should be using the same
2033  * destination NID for all the messages which belong to the same RPC
2034  * request.
2035  */
2036 static int
2037 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2038 {
2039         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2040         if (!sd->sd_best_ni) {
2041                 CERROR("Can't send to %s: src %s is not a local nid\n",
2042                        libcfs_nidstr(&sd->sd_dst_nid),
2043                        libcfs_nidstr(&sd->sd_src_nid));
2044                 return -EINVAL;
2045         }
2046
2047         if (sd->sd_best_lpni &&
2048             nid_same(&sd->sd_best_lpni->lpni_nid,
2049                       &the_lnet.ln_loni->ni_nid))
2050                 return lnet_handle_lo_send(sd);
2051         else if (sd->sd_best_lpni)
2052                 return lnet_handle_send(sd);
2053
2054         CERROR("can't send to %s. no NI on %s\n",
2055                libcfs_nidstr(&sd->sd_dst_nid),
2056                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2057
2058         return -EHOSTUNREACH;
2059 }
2060
2061 struct lnet_ni *
2062 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2063                               struct lnet_peer *peer,
2064                               struct lnet_peer_net *peer_net,
2065                               struct lnet_msg *msg,
2066                               int cpt)
2067 {
2068         struct lnet_net *local_net;
2069         struct lnet_ni *best_ni;
2070
2071         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2072         if (!local_net)
2073                 return NULL;
2074
2075         /*
2076          * Iterate through the NIs in this local Net and select
2077          * the NI to send from. The selection is determined by
2078          * these criteria in the following priority:
2079          *      1. NI health, then selection/device priority
2080          *      2. NUMA distance, then available NI tx credits
2081          *      3. Round Robin
2082          */
2083         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2084                                    peer, peer_net, msg, cpt);
2085
2086         return best_ni;
2087 }
2088
2089 static int
2090 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2091                              int cpt)
2092 {
2093         struct lnet_peer *peer;
2094         struct lnet_peer_ni *new_lpni;
2095         int rc;
2096
2097         lnet_peer_ni_addref_locked(lpni);
2098
2099         peer = lpni->lpni_peer_net->lpn_peer;
2100
2101         if (lnet_peer_gw_discovery(peer)) {
2102                 lnet_peer_ni_decref_locked(lpni);
2103                 return 0;
2104         }
2105
2106         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2107                 lnet_peer_ni_decref_locked(lpni);
2108                 return 0;
2109         }
2110
2111         rc = lnet_discover_peer_locked(lpni, cpt, false);
2112         if (rc) {
2113                 lnet_peer_ni_decref_locked(lpni);
2114                 return rc;
2115         }
2116
2117         new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid);
2118         if (!new_lpni) {
2119                 lnet_peer_ni_decref_locked(lpni);
2120                 return -ENOENT;
2121         }
2122
2123         peer = new_lpni->lpni_peer_net->lpn_peer;
2124         spin_lock(&peer->lp_lock);
2125         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2126                 /* The peer NI did not change and the peer is up to date.
2127                  * Nothing more to do.
2128                  */
2129                 spin_unlock(&peer->lp_lock);
2130                 lnet_peer_ni_decref_locked(lpni);
2131                 lnet_peer_ni_decref_locked(new_lpni);
2132                 return 0;
2133         }
2134         spin_unlock(&peer->lp_lock);
2135
2136         /* Either the peer NI changed during discovery, or the peer isn't up
2137          * to date. In both cases we want to queue the message on the
2138          * (possibly new) peer's pending queue and queue the peer for discovery
2139          */
2140         msg->msg_sending = 0;
2141         msg->msg_txpeer = NULL;
2142         lnet_net_unlock(cpt);
2143         lnet_peer_queue_message(peer, msg);
2144         lnet_net_lock(cpt);
2145
2146         lnet_peer_ni_decref_locked(lpni);
2147         lnet_peer_ni_decref_locked(new_lpni);
2148
2149         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2150                msg, libcfs_nidstr(&peer->lp_primary_nid));
2151
2152         return LNET_DC_WAIT;
2153 }
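
/*
 * Caller contract for the function above, as exercised by
 * lnet_handle_find_routed_path() below: 0 means the peer is usable
 * now, LNET_DC_WAIT means the message was queued pending discovery,
 * and a negative value is a hard failure:
 *
 *	rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
 *	if (rc)
 *		return rc;	// LNET_DC_WAIT propagates up as well
 */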
2154
2155 static int
2156 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2157                              struct lnet_nid *dst_nid,
2158                              struct lnet_peer_ni **gw_lpni,
2159                              struct lnet_peer **gw_peer)
2160 {
2161         int rc;
2162         struct lnet_peer *gw;
2163         struct lnet_peer *lp;
2164         struct lnet_peer_net *lpn;
2165         struct lnet_peer_net *best_lpn = NULL;
2166         struct lnet_remotenet *rnet, *best_rnet = NULL;
2167         struct lnet_route *best_route = NULL;
2168         struct lnet_route *last_route = NULL;
2169         struct lnet_peer_ni *lpni = NULL;
2170         struct lnet_peer_ni *gwni = NULL;
2171         bool route_found = false;
2172         struct lnet_nid *src_nid =
2173                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2174                 ? &sd->sd_src_nid
2175                 : &sd->sd_best_ni->ni_nid;
2176         int best_lpn_healthv = 0;
2177         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2178
2179         CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n",
2180                LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified",
2181                libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid),
2182                libcfs_nidstr(&sd->sd_dst_nid));
2183
2184         /* If a router nid was specified then we are replying to a GET or
2185          * sending an ACK. In this case we use the gateway associated with the
2186          * specified router nid.
2187          */
2188         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2189                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2190                 if (gwni) {
2191                         gw = gwni->lpni_peer_net->lpn_peer;
2192                         lnet_peer_ni_decref_locked(gwni);
2193                         if (gw->lp_rtr_refcount)
2194                                 route_found = true;
2195                 } else {
2196                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2197                               libcfs_nidstr(&sd->sd_rtr_nid));
2198                 }
2199         }
2200
2201         if (!route_found) {
2202                 if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) {
2203                         /* If I'm routing this message then I need to find the
2204                          * next hop based on the destination NID
2205                          *
2206                          * We also find next hop based on the destination NID
2207                          * if the source NI was specified
2208                          */
2209                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2210                         if (!best_rnet) {
2211                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2212                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2213                                                 "any local NI" :
2214                                                 libcfs_nidstr(src_nid),
2215                                        libcfs_nidstr(&sd->sd_dst_nid));
2216                                 return -EHOSTUNREACH;
2217                         }
2218                         CDEBUG(D_NET, "best_rnet %s\n",
2219                                libcfs_net2str(best_rnet->lrn_net));
2220                 } else {
2221                         /* we've already looked up the initial lpni using
2222                          * dst_nid
2223                          */
2224                         lpni = sd->sd_best_lpni;
2225                         /* the peer tree must be in existence */
2226                         LASSERT(lpni && lpni->lpni_peer_net &&
2227                                 lpni->lpni_peer_net->lpn_peer);
2228                         lp = lpni->lpni_peer_net->lpn_peer;
2229
2230                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2231                                 /* is this remote network reachable?  */
2232                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2233                                 if (!rnet)
2234                                         continue;
2235
2236                                 if (!best_lpn)
2237                                         goto use_lpn;
2238                                 else
2239                                         CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n",
2240                                                libcfs_net2str(lpn->lpn_net_id),
2241                                                libcfs_net2str(best_lpn->lpn_net_id),
2242                                                lpn->lpn_healthv,
2243                                                best_lpn->lpn_healthv,
2244                                                lpn->lpn_sel_priority,
2245                                                best_lpn->lpn_sel_priority,
2246                                                lpn->lpn_seq,
2247                                                best_lpn->lpn_seq);
2248
2249                                 /* select the preferred peer net */
2250                                 if (best_lpn_healthv > lpn->lpn_healthv)
2251                                         continue;
2252                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2253                                         goto use_lpn;
2254
2255                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2256                                         continue;
2257                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2258                                         goto use_lpn;
2259
2260                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2261                                         continue;
2262 use_lpn:
2263                                 best_lpn_healthv = lpn->lpn_healthv;
2264                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2265                                 best_lpn = lpn;
2266                                 best_rnet = rnet;
2267                         }
2268
2269                         if (!best_lpn) {
2270                                 CERROR("peer %s has no available nets\n",
2271                                        libcfs_nidstr(&sd->sd_dst_nid));
2272                                 return -EHOSTUNREACH;
2273                         }
2274
2275                         CDEBUG(D_NET, "selected best_lpn %s\n",
2276                                libcfs_net2str(best_lpn->lpn_net_id));
2277
2278                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2279                                                                lnet_nid_to_nid4(&sd->sd_dst_nid),
2280                                                                lp,
2281                                                                best_lpn->lpn_net_id);
2282                         if (!sd->sd_best_lpni) {
2283                                 CERROR("peer %s is unreachable\n",
2284                                        libcfs_nidstr(&sd->sd_dst_nid));
2285                                 return -EHOSTUNREACH;
2286                         }
2287
2288                         /* We're attempting to round robin over the remote peer
2289                          * NIs, so update the final destination we selected
2290                          */
2291                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2292                 }
2293
2294                 /*
2295                  * find the best route. Restrict the selection on the net of the
2296                  * local NI if we've already picked the local NI to send from.
2297                  * Otherwise, let's pick any route we can find and then find
2298                  * a local NI we can reach the route's gateway on. Any route we
2299                  * select will be reachable by virtue of the restriction we have
2300                  * when adding a route.
2301                  */
2302                 best_route = lnet_find_route_locked(best_rnet,
2303                                                     LNET_NID_NET(src_nid),
2304                                                     sd->sd_best_lpni,
2305                                                     &last_route, &gwni);
2306
2307                 if (!best_route) {
2308                         CERROR("no route to %s from %s\n",
2309                                libcfs_nidstr(dst_nid),
2310                                libcfs_nidstr(src_nid));
2311                         return -EHOSTUNREACH;
2312                 }
2313
2314                 if (!gwni) {
2315                         CERROR("Internal Error. Route expected to %s from %s\n",
2316                                libcfs_nidstr(dst_nid),
2317                                libcfs_nidstr(src_nid));
2318                         return -EFAULT;
2319                 }
2320
2321                 gw = best_route->lr_gateway;
2322                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2323         }
2324
2325         /*
2326          * If the router checker is not active then discover the gateway here.
2327          * This ensures we are able to take advantage of multi-rail routing, but
2328          * if the router checker is active then we do not unnecessarily delay
2329          * messages while the gateway is being checked by the dedicated monitor
2330          * thread.
2331          *
2332          * NB: We're only checking the alive_router_check_interval here, rather
2333          * than calling lnet_router_checker_active(), because the other
2334          * conditions that are checked by that function are either
2335          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2336          * is not empty)
2337          */
2338         if (alive_router_check_interval <= 0) {
2339                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2340                 if (rc)
2341                         return rc;
2342         }
2343
2344         if (!sd->sd_best_ni) {
2345                 lpn = gwni->lpni_peer_net;
2346                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2347                                                                sd->sd_msg,
2348                                                                sd->sd_md_cpt);
2349                 if (!sd->sd_best_ni) {
2350                         CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2351                                libcfs_net2str(lpn->lpn_net_id),
2352                                libcfs_nidstr(&sd->sd_src_nid));
2353                         return -EFAULT;
2354                 }
2355         }
2356
2357         *gw_lpni = gwni;
2358         *gw_peer = gw;
2359
2360         /*
2361          * increment the sequence number since now we're sure we're
2362          * going to use this route
2363          */
2364         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2365                 LASSERT(best_route && last_route);
2366                 best_route->lr_seq = last_route->lr_seq + 1;
2367         }
2368
2369         return 0;
2370 }
2371
2372 /*
2373  * Handle two cases:
2374  *
2375  * Case 1:
2376  *  Source specified
2377  *  Remote destination
2378  *  Non-MR destination
2379  *
2380  * Case 2:
2381  *  Source specified
2382  *  Remote destination
2383  *  MR destination
2384  *
2385  * The handling of these two cases is similar. Even though the destination
2386  * can be MR or non-MR, we'll deal directly with the router.
2387  */
2388 static int
2389 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2390 {
2391         int rc;
2392         struct lnet_peer_ni *gw_lpni = NULL;
2393         struct lnet_peer *gw_peer = NULL;
2394
2395         /* find local NI */
2396         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2397         if (!sd->sd_best_ni) {
2398                 CERROR("Can't send to %s: src %s is not a local nid\n",
2399                        libcfs_nidstr(&sd->sd_dst_nid),
2400                        libcfs_nidstr(&sd->sd_src_nid));
2401                 return -EINVAL;
2402         }
2403
2404         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2405                                           &gw_lpni, &gw_peer);
2406         if (rc)
2407                 return rc;
2408
2409         if (sd->sd_send_case & NMR_DST)
2410                 /*
2411                  * since the final destination is non-MR let's set its preferred
2412                  * NID before we send
2413                  */
2414                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2415                                          sd->sd_msg);
2416
2417         /*
2418          * We're going to send to the gw found so let's set its
2419          * info
2420          */
2421         sd->sd_peer = gw_peer;
2422         sd->sd_best_lpni = gw_lpni;
2423
2424         return lnet_handle_send(sd);
2425 }
2426
2427 struct lnet_ni *
2428 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2429                                struct lnet_msg *msg, bool discovery)
2430 {
2431         struct lnet_peer_net *lpn = NULL;
2432         struct lnet_peer_net *best_lpn = NULL;
2433         struct lnet_net *net = NULL;
2434         struct lnet_net *best_net = NULL;
2435         struct lnet_ni *best_ni = NULL;
2436         int best_lpn_healthv = 0;
2437         int best_net_healthv = 0;
2438         int net_healthv;
2439         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2440         __u32 lpn_sel_prio;
2441         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2442         __u32 net_sel_prio;
2443
2444         /* if this is a discovery message and lp_disc_net_id is
2445          * specified then use that net to send the discovery on.
2446          */
2447         if (discovery && peer->lp_disc_net_id) {
2448                 best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id);
2449                 if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id))
2450                         goto select_best_ni;
                     /* the discovery net is not local: reset best_lpn so we
                      * fall back to scanning all of the peer's nets below,
                      * rather than comparing against a stale candidate
                      */
                     best_lpn = NULL;
2451         }
2452
2453         /*
2454          * The peer can have multiple interfaces, some of which may be on
2455          * the local network and others on a routed network. We should
2456          * prefer the local network. However, if the local network is not
2457          * available then we need to try the routed network.
2458          */
2459
2460         /* go through all the peer nets and find the best_ni */
2461         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2462                 /*
2463                  * The peer's list of nets can contain non-local nets. We
2464                  * want to only examine the local ones.
2465                  */
2466                 net = lnet_get_net_locked(lpn->lpn_net_id);
2467                 if (!net)
2468                         continue;
2469
2470                 lpn_sel_prio = lpn->lpn_sel_priority;
2471                 net_healthv = lnet_get_net_healthv_locked(net);
2472                 net_sel_prio = net->net_sel_priority;
2473
2474                 if (!best_lpn)
2475                         goto select_lpn;
2476                 else
2477                         CDEBUG(D_NET,
2478                                "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n",
2479                                libcfs_net2str(lpn->lpn_net_id),
2480                                libcfs_net2str(best_lpn->lpn_net_id),
2481                                lpn->lpn_healthv,
2482                                best_lpn_healthv,
2483                                lpn_sel_prio,
2484                                best_lpn_sel_prio,
2485                                net_healthv,
2486                                best_net_healthv,
2487                                net_sel_prio,
2488                                best_net_sel_prio,
2489                                lpn->lpn_seq,
2490                                best_lpn->lpn_seq,
2491                                net->net_seq,
2492                                best_net->net_seq);
2493
2494                 /* always select the lpn with the best health */
2495                 if (best_lpn_healthv > lpn->lpn_healthv)
2496                         continue;
2497                 else if (best_lpn_healthv < lpn->lpn_healthv)
2498                         goto select_lpn;
2499
2500                 /* select the preferred peer and local nets */
2501                 if (best_lpn_sel_prio < lpn_sel_prio)
2502                         continue;
2503                 else if (best_lpn_sel_prio > lpn_sel_prio)
2504                         goto select_lpn;
2505
2506                 if (best_net_healthv > net_healthv)
2507                         continue;
2508                 else if (best_net_healthv < net_healthv)
2509                         goto select_lpn;
2510
2511                 if (best_net_sel_prio < net_sel_prio)
2512                         continue;
2513                 else if (best_net_sel_prio > net_sel_prio)
2514                         goto select_lpn;
2515
2516                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2517                         continue;
2518                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2519                         goto select_lpn;
2520
2521                 /* round robin over the local networks */
2522                 if (best_net->net_seq <= net->net_seq)
2523                         continue;
2524
2525 select_lpn:
2526                 best_net_healthv = net_healthv;
2527                 best_net_sel_prio = net_sel_prio;
2528                 best_lpn_healthv = lpn->lpn_healthv;
2529                 best_lpn_sel_prio = lpn_sel_prio;
2530                 best_lpn = lpn;
2531                 best_net = net;
2532         }
2533
2534         if (best_lpn) {
2535                 /* Select the best NI on the same net as best_lpn chosen
2536                  * above
2537                  */
2538 select_best_ni:
2539                 CDEBUG(D_NET, "selected best_lpn %s\n",
2540                        libcfs_net2str(best_lpn->lpn_net_id));
2541                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2542                                                         msg, md_cpt);
2543         }
2544
2545         return best_ni;
2546 }
2547
2548 static struct lnet_ni *
2549 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2550 {
2551         struct lnet_ni *best_ni = NULL;
2552         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2553         struct lnet_peer_ni *lpni_entry;
2554
2555         /*
2556          * We must use a consistent source address when sending to a
2557          * non-MR peer. However, a non-MR peer can have multiple NIDs
2558          * on multiple networks, and we may even need to talk to this
2559          * peer on multiple networks -- certain types of
2560          * load-balancing configuration do this.
2561          *
2562          * So we need to pick the NI the peer prefers for this
2563          * particular network.
2564          */
2565         LASSERT(peer_net);
2566         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2567                             lpni_peer_nis) {
2568                 if (lpni_entry->lpni_pref_nnids == 0)
2569                         continue;
2570                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2571                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2572                                                 cpt);
2573                 break;
2574         }
2575
2576         return best_ni;
2577 }
2578
2579 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2580 static int
2581 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2582 {
2583         struct lnet_ni *best_ni = NULL;
2584
2585         /*
2586          * See the comment on lnet_find_existing_preferred_best_ni()
2587          * above: we must use a consistent source address when sending
2588          * to a non-MR peer, so we pick the NI the peer prefers for
2589          * this particular network.
2594          *
2595          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2596          * traffic doesn't care which source NI is used, and we don't actually
2597          * want to restrict local recovery pings to a single source NI.
2598          */
2599         if (!lnet_reserved_msg(sd->sd_msg))
2600                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2601                                                                sd->sd_cpt);
2602
2603         if (!best_ni)
2604                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2605                                                 sd->sd_best_lpni->lpni_peer_net,
2606                                                 sd->sd_msg,
2607                                                 sd->sd_md_cpt);
2608
2609         /* If there is no best_ni we don't have a route */
2610         if (!best_ni) {
2611                 CERROR("no path to %s from net %s\n",
2612                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2613                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2614                 return -EHOSTUNREACH;
2615         }
2616
2617         sd->sd_best_ni = best_ni;
2618
2619         /* Set preferred NI if necessary. */
2620         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2621
2622         return 0;
2623 }
2624
2625
2626 /*
2627  * Source not specified
2628  * Local destination
2629  * Non-MR Peer
2630  *
2631  * always use the same source NID for NMR peers
2632  * If we've talked to that peer before then we already have a preferred
2633  * source NI associated with it. Otherwise, we select a preferred local NI
2634  * and store it in the peer
2635  */
2636 static int
2637 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2638 {
2639         int rc = 0;
2640
2641         /* sd->sd_best_lpni is already set to the final destination */
2642
2643         /*
2644          * At this point we should've created the peer ni and peer. If we
2645          * can't find it, then something went wrong. Instead of asserting,
2646          * output a relevant message and fail the send
2647          */
2648         if (!sd->sd_best_lpni) {
2649                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2650                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2651                        libcfs_nidstr(&sd->sd_dst_nid));
2652                 return -EFAULT;
2653         }
2654
2655         if (sd->sd_msg->msg_routing) {
2656                 /* If I'm forwarding this message then I can choose any NI
2657                  * on the destination peer net
2658                  */
2659                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2660                                                                sd->sd_peer,
2661                                                                sd->sd_best_lpni->lpni_peer_net,
2662                                                                sd->sd_msg,
2663                                                                sd->sd_md_cpt);
2664                 if (!sd->sd_best_ni) {
2665                         CERROR("Unable to forward message to %s. No local NI available\n",
2666                                libcfs_nidstr(&sd->sd_dst_nid));
2667                         rc = -EHOSTUNREACH;
2668                 }
2669         } else
2670                 rc = lnet_select_preferred_best_ni(sd);
2671
2672         if (!rc)
2673                 rc = lnet_handle_send(sd);
2674
2675         return rc;
2676 }
2677
2678 static int
2679 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2680 {
2681         /*
2682          * NOTE we've already handled the remote peer case. So we only
2683          * need to worry about the local case here.
2684          *
2685          * if we're sending a response, ACK or reply, we need to send it
2686          * to the destination NID given to us. At this point we already
2687          * have the peer_ni we're supposed to send to, so just find the
2688          * best_ni on the peer net and use that. Since we're sending to an
2689          * MR peer then we can just run the selection algorithm on our
2690          * local NIs and pick the best one.
2691          */
2692         if (sd->sd_send_case & SND_RESP) {
2693                 sd->sd_best_ni =
2694                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2695                                                 sd->sd_best_lpni->lpni_peer_net,
2696                                                 sd->sd_msg,
2697                                                 sd->sd_md_cpt);
2698
2699                 if (!sd->sd_best_ni) {
2700                         /*
2701                          * We're not going to deal with being unable to send
2702                          * a response to the provided final destination
2703                          */
2704                         CERROR("Can't send response to %s. No local NI available\n",
2705                                 libcfs_nidstr(&sd->sd_dst_nid));
2706                         return -EHOSTUNREACH;
2707                 }
2708
2709                 return lnet_handle_send(sd);
2710         }
2711
2712         /*
2713          * If we get here that means we're sending a fresh request, PUT or
2714          * GET, so we need to run our standard selection algorithm.
2715          * First find the best local interface that's on any of the peer's
2716          * networks.
2717          */
2718         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2719                                         sd->sd_md_cpt,
2720                                         sd->sd_msg,
2721                                         lnet_msg_discovery(sd->sd_msg));
2722         if (sd->sd_best_ni) {
2723                 sd->sd_best_lpni =
2724                   lnet_find_best_lpni(sd->sd_best_ni,
2725                                      lnet_nid_to_nid4(&sd->sd_dst_nid),
2726                                       sd->sd_peer,
2727                                       sd->sd_best_ni->ni_net->net_id);
2728
2729                 /*
2730                  * if we're successful in selecting a peer_ni on the local
2731                  * network, then send to it. Otherwise fall through and
2732                  * try and see if we can reach it over another routed
2733                  * network
2734                  */
2735                 if (sd->sd_best_lpni &&
2736                     nid_same(&sd->sd_best_lpni->lpni_nid,
2737                              &the_lnet.ln_loni->ni_nid)) {
2738                         /*
2739                          * in case we initially started with a routed
2740                          * destination, let's reset to local
2741                          */
2742                         sd->sd_send_case &= ~REMOTE_DST;
2743                         sd->sd_send_case |= LOCAL_DST;
2744                         return lnet_handle_lo_send(sd);
2745                 } else if (sd->sd_best_lpni) {
2746                         /*
2747                          * in case we initially started with a routed
2748                          * destination, let's reset to local
2749                          */
2750                         sd->sd_send_case &= ~REMOTE_DST;
2751                         sd->sd_send_case |= LOCAL_DST;
2752                         return lnet_handle_send(sd);
2753                 }
2754
2755                 CERROR("Internal Error. Expected to have a best_lpni: %s -> %s\n",
2756                        libcfs_nidstr(&sd->sd_src_nid),
2757                        libcfs_nidstr(&sd->sd_dst_nid));
2759
2760                 return -EFAULT;
2761         }
2762
2763         /*
2764          * Peer doesn't have a local network. Let's see if there is
2765          * a remote network we can reach it on.
2766          */
2767         return PASS_THROUGH;
2768 }
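
/*
 * Editorial sketch (added note, not original code): PASS_THROUGH is a
 * sentinel return value, distinct from 0 and from negative errnos, by
 * which lnet_handle_any_mr_dsta() tells its caller that no local path
 * exists and routed-path selection should continue. The caller consumes
 * it like this:
 *
 *	rc = lnet_handle_any_mr_dsta(sd);
 *	if (rc != PASS_THROUGH)
 *		return rc;
 *	rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
 *					  &gw_lpni, &gw_peer);
 *
 * Any other non-zero value is treated as a hard failure and propagated.
 */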
2769
2770 /*
2771  * Case 1:
2772  *      Source NID not specified
2773  *      Local destination
2774  *      MR peer
2775  *
2776  * Case 2:
2777  *      Source NID not specified
2778  *      Remote destination
2779  *      MR peer
2780  *
2781  * In both of these cases if we're sending a response, ACK or REPLY, then
2782  * we need to send to the destination NID provided.
2783  *
2784  * In the remote case let's deal with MR routers.
2785  *
2786  */
2787
2788 static int
2789 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2790 {
2791         int rc = 0;
2792         struct lnet_peer *gw_peer = NULL;
2793         struct lnet_peer_ni *gw_lpni = NULL;
2794
2795         /*
2796          * handle sending a response to a remote peer here so we don't
2797          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2798          */
2799         if (sd->sd_send_case & REMOTE_DST &&
2800             sd->sd_send_case & SND_RESP) {
2804                 rc = lnet_handle_find_routed_path(
2805                         sd, &sd->sd_dst_nid, &gw_lpni, &gw_peer);
2806                 if (rc < 0) {
2807                         CERROR("Can't send response to %s. No route available\n",
2808                                libcfs_nidstr(&sd->sd_dst_nid));
2809                         return -EHOSTUNREACH;
2810                 } else if (rc > 0) {
2811                         return rc;
2812                 }
2813
2814                 sd->sd_best_lpni = gw_lpni;
2815                 sd->sd_peer = gw_peer;
2816
2817                 return lnet_handle_send(sd);
2818         }
2819
2820         /*
2821          * Even though the NID for the peer might not be on a local network,
2822          * since the peer is MR there could be other interfaces on the
2823          * local network. In that case we'd still like to prefer the local
2824          * network over the routed network. If we're unable to do that
2825          * then we select the best router among the different routed networks,
2826          * and if the router is MR then we can deal with it as such.
2827          */
2828         rc = lnet_handle_any_mr_dsta(sd);
2829         if (rc != PASS_THROUGH)
2830                 return rc;
2831
2832         /*
2833          * Now that we must route to the destination, we must consider the
2834          * MR case, where the destination has multiple interfaces, some of
2835          * which we can route to and others we cannot. For this reason we
2836          * need to select a destination which we can route to and, if
2837          * there are multiple, round robin among them.
2838          */
2839         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2840                                           &gw_lpni, &gw_peer);
2841         if (rc)
2842                 return rc;
2843
2844         sd->sd_send_case &= ~LOCAL_DST;
2845         sd->sd_send_case |= REMOTE_DST;
2846
2847         sd->sd_peer = gw_peer;
2848         sd->sd_best_lpni = gw_lpni;
2849
2850         return lnet_handle_send(sd);
2851 }
2852
2853 /*
2854  * Source not specified
2855  * Remote destination
2856  * Non-MR peer
2857  *
2858  * Must send to the specified peer NID using the same source NID that
2859  * we've used before. If this is the first time we talk to that peer,
2860  * find the source NI and assign it as the preferred NI for that peer.
2861  */
2862 static int
2863 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2864 {
2865         int rc;
2866         struct lnet_peer_ni *gw_lpni = NULL;
2867         struct lnet_peer *gw_peer = NULL;
2868
2869         /*
2870          * Let's see if we have a preferred NI to talk to this NMR peer
2871          */
2872         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2873                                                               sd->sd_cpt);
2874
2875         /*
2876          * find the router; that will also select the best NI if we haven't
2877          * found one already.
2878          */
2879         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2880                                           &gw_peer);
2881         if (rc)
2882                 return rc;
2883
2884         /*
2885          * set the best_ni we've chosen as the preferred one for
2886          * this peer
2887          */
2888         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2889
2890         /* we'll be sending to the gw */
2891         sd->sd_best_lpni = gw_lpni;
2892         sd->sd_peer = gw_peer;
2893
2894         return lnet_handle_send(sd);
2895 }
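
/*
 * Editorial sketch (added note, not original code): for an NMR peer the
 * chosen local NI must stay stable across messages, so the pattern used
 * above is
 *
 *	best_ni = lnet_find_existing_preferred_best_ni(lpni, cpt);
 *	... select a path, falling back to normal NI selection ...
 *	lnet_set_non_mr_pref_nid(lpni, best_ni, msg);
 *
 * i.e. reuse a previously preferred NI when one exists, and record the
 * final choice so the next send to this peer makes the same decision.
 */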
2896
2897 static int
2898 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2899 {
2900         /*
2901          * turn off the SND_RESP bit.
2902          * It will be checked in the case handling
2903          */
2904         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2905
2906         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2907                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2908                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2909                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2910                 libcfs_nidstr(&sd->sd_dst_nid),
2911                 (send_case & LOCAL_DST) ? "local" : "routed");
2912
2913         switch (send_case) {
2914         /*
2915          * For all cases where the source is specified, we should always
2916          * use the destination NID, whether it's an MR destination or not,
2917          * since we're continuing a series of related messages for the
2918          * same RPC
2919          */
2920         case SRC_SPEC_LOCAL_NMR_DST:
2921                 return lnet_handle_spec_local_nmr_dst(sd);
2922         case SRC_SPEC_LOCAL_MR_DST:
2923                 return lnet_handle_spec_local_mr_dst(sd);
2924         case SRC_SPEC_ROUTER_NMR_DST:
2925         case SRC_SPEC_ROUTER_MR_DST:
2926                 return lnet_handle_spec_router_dst(sd);
2927         case SRC_ANY_LOCAL_NMR_DST:
2928                 return lnet_handle_any_local_nmr_dst(sd);
2929         case SRC_ANY_LOCAL_MR_DST:
2930         case SRC_ANY_ROUTER_MR_DST:
2931                 return lnet_handle_any_mr_dst(sd);
2932         case SRC_ANY_ROUTER_NMR_DST:
2933                 return lnet_handle_any_router_nmr_dst(sd);
2934         default:
2935                 CERROR("Unknown send case\n");
2936                 return -1;
2937         }
2938 }
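
/*
 * Editorial sketch (added note, not original code): each case label above
 * is simply the union of the individual selection flags, assumed to be
 * defined along the lines of
 *
 *	#define SRC_ANY_LOCAL_MR_DST	(SRC_ANY | LOCAL_DST | MR_DST)
 *
 * so lnet_select_pathway() below only needs to OR together its three
 * independent decisions (source specified?, destination local?, peer MR?)
 * to select the right handler in the switch above.
 */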
2939
2940 static int
2941 lnet_select_pathway(struct lnet_nid *src_nid,
2942                     struct lnet_nid *dst_nid,
2943                     struct lnet_msg *msg,
2944                     struct lnet_nid *rtr_nid)
2945 {
2946         struct lnet_peer_ni *lpni;
2947         struct lnet_peer *peer;
2948         struct lnet_send_data send_data;
2949         int cpt, rc;
2950         int md_cpt;
2951         __u32 send_case = 0;
2952         bool final_hop;
2953         bool mr_forwarding_allowed;
2954
2955         memset(&send_data, 0, sizeof(send_data));
2956
2957         /*
2958          * get an initial CPT to use for locking. The idea here is not to
2959          * serialize the calls to select_pathway, so that as many
2960          * operations can run concurrently as possible. To do that we use
2961          * the CPT where this call is being executed. Later on when we
2962          * determine the CPT to use in lnet_message_commit, we switch the
2963          * lock and check if there was any configuration change.  If none,
2964          * then we proceed, if there is, then we restart the operation.
2965          */
2966         cpt = lnet_net_lock_current();
2967
2968         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2969         if (md_cpt == CFS_CPT_ANY)
2970                 md_cpt = cpt;
2971
2972 again:
2973
2974         /*
2975          * If we're being asked to send to the loopback interface, there
2976          * is no need to go through any selection. We can just shortcut
2977          * the entire process and send over lolnd
2978          */
2979         send_data.sd_msg = msg;
2980         send_data.sd_cpt = cpt;
2981         if (nid_is_lo0(dst_nid)) {
2982                 rc = lnet_handle_lo_send(&send_data);
2983                 lnet_net_unlock(cpt);
2984                 return rc;
2985         }
2986
2987         /*
2988          * find an existing peer_ni, or create one and mark it as having been
2989          * created due to network traffic. This call will create the
2990          * peer->peer_net->peer_ni tree.
2991          */
2992         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
2993         if (IS_ERR(lpni)) {
2994                 lnet_net_unlock(cpt);
2995                 return PTR_ERR(lpni);
2996         }
2997
2998         /*
2999          * Cache the original src_nid and rtr_nid. If we need to resend the
3000          * message then we'll need to know whether the src_nid was originally
3001          * specified for this message. If it was originally specified,
3002          * then we need to keep using the same src_nid since it's
3003          * continuing the same sequence of messages. Similarly, rtr_nid will
3004          * affect our choice of next hop.
3005          */
3006         if (src_nid)
3007                 msg->msg_src_nid_param = *src_nid;
3008         else
3009                 msg->msg_src_nid_param = LNET_ANY_NID;
3010         if (rtr_nid)
3011                 msg->msg_rtr_nid_param = *rtr_nid;
3012         else
3013                 msg->msg_rtr_nid_param = LNET_ANY_NID;
3014
3015         /*
3016          * If necessary, perform discovery on the peer that owns this peer_ni.
3017          * Note, this can result in the ownership of this peer_ni changing
3018          * to another peer object.
3019          */
3020         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
3021         if (rc) {
3022                 lnet_peer_ni_decref_locked(lpni);
3023                 lnet_net_unlock(cpt);
3024                 return rc;
3025         }
3026         lnet_peer_ni_decref_locked(lpni);
3027
3028         peer = lpni->lpni_peer_net->lpn_peer;
3029
3030         /*
3031          * Identify the different send cases
3032          */
3033         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3034                 send_case |= SRC_ANY;
3035                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3036                         send_case |= LOCAL_DST;
3037                 else
3038                         send_case |= REMOTE_DST;
3039         } else {
3040                 send_case |= SRC_SPEC;
3041                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3042                         send_case |= LOCAL_DST;
3043                 else
3044                         send_case |= REMOTE_DST;
3045         }
3046
3047         final_hop = false;
3048         if (msg->msg_routing && (send_case & LOCAL_DST))
3049                 final_hop = true;
3050
3051         /* Determine whether to allow MR forwarding for this message.
3052          * NB: MR forwarding is allowed if the message originator and the
3053          * destination are both MR capable, and the destination lpni that was
3054          * originally chosen by the originator is unhealthy or down.
3055          * We check the MR capability of the destination further below
3056          */
3057         mr_forwarding_allowed = false;
3058         if (final_hop) {
3059                 struct lnet_peer *src_lp;
3060                 struct lnet_peer_ni *src_lpni;
3061
3062                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3063                                                    NULL, cpt);
3064                 /* We don't fail the send if we hit any errors here. We'll just
3065                  * try to send it via non-multi-rail criteria
3066                  */
3067                 if (!IS_ERR(src_lpni)) {
3068                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3069                         lnet_peer_ni_decref_locked(src_lpni);
3070                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3071                         if (lnet_peer_is_multi_rail(src_lp) &&
3072                             !lnet_is_peer_ni_alive(lpni))
3073                                 mr_forwarding_allowed = true;
3074
3075                 }
3076                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3077                        mr_forwarding_allowed ? "allowed" : "not allowed");
3078         }
3079
3080         /*
3081          * Deal with the peer as NMR in the following cases:
3082          * 1. the peer is NMR
3083          * 2. We're trying to recover a specific peer NI
3084          * 3. I'm a router sending to the final destination and MR forwarding is
3085          *    not allowed for this message (as determined above).
3086          *    In this case the source of the message would've
3087          *    already selected the final destination so my job
3088          *    is to honor the selection.
3089          */
3090         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3091             (final_hop && !mr_forwarding_allowed))
3092                 send_case |= NMR_DST;
3093         else
3094                 send_case |= MR_DST;
3095
3096         if (lnet_msg_is_response(msg))
3097                 send_case |= SND_RESP;
3098
3099         /* assign parameters to the send_data */
3100         if (rtr_nid)
3101                 send_data.sd_rtr_nid = *rtr_nid;
3102         else
3103                 send_data.sd_rtr_nid = LNET_ANY_NID;
3104         if (src_nid)
3105                 send_data.sd_src_nid = *src_nid;
3106         else
3107                 send_data.sd_src_nid = LNET_ANY_NID;
3108         send_data.sd_dst_nid = *dst_nid;
3109         send_data.sd_best_lpni = lpni;
3110         /*
3111          * keep a pointer to the final destination in case we're going to
3112          * route, so we'll need to access it later
3113          */
3114         send_data.sd_final_dst_lpni = lpni;
3115         send_data.sd_peer = peer;
3116         send_data.sd_md_cpt = md_cpt;
3117         send_data.sd_send_case = send_case;
3118
3119         rc = lnet_handle_send_case_locked(&send_data);
3120
3121         /*
3122          * Update the local cpt since send_data.sd_cpt might've been
3123          * updated as a result of calling lnet_handle_send_case_locked().
3124          */
3125         cpt = send_data.sd_cpt;
3126
3127         if (rc == REPEAT_SEND)
3128                 goto again;
3129
3130         lnet_net_unlock(cpt);
3131
3132         return rc;
3133 }
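
/*
 * Editorial sketch (added note, not original code): REPEAT_SEND is the
 * contract by which a case handler asks lnet_select_pathway() to redo the
 * selection, e.g. after detecting a configuration change under the newly
 * acquired CPT lock (config_changed below is a stand-in condition):
 *
 *	if (config_changed)
 *		return REPEAT_SEND;
 *
 * lnet_select_pathway() turns this into a "goto again", with
 * send_data.sd_cpt carrying whichever CPT is currently locked.
 */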
3134
3135 int
3136 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3137           struct lnet_nid *rtr_nid)
3138 {
3139         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3140         int rc;
3141
3142         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3143         LASSERT(msg->msg_txpeer == NULL);
3144         LASSERT(msg->msg_txni == NULL);
3145         LASSERT(!msg->msg_sending);
3146         LASSERT(!msg->msg_target_is_router);
3147         LASSERT(!msg->msg_receiving);
3148
3149         msg->msg_sending = 1;
3150
3151         LASSERT(!msg->msg_tx_committed);
3152
3153         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3154         if (rc < 0) {
3155                 if (rc == -EHOSTUNREACH)
3156                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3157                 else
3158                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3159                 return rc;
3160         }
3161
3162         if (rc == LNET_CREDIT_OK)
3163                 lnet_ni_send(msg->msg_txni, msg);
3164
3165         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3166         return 0;
3167 }
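
/*
 * Editorial usage sketch (hypothetical caller, not original code): callers
 * treat a negative return as a terminal selection failure and zero as "the
 * message was sent or queued":
 *
 *	rc = lnet_send(&src_nid, msg, NULL);
 *	if (rc < 0)
 *		lnet_finalize(msg, rc);
 *
 * On success the message was either sent immediately (LNET_CREDIT_OK) or
 * queued for credits or discovery; completion is then reported through the
 * MD event handler, not through the return value.
 */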
3168
3169 enum lnet_mt_event_type {
3170         MT_TYPE_LOCAL_NI = 0,
3171         MT_TYPE_PEER_NI
3172 };
3173
3174 struct lnet_mt_event_info {
3175         enum lnet_mt_event_type mt_type;
3176         struct lnet_nid mt_nid;
3177 };
3178
3179 /* called with res_lock held */
3180 void
3181 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3182 {
3183         struct lnet_rsp_tracker *rspt;
3184
3185         /*
3186          * msg has a refcount on the MD so the MD is not going away.
3187          * The rspt queue for the cpt is protected by
3188          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3189          */
3190         if (!md->md_rspt_ptr)
3191                 return;
3192
3193         rspt = md->md_rspt_ptr;
3194
3195         /* debug code */
3196         LASSERT(rspt->rspt_cpt == cpt);
3197
3198         md->md_rspt_ptr = NULL;
3199
3200         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3201                 /*
3202                  * The monitor thread has invalidated this handle because the
3203                  * response timed out, but it failed to lookup the MD. That
3204                  * means this response tracker is on the zombie list. We can
3205                  * safely remove it under the resource lock (held by caller) and
3206                  * free the response tracker block.
3207                  */
3208                 list_del(&rspt->rspt_on_list);
3209                 lnet_rspt_free(rspt, cpt);
3210         } else {
3211                 /*
3212                  * invalidate the handle to indicate that a response has been
3213                  * received, which will then lead the monitor thread to clean up
3214                  * the rspt block.
3215                  */
3216                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3217         }
3218 }
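
/*
 * Editorial summary (added note, not original code): rspt_mdh acts as a
 * hand-off token between this function and the monitor thread. Whichever
 * side finds the handle already invalidated frees the tracker; otherwise
 * it only invalidates the handle and leaves the free to the other side:
 *
 *	if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
 *		list_del(&rspt->rspt_on_list);
 *		lnet_rspt_free(rspt, cpt);
 *	} else {
 *		LNetInvalidateMDHandle(&rspt->rspt_mdh);
 *	}
 *
 * Both sides perform this check under the same resource lock, which is
 * what makes the check-and-act atomic.
 */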
3219
3220 void
3221 lnet_clean_zombie_rstqs(void)
3222 {
3223         struct lnet_rsp_tracker *rspt, *tmp;
3224         int i;
3225
3226         cfs_cpt_for_each(i, lnet_cpt_table()) {
3227                 list_for_each_entry_safe(rspt, tmp,
3228                                          the_lnet.ln_mt_zombie_rstqs[i],
3229                                          rspt_on_list) {
3230                         list_del(&rspt->rspt_on_list);
3231                         lnet_rspt_free(rspt, i);
3232                 }
3233         }
3234
3235         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3236 }
3237
3238 static void
3239 lnet_finalize_expired_responses(void)
3240 {
3241         struct lnet_libmd *md;
3242         struct lnet_rsp_tracker *rspt, *tmp;
3243         ktime_t now;
3244         int i;
3245
3246         if (the_lnet.ln_mt_rstq == NULL)
3247                 return;
3248
3249         cfs_cpt_for_each(i, lnet_cpt_table()) {
3250                 LIST_HEAD(local_queue);
3251
3252                 lnet_net_lock(i);
3253                 if (!the_lnet.ln_mt_rstq[i]) {
3254                         lnet_net_unlock(i);
3255                         continue;
3256                 }
3257                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3258                 lnet_net_unlock(i);
3259
3260                 now = ktime_get();
3261
3262                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3263                         /*
3264                          * The rspt mdh will be invalidated when a response
3265                          * is received or whenever we want to discard the
3266                          * block. The monitor thread will walk the queue
3267                          * and clean up any rspts with an invalid mdh,
3268                          * but it only walks the queue until
3269                          * the first unexpired rspt block. This means that
3270                          * some rspt blocks which received their
3271                          * corresponding responses will linger in the
3272                          * queue until they are eventually cleaned up.
3273                          */
3274                         lnet_res_lock(i);
3275                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3276                                 lnet_res_unlock(i);
3277                                 list_del(&rspt->rspt_on_list);
3278                                 lnet_rspt_free(rspt, i);
3279                                 continue;
3280                         }
3281
3282                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3283                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3284                                 struct lnet_peer_ni *lpni;
3285                                 struct lnet_nid nid;
3286
3287                                 md = lnet_handle2md(&rspt->rspt_mdh);
3288                                 if (!md) {
3289                                         /* MD has been queued for unlink, but
3290                                          * rspt hasn't been detached (Note we've
3291                                          * checked above that the rspt_mdh is
3292                                          * valid). Since we cannot lookup the MD
3293                                          * we're unable to detach the rspt
3294                                          * ourselves. Thus, move the rspt to the
3295                                          * zombie list where we'll wait for
3296                                          * either:
3297                                          *   1. The remaining operations on the
3298                                          *   MD to complete. In this case the
3299                                          *   final operation will result in
3300                                          *   lnet_msg_detach_md()->
3301                                          *   lnet_detach_rsp_tracker() where
3302                                          *   we will clean up this response
3303                                          *   tracker.
3304                                          *   2. LNet to shutdown. In this case
3305                                          *   we'll wait until after all LND Nets
3306                                          *   have shutdown and then we can
3307                                          *   safely free any remaining response
3308                                          *   tracker blocks on the zombie list.
3309                                          * Note: We need to hold the resource
3310                                          * lock when adding to the zombie list
3311                                          * because we may have concurrent access
3312                                          * with lnet_detach_rsp_tracker().
3313                                          */
3314                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3315                                         list_move(&rspt->rspt_on_list,
3316                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3317                                         lnet_res_unlock(i);
3318                                         continue;
3319                                 }
3320                                 LASSERT(md->md_rspt_ptr == rspt);
3321                                 md->md_rspt_ptr = NULL;
3322                                 lnet_res_unlock(i);
3323
3324                                 LNetMDUnlink(rspt->rspt_mdh);
3325
3326                                 nid = rspt->rspt_next_hop_nid;
3327
3328                                 list_del(&rspt->rspt_on_list);
3329                                 lnet_rspt_free(rspt, i);
3330
3331                                 /* If we're shutting down we just want to clean
3332                                  * up the rspt blocks
3333                                  */
3334                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3335                                         continue;
3336
3337                                 lnet_net_lock(i);
3338                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3339                                 lnet_net_unlock(i);
3340
3341                                 CDEBUG(D_NET,
3342                                        "Response timeout: md = %p: nid = %s\n",
3343                                        md, libcfs_nidstr(&nid));
3344
3345                                 /*
3346                                  * If there is a timeout on the response
3347                                  * from the next hop decrement its health
3348                                  * value so that we don't use it
3349                                  */
3350                                 lnet_net_lock(0);
3351                                 lpni = lnet_peer_ni_find_locked(&nid);
3352                                 if (lpni) {
3353                                         lnet_handle_remote_failure_locked(lpni);
3354                                         lnet_peer_ni_decref_locked(lpni);
3355                                 }
3356                                 lnet_net_unlock(0);
3357                         } else {
3358                                 lnet_res_unlock(i);
3359                                 break;
3360                         }
3361                 }
3362
3363                 if (!list_empty(&local_queue)) {
3364                         lnet_net_lock(i);
3365                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3366                         lnet_net_unlock(i);
3367                 }
3368         }
3369 }
3370
3371 static void
3372 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3373 {
3374         struct lnet_msg *msg;
3375
3376         while (!list_empty(resendq)) {
3377                 struct lnet_peer_ni *lpni;
3378
3379                 msg = list_first_entry(resendq, struct lnet_msg,
3380                                        msg_list);
3381
3382                 list_del_init(&msg->msg_list);
3383
3384                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3385                 if (!lpni) {
3386                         lnet_net_unlock(cpt);
3387                         CERROR("Expected that a peer is already created for %s\n",
3388                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3389                         msg->msg_no_resend = true;
3390                         lnet_finalize(msg, -EFAULT);
3391                         lnet_net_lock(cpt);
3392                 } else {
3393                         int rc;
3394
3395                         lnet_peer_ni_decref_locked(lpni);
3396
3397                         lnet_net_unlock(cpt);
3398                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3399                                libcfs_nidstr(&msg->msg_src_nid_param),
3400                                libcfs_idstr(&msg->msg_target),
3401                                lnet_msgtyp2str(msg->msg_type),
3402                                msg->msg_recovery,
3403                                msg->msg_retry_count);
3404                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3405                                        &msg->msg_rtr_nid_param);
3406                         if (rc) {
3407                                 CERROR("Error sending %s to %s: %d\n",
3408                                        lnet_msgtyp2str(msg->msg_type),
3409                                        libcfs_idstr(&msg->msg_target), rc);
3410                                 msg->msg_no_resend = true;
3411                                 lnet_finalize(msg, rc);
3412                         }
3413                         lnet_net_lock(cpt);
3414                         if (!rc)
3415                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3416                 }
3417         }
3418 }
3419
3420 static void
3421 lnet_resend_pending_msgs(void)
3422 {
3423         int i;
3424
3425         cfs_cpt_for_each(i, lnet_cpt_table()) {
3426                 lnet_net_lock(i);
3427                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3428                 lnet_net_unlock(i);
3429         }
3430 }
3431
3432 /* called with cpt and ni_lock held */
3433 static void
3434 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3435 {
3436         struct lnet_handle_md recovery_mdh;
3437
3438         LNetInvalidateMDHandle(&recovery_mdh);
3439
3440         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3441             force) {
3442                 recovery_mdh = ni->ni_ping_mdh;
3443                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3444         }
3445         lnet_ni_unlock(ni);
3446         lnet_net_unlock(cpt);
3447         if (!LNetMDHandleIsInvalid(recovery_mdh))
3448                 LNetMDUnlink(recovery_mdh);
3449         lnet_net_lock(cpt);
3450         lnet_ni_lock(ni);
3451 }
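
/*
 * Editorial note (added for clarity): the lock juggling above is
 * deliberate. The MD handle is snapshotted under ni_lock/net_lock, both
 * locks are dropped so that LNetMDUnlink() can run without them held
 * (it takes LNet resource locks and may fire event handlers), and the
 * locks are then reacquired so the caller's locking state is unchanged.
 */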
3452
3453 static void
3454 lnet_recover_local_nis(void)
3455 {
3456         struct lnet_mt_event_info *ev_info;
3457         LIST_HEAD(processed_list);
3458         LIST_HEAD(local_queue);
3459         struct lnet_handle_md mdh;
3460         struct lnet_ni *tmp;
3461         struct lnet_ni *ni;
3462         struct lnet_nid nid;
3463         int healthv;
3464         int rc;
3465         time64_t now;
3466
3467         /*
3468          * splice the recovery queue onto a local queue. We will iterate
3469          * through the local queue and update it as needed. Once we're
3470          * done with the traversal, we'll splice the local queue back onto
3471          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3472          * will be traversed in the next iteration.
3473          */
3474         lnet_net_lock(0);
3475         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3476                          &local_queue);
3477         lnet_net_unlock(0);
3478
3479         now = ktime_get_seconds();
3480
3481         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3482                 /*
3483                  * if an NI is being deleted or it is now healthy, there
3484                  * is no need to keep it around in the recovery queue.
3485                  * The monitor thread is the only thread responsible for
3486                  * removing the NI from the recovery queue.
3487                  * Multiple threads can be adding NIs to the recovery
3488                  * queue.
3489                  */
3490                 healthv = atomic_read(&ni->ni_healthv);
3491
3492                 lnet_net_lock(0);
3493                 lnet_ni_lock(ni);
3494                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3495                     healthv == LNET_MAX_HEALTH_VALUE) {
3496                         list_del_init(&ni->ni_recovery);
3497                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3498                         lnet_ni_unlock(ni);
3499                         lnet_ni_decref_locked(ni, 0);
3500                         lnet_net_unlock(0);
3501                         continue;
3502                 }
3503
3504                 /*
3505                  * if the local NI failed recovery we must unlink the md.
3506                  * But we want to keep the local_ni on the recovery queue
3507                  * so we can continue the attempts to recover it.
3508                  */
3509                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3510                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3511                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3512                 }
3513
3515                 lnet_ni_unlock(ni);
3516
3517                 if (now < ni->ni_next_ping) {
3518                         lnet_net_unlock(0);
3519                         continue;
3520                 }
3521
3522                 lnet_net_unlock(0);
3523
3524                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3525                        libcfs_nidstr(&ni->ni_nid));
3526
3527                 lnet_ni_lock(ni);
3528                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3529                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3530                         lnet_ni_unlock(ni);
3531
3532                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3533                         if (!ev_info) {
3534                                 CERROR("out of memory. Can't recover %s\n",
3535                                        libcfs_nidstr(&ni->ni_nid));
3536                                 lnet_ni_lock(ni);
3537                                 ni->ni_recovery_state &=
3538                                   ~LNET_NI_RECOVERY_PENDING;
3539                                 lnet_ni_unlock(ni);
3540                                 continue;
3541                         }
3542
3543                         mdh = ni->ni_ping_mdh;
3544                         /*
3545                          * Invalidate the ni mdh in case it's deleted.
3546                          * We'll unlink the mdh in this case below.
3547                          */
3548                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3549                         nid = ni->ni_nid;
3550
3551                         /*
3552                          * remove the NI from the local queue and drop the
3553                          * reference count to it while we're recovering
3554                          * it. The reason is that the NI could
3555                          * be deleted, and if we don't drop
3556                          * our reference, then the deletion
3557                          * code will loop waiting for the
3558                          * reference count to drop while holding the
3559                          * ln_mutex_lock(). When we look up the peer to
3560                          * send to in lnet_select_pathway() we will try to
3561                          * lock the ln_mutex_lock() as well, leading to
3562                          * a deadlock. By dropping the refcount and
3563                          * removing it from the list, we allow for the NI
3564                          * to be removed, then we use the cached NID to
3565                          * look it up again. If it's gone, then we just
3566                          * continue examining the rest of the queue.
3567                          */
3568                         lnet_net_lock(0);
3569                         list_del_init(&ni->ni_recovery);
3570                         lnet_ni_decref_locked(ni, 0);
3571                         lnet_net_unlock(0);
3572
3573                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3574                         ev_info->mt_nid = nid;
3575                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3576                                             ev_info, the_lnet.ln_mt_handler,
3577                                             true);
3578                         /* lookup the nid again */
3579                         lnet_net_lock(0);
3580                         ni = lnet_nid_to_ni_locked(&nid, 0);
3581                         if (!ni) {
3582                                 /*
3583                                  * the NI has been deleted when we dropped
3584                                  * the ref count
3585                                  */
3586                                 lnet_net_unlock(0);
3587                                 LNetMDUnlink(mdh);
3588                                 continue;
3589                         }
3590                         ni->ni_ping_count++;
3591
3592                         ni->ni_ping_mdh = mdh;
3593                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3594                                                         now);
3595
3596                         if (rc) {
3597                                 lnet_ni_lock(ni);
3598                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3599                                 lnet_ni_unlock(ni);
3600                         }
3601                         lnet_net_unlock(0);
3602                 } else {
3603                         lnet_ni_unlock(ni);
                }
3604         }
3605
3606         /*
3607          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3608          * reexamined in the next iteration.
3609          */
3610         list_splice_init(&processed_list, &local_queue);
3611         lnet_net_lock(0);
3612         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3613         lnet_net_unlock(0);
3614 }
3615
3616 static int
3617 lnet_resendqs_create(void)
3618 {
3619         struct list_head **resendqs;
3620
3621         resendqs = lnet_create_array_of_queues();
3622         if (!resendqs)
3623                 return -ENOMEM;
3624
3625         lnet_net_lock(LNET_LOCK_EX);
3626         the_lnet.ln_mt_resendqs = resendqs;
3627         lnet_net_unlock(LNET_LOCK_EX);
3628
3629         return 0;
3630 }
3631
3632 static void
3633 lnet_clean_local_ni_recoveryq(void)
3634 {
3635         struct lnet_ni *ni;
3636
3637         /* This is only called when the monitor thread has stopped */
3638         lnet_net_lock(0);
3639
3640         while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
3641                                               struct lnet_ni,
3642                                               ni_recovery)) != NULL) {
3643                 list_del_init(&ni->ni_recovery);
3644                 lnet_ni_lock(ni);
3645                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3646                 lnet_ni_unlock(ni);
3647                 lnet_ni_decref_locked(ni, 0);
3648         }
3649
3650         lnet_net_unlock(0);
3651 }
3652
3653 static void
3654 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3655                                      bool force)
3656 {
3657         struct lnet_handle_md recovery_mdh;
3658
3659         LNetInvalidateMDHandle(&recovery_mdh);
3660
3661         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3662                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3663                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3664         }
3665         spin_unlock(&lpni->lpni_lock);
3666         lnet_net_unlock(cpt);
3667         if (!LNetMDHandleIsInvalid(recovery_mdh))
3668                 LNetMDUnlink(recovery_mdh);
3669         lnet_net_lock(cpt);
3670         spin_lock(&lpni->lpni_lock);
3671 }
3672
3673 static void
3674 lnet_clean_peer_ni_recoveryq(void)
3675 {
3676         struct lnet_peer_ni *lpni, *tmp;
3677
3678         lnet_net_lock(LNET_LOCK_EX);
3679
3680         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3681                                  lpni_recovery) {
3682                 list_del_init(&lpni->lpni_recovery);
3683                 spin_lock(&lpni->lpni_lock);
3684                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3685                 spin_unlock(&lpni->lpni_lock);
3686                 lnet_peer_ni_decref_locked(lpni);
3687         }
3688
3689         lnet_net_unlock(LNET_LOCK_EX);
3690 }
3691
3692 static void
3693 lnet_clean_resendqs(void)
3694 {
3695         struct lnet_msg *msg, *tmp;
3696         LIST_HEAD(msgs);
3697         int i;
3698
3699         cfs_cpt_for_each(i, lnet_cpt_table()) {
3700                 lnet_net_lock(i);
3701                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3702                 lnet_net_unlock(i);
3703                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3704                         list_del_init(&msg->msg_list);
3705                         msg->msg_no_resend = true;
3706                         lnet_finalize(msg, -ESHUTDOWN);
3707                 }
3708         }
3709
3710         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3711 }
3712
3713 static void
3714 lnet_recover_peer_nis(void)
3715 {
3716         struct lnet_mt_event_info *ev_info;
3717         LIST_HEAD(processed_list);
3718         LIST_HEAD(local_queue);
3719         struct lnet_handle_md mdh;
3720         struct lnet_peer_ni *lpni;
3721         struct lnet_peer_ni *tmp;
3722         struct lnet_nid nid;
3723         int healthv;
3724         int rc;
3725         time64_t now;
3726
3727         /*
3728          * Always use cpt 0 for locking across all interactions with
3729          * ln_mt_peerNIRecovq
3730          */
3731         lnet_net_lock(0);
3732         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3733                          &local_queue);
3734         lnet_net_unlock(0);
3735
3736         now = ktime_get_seconds();
3737
3738         list_for_each_entry_safe(lpni, tmp, &local_queue,
3739                                  lpni_recovery) {
3740                 /*
3741                  * The same protection strategy is used here as is in the
3742                  * local recovery case.
3743                  */
3744                 lnet_net_lock(0);
3745                 healthv = atomic_read(&lpni->lpni_healthv);
3746                 spin_lock(&lpni->lpni_lock);
3747                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3748                     healthv == LNET_MAX_HEALTH_VALUE) {
3749                         list_del_init(&lpni->lpni_recovery);
3750                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3751                         spin_unlock(&lpni->lpni_lock);
3752                         lnet_peer_ni_decref_locked(lpni);
3753                         lnet_net_unlock(0);
3754                         continue;
3755                 }
3756
3757                 /*
3758                  * If the peer NI has failed recovery we must unlink the
3759                  * md. But we want to keep the peer ni on the recovery
3760                  * queue so we can try to continue recovering it
3761                  */
3762                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3763                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3764                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3765                 }
3766
3767                 spin_unlock(&lpni->lpni_lock);
3768
3769                 if (now < lpni->lpni_next_ping) {
3770                         lnet_net_unlock(0);
3771                         continue;
3772                 }
3773
3774                 lnet_net_unlock(0);
3775
3776                 /*
3777                  * NOTE: we're racing with peer deletion from user space.
3778                  * It's possible that a peer is deleted after we check its
3779                  * state. In this case the recovery can create a new peer.
3780                  */
3781                 spin_lock(&lpni->lpni_lock);
3782                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3783                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3784                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3785                         spin_unlock(&lpni->lpni_lock);
3786
3787                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3788                         if (!ev_info) {
3789                                 CERROR("out of memory. Can't recover %s\n",
3790                                        libcfs_nidstr(&lpni->lpni_nid));
3791                                 spin_lock(&lpni->lpni_lock);
3792                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3793                                 spin_unlock(&lpni->lpni_lock);
3794                                 continue;
3795                         }
3796
3797                         /* look at the comments in lnet_recover_local_nis() */
3798                         mdh = lpni->lpni_recovery_ping_mdh;
3799                         nid = lpni->lpni_nid;
3800                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3801                         lnet_net_lock(0);
3802                         list_del_init(&lpni->lpni_recovery);
3803                         lnet_peer_ni_decref_locked(lpni);
3804                         lnet_net_unlock(0);
3805
3806                         ev_info->mt_type = MT_TYPE_PEER_NI;
3807                         ev_info->mt_nid = nid;
3808                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3809                                             ev_info, the_lnet.ln_mt_handler,
3810                                             true);
3811                         lnet_net_lock(0);
3812                         /*
3813                          * lnet_peer_ni_find_locked() grabs a refcount for
3814                          * us. No need to take it explicitly.
3815                          */
3816                         lpni = lnet_peer_ni_find_locked(&nid);
3817                         if (!lpni) {
3818                                 lnet_net_unlock(0);
3819                                 LNetMDUnlink(mdh);
3820                                 continue;
3821                         }
3822
3823                         lpni->lpni_ping_count++;
3824
3825                         lpni->lpni_recovery_ping_mdh = mdh;
3826
3827                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3828                                                              &processed_list,
3829                                                              now);
3830                         if (rc) {
3831                                 spin_lock(&lpni->lpni_lock);
3832                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3833                                 spin_unlock(&lpni->lpni_lock);
3834                         }
3835
3836                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3837                         lnet_peer_ni_decref_locked(lpni);
3838                         lnet_net_unlock(0);
3839                 } else {
3840                         spin_unlock(&lpni->lpni_lock);
                }
3841         }
3842
3843         list_splice_init(&processed_list, &local_queue);
3844         lnet_net_lock(0);
3845         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3846         lnet_net_unlock(0);
3847 }
3848
3849 static int
3850 lnet_monitor_thread(void *arg)
3851 {
3852         time64_t rsp_timeout = 0;
3853         time64_t now;
3854
3855         wait_for_completion(&the_lnet.ln_started);
3856         /*
3857          * The monitor thread takes care of the following:
3858          *  1. Checks the aliveness of routers
3859          *  2. Resends any messages on the resend queue
3860          *  3. Pings any NIs on the local NI recovery queue
3861          *  4. Pings any peer NIs on the remote recovery queue
3865          */
3866         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3867                 now = ktime_get_real_seconds();
3868
3869                 if (lnet_router_checker_active())
3870                         lnet_check_routers();
3871
3872                 lnet_resend_pending_msgs();
3873
3874                 if (now >= rsp_timeout) {
3875                         lnet_finalize_expired_responses();
3876                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3877                 }
3878
3879                 lnet_recover_local_nis();
3880                 lnet_recover_peer_nis();
3881
3882                 /*
3883                  * TODO do we need to check if we should sleep without
3884                  * timeout?  Technically, an active system will always
3885                  * have messages in flight so this check will always
3886                  * evaluate to false. And on an idle system do we care
3887                  * if we wake up every 1 second? That said, we've seen
3888                  * cases where we get complaints that an idle thread
3889                  * is waking up unnecessarily.
3890                  */
3891                 wait_for_completion_interruptible_timeout(
3892                         &the_lnet.ln_mt_wait_complete,
3893                         cfs_time_seconds(1));
3894                 /* Must re-init the completion before testing anything,
3895                  * including ln_mt_state.
3896                  */
3897                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3898         }
3899
3900         /* Shutting down */
3901         lnet_net_lock(LNET_LOCK_EX);
3902         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3903         lnet_net_unlock(LNET_LOCK_EX);
3904
3905         /* signal that the monitor thread is exiting */
3906         up(&the_lnet.ln_mt_signal);
3907
3908         return 0;
3909 }
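
/*
 * Editorial sketch (grounded in lnet_monitor_thr_stop() below): any thread
 * can cut the monitor's one-second sleep short by completing the wait
 * object after updating the state it should observe, e.g. shutdown does:
 *
 *	the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
 *	complete(&the_lnet.ln_mt_wait_complete);
 *
 * This is why the loop re-inits the completion before re-checking
 * ln_mt_state on each iteration.
 */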
3910
3911 /*
3912  * lnet_send_ping
3913  * Sends a ping.
3914  * Returns == 0 on success
3915  * Returns > 0 (positive errno) if buffer allocation or LNetMDBind fails
3916  * Returns < 0 if the destination NID is invalid or LNetGet fails
3917  */
3918 int
3919 lnet_send_ping(struct lnet_nid *dest_nid,
3920                struct lnet_handle_md *mdh, int nnis,
3921                void *user_data, lnet_handler_t handler, bool recovery)
3922 {
3923         struct lnet_md md = { NULL };
3924         struct lnet_processid id;
3925         struct lnet_ping_buffer *pbuf;
3926         int rc;
3927
3928         if (LNET_NID_IS_ANY(dest_nid)) {
3929                 rc = -EHOSTUNREACH;
3930                 goto fail_error;
3931         }
3932
3933         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3934         if (!pbuf) {
3935                 rc = ENOMEM;
3936                 goto fail_error;
3937         }
3938
3939         /* initialize md content */
3940         md.start     = &pbuf->pb_info;
3941         md.length    = LNET_PING_INFO_SIZE(nnis);
3942         md.threshold = 2; /* GET/REPLY */
3943         md.max_size  = 0;
3944         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3945         md.user_ptr  = user_data;
3946         md.handler   = handler;
3947
3948         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3949         if (rc) {
3950                 lnet_ping_buffer_decref(pbuf);
3951                 CERROR("Can't bind MD: %d\n", rc);
3952                 rc = -rc; /* change the rc to positive */
3953                 goto fail_error;
3954         }
3955         id.pid = LNET_PID_LUSTRE;
3956         id.nid = *dest_nid;
3957
3958         rc = LNetGet(NULL, *mdh, &id,
3959                      LNET_RESERVED_PORTAL,
3960                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3961
3962         if (rc)
3963                 goto fail_unlink_md;
3964
3965         return 0;
3966
3967 fail_unlink_md:
3968         LNetMDUnlink(*mdh);
3969         LNetInvalidateMDHandle(mdh);
3970 fail_error:
3971         return rc;
3972 }
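
/*
 * Editorial usage sketch (mirrors the recovery callers above, not new
 * functionality): a recovery ping binds an MD sized for at least
 * LNET_INTERFACES_MIN interfaces and routes events to the monitor handler:
 *
 *	rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
 *			    ev_info, the_lnet.ln_mt_handler, true);
 *	if (rc)
 *		... clear the RECOVERY_PENDING state, as the callers do ...
 *
 * Note the mixed-sign error convention documented above: callers only
 * test the result for non-zero.
 */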
3973
3974 static void
3975 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3976                            int status, bool send, bool unlink_event)
3977 {
3978         struct lnet_nid *nid = &ev_info->mt_nid;
3979
3980         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3981                 struct lnet_ni *ni;
3982
3983                 lnet_net_lock(0);
3984                 ni = lnet_nid_to_ni_locked(nid, 0);
3985                 if (!ni) {
3986                         lnet_net_unlock(0);
3987                         return;
3988                 }
3989                 lnet_ni_lock(ni);
3990                 if (!send || status != 0)
3991                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3992                 if (status)
3993                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3994                 lnet_ni_unlock(ni);
3995                 lnet_net_unlock(0);
3996
3997                 if (status != 0) {
3998                         CERROR("local NI (%s) recovery failed with %d\n",
3999                                libcfs_nidstr(nid), status);
4000                         return;
4001                 }
4002                 /*
4003                  * We need to increment healthv for the ni here, because in
4004                  * the lnet_finalize() path we don't have access to this
4005                  * NI. And in order to get access to it, we would need to
4006                  * carry forward too much information.
4007                  * In the peer case, it is naturally incremented.
4008                  */
4009                 if (!unlink_event)
4010                         lnet_inc_healthv(&ni->ni_healthv,
4011                                          lnet_health_sensitivity);
4012         } else {
4013                 struct lnet_peer_ni *lpni;
4014                 int cpt;
4015
4016                 cpt = lnet_net_lock_current();
4017                 lpni = lnet_peer_ni_find_locked(nid);
4018                 if (!lpni) {
4019                         lnet_net_unlock(cpt);
4020                         return;
4021                 }
4022                 spin_lock(&lpni->lpni_lock);
4023                 if (!send || status != 0)
4024                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
4025                 if (status)
4026                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
4027                 spin_unlock(&lpni->lpni_lock);
4028                 lnet_peer_ni_decref_locked(lpni);
4029                 lnet_net_unlock(cpt);
4030
4031                 if (status != 0)
4032                         CERROR("peer NI (%s) recovery failed with %d\n",
4033                                libcfs_nidstr(nid), status);
4034         }
4035 }
4036
4037 void
4038 lnet_mt_event_handler(struct lnet_event *event)
4039 {
4040         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4041         struct lnet_ping_buffer *pbuf;
4042
4043         /* TODO: remove assert */
4044         LASSERT(event->type == LNET_EVENT_REPLY ||
4045                 event->type == LNET_EVENT_SEND ||
4046                 event->type == LNET_EVENT_UNLINK);
4047
4048         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4049                event->status);
4050
4051         switch (event->type) {
4052         case LNET_EVENT_UNLINK:
4053                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4054                        libcfs_nidstr(&ev_info->mt_nid));
4055                 fallthrough;
4056         case LNET_EVENT_REPLY:
4057                 lnet_handle_recovery_reply(ev_info, event->status, false,
4058                                            event->type == LNET_EVENT_UNLINK);
4059                 break;
4060         case LNET_EVENT_SEND:
4061                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4062                                libcfs_nidstr(&ev_info->mt_nid),
4063                                (event->status) ? "unsuccessfully" :
4064                                "successfully", event->status);
4065                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4066                 break;
4067         default:
4068                 CERROR("Unexpected event: %d\n", event->type);
4069                 break;
4070         }
4071         if (event->unlinked) {
4072                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4073                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4074                 lnet_ping_buffer_decref(pbuf);
4075         }
4076 }
4077
4078 static int
4079 lnet_rsp_tracker_create(void)
4080 {
4081         struct list_head **rstqs;
4082
4083         rstqs = lnet_create_array_of_queues();
4084         if (!rstqs)
4085                 return -ENOMEM;
4086
4087         the_lnet.ln_mt_rstq = rstqs;
4088
4089         return 0;
4090 }
4091
4092 static void
4093 lnet_rsp_tracker_clean(void)
4094 {
4095         lnet_finalize_expired_responses();
4096
4097         cfs_percpt_free(the_lnet.ln_mt_rstq);
4098         the_lnet.ln_mt_rstq = NULL;
4099 }
4100
4101 int lnet_monitor_thr_start(void)
4102 {
4103         int rc = 0;
4104         struct task_struct *task;
4105
4106         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4107                 return -EALREADY;
4108
4109         rc = lnet_resendqs_create();
4110         if (rc)
4111                 return rc;
4112
4113         rc = lnet_rsp_tracker_create();
4114         if (rc)
4115                 goto clean_queues;
4116
4117         sema_init(&the_lnet.ln_mt_signal, 0);
4118
4119         lnet_net_lock(LNET_LOCK_EX);
4120         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4121         lnet_net_unlock(LNET_LOCK_EX);
4122         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4123         if (IS_ERR(task)) {
4124                 rc = PTR_ERR(task);
4125                 CERROR("Can't start monitor thread: %d\n", rc);
4126                 goto clean_thread;
4127         }
4128
4129         return 0;
4130
4131 clean_thread:
4132         lnet_net_lock(LNET_LOCK_EX);
4133         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4134         lnet_net_unlock(LNET_LOCK_EX);
4135         /* block until event callback signals exit */
4136         down(&the_lnet.ln_mt_signal);
4137         /* clean up */
4138         lnet_net_lock(LNET_LOCK_EX);
4139         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4140         lnet_net_unlock(LNET_LOCK_EX);
4141         lnet_rsp_tracker_clean();
4142         lnet_clean_local_ni_recoveryq();
4143         lnet_clean_peer_ni_recoveryq();
4144         lnet_clean_resendqs();
4145         the_lnet.ln_mt_handler = NULL;
4146         return rc;
4147 clean_queues:
4148         lnet_rsp_tracker_clean();
4149         lnet_clean_local_ni_recoveryq();
4150         lnet_clean_peer_ni_recoveryq();
4151         lnet_clean_resendqs();
4152         return rc;
4153 }
4154
4155 void lnet_monitor_thr_stop(void)
4156 {
4157         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4158                 return;
4159
4160         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4161         lnet_net_lock(LNET_LOCK_EX);
4162         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4163         lnet_net_unlock(LNET_LOCK_EX);
4164
4165         /* tell the monitor thread that we're shutting down */
4166         complete(&the_lnet.ln_mt_wait_complete);
4167
4168         /* block until monitor thread signals that it's done */
4169         mutex_unlock(&the_lnet.ln_api_mutex);
4170         down(&the_lnet.ln_mt_signal);
4171         mutex_lock(&the_lnet.ln_api_mutex);
4172         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4173
4174         /* perform cleanup tasks */
4175         lnet_rsp_tracker_clean();
4176         lnet_clean_local_ni_recoveryq();
4177         lnet_clean_peer_ni_recoveryq();
4178         lnet_clean_resendqs();
4179 }
4180
4181 void
4182 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4183                   __u32 msg_type)
4184 {
4185         lnet_net_lock(cpt);
4186         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4187         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4188         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4189         lnet_net_unlock(cpt);
4190
4191         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4192 }
4193
4194 static void
4195 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4196 {
4197         struct lnet_hdr *hdr = &msg->msg_hdr;
4198
4199         if (msg->msg_wanted != 0)
4200                 lnet_setpayloadbuffer(msg);
4201
4202         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4203
4204         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4205          * it back into the ACK during lnet_finalize() */
4206         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4207                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4208
4209         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4210                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4211 }
4212
4213 static int
4214 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4215 {
4216         struct lnet_hdr         *hdr = &msg->msg_hdr;
4217         struct lnet_match_info  info;
4218         int                     rc;
4219         bool                    ready_delay;
4220
4221         /* Convert put fields to host byte order */
4222         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4223         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4224         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4225
4226         /* Primary peer NID. */
4227         info.mi_id.nid = msg->msg_initiator;
4228         info.mi_id.pid  = hdr->src_pid;
4229         info.mi_opc     = LNET_MD_OP_PUT;
4230         info.mi_portal  = hdr->msg.put.ptl_index;
4231         info.mi_rlength = hdr->payload_length;
4232         info.mi_roffset = hdr->msg.put.offset;
4233         info.mi_mbits   = hdr->msg.put.match_bits;
4234         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4235
4236         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4237         ready_delay = msg->msg_rx_ready_delay;
4238
4239  again:
4240         rc = lnet_ptl_match_md(&info, msg);
4241         switch (rc) {
4242         default:
4243                 LBUG();
4244
4245         case LNET_MATCHMD_OK:
4246                 lnet_recv_put(ni, msg);
4247                 return 0;
4248
4249         case LNET_MATCHMD_NONE:
4250                 if (ready_delay)
4251                         /* no eager_recv, or it was already called; the msg
4252                          * should have been attached to the delayed list */
4253                         return 0;
4254
4255                 rc = lnet_ni_eager_recv(ni, msg);
4256                 if (rc == 0) {
4257                         ready_delay = true;
4258                         goto again;
4259                 }
4260                 fallthrough;
4261
4262         case LNET_MATCHMD_DROP:
4263                 CNETERR("Dropping PUT from %s portal %d match %llu"
4264                         " offset %d length %d: %d\n",
4265                         libcfs_idstr(&info.mi_id), info.mi_portal,
4266                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4267
4268                 return -ENOENT; /* -ve: OK but no match */
4269         }
4270 }
4271
4272 static int
4273 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4274 {
4275         struct lnet_match_info info;
4276         struct lnet_hdr *hdr = &msg->msg_hdr;
4277         struct lnet_processid source_id;
4278         struct lnet_handle_wire reply_wmd;
4279         int rc;
4280
4281         /* Convert get fields to host byte order */
4282         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4283         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4284         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4285         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4286
4287         source_id.nid = hdr->src_nid;
4288         source_id.pid = hdr->src_pid;
4289         /* Primary peer NID */
4290         info.mi_id.nid  = msg->msg_initiator;
4291         info.mi_id.pid  = hdr->src_pid;
4292         info.mi_opc     = LNET_MD_OP_GET;
4293         info.mi_portal  = hdr->msg.get.ptl_index;
4294         info.mi_rlength = hdr->msg.get.sink_length;
4295         info.mi_roffset = hdr->msg.get.src_offset;
4296         info.mi_mbits   = hdr->msg.get.match_bits;
4297         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4298
4299         rc = lnet_ptl_match_md(&info, msg);
4300         if (rc == LNET_MATCHMD_DROP) {
4301                 CNETERR("Dropping GET from %s portal %d match %llu"
4302                         " offset %d length %d\n",
4303                         libcfs_idstr(&info.mi_id), info.mi_portal,
4304                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4305                 return -ENOENT; /* -ve: OK but no match */
4306         }
4307
4308         LASSERT(rc == LNET_MATCHMD_OK);
4309
4310         lnet_build_msg_event(msg, LNET_EVENT_GET);
4311
4312         reply_wmd = hdr->msg.get.return_wmd;
4313
4314         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4315                        msg->msg_offset, msg->msg_wanted);
4316
4317         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4318
4319         if (rdma_get) {
4320                 /* The LND completes the REPLY from its recv procedure */
4321                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4322                              msg->msg_offset, msg->msg_len, msg->msg_len);
4323                 return 0;
4324         }
4325
4326         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4327         msg->msg_receiving = 0;
4328
4329         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4330         if (rc < 0) {
4331                 /* didn't get as far as lnet_ni_send() */
4332                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4333                        libcfs_nidstr(&ni->ni_nid),
4334                        libcfs_idstr(&info.mi_id), rc);
4335
4336                 lnet_finalize(msg, rc);
4337         }
4338
4339         return 0;
4340 }
4341
4342 static int
4343 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4344 {
4345         void *private = msg->msg_private;
4346         struct lnet_hdr *hdr = &msg->msg_hdr;
4347         struct lnet_processid src = {};
4348         struct lnet_libmd *md;
4349         unsigned int rlength;
4350         unsigned int mlength;
4351         int cpt;
4352
4353         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4354         lnet_res_lock(cpt);
4355
4356         src.nid = hdr->src_nid;
4357         src.pid = hdr->src_pid;
4358
4359         /* NB handles only looked up by creator (no flips) */
4360         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4361         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4362                 CNETERR("%s: Dropping REPLY from %s for %s "
4363                         "MD %#llx.%#llx\n",
4364                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4365                         (md == NULL) ? "invalid" : "inactive",
4366                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4367                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4368                 if (md != NULL && md->md_me != NULL)
4369                         CERROR("REPLY MD also attached to portal %d\n",
4370                                md->md_me->me_portal);
4371
4372                 lnet_res_unlock(cpt);
4373                 return -ENOENT; /* -ve: OK but no match */
4374         }
4375
4376         LASSERT(md->md_offset == 0);
4377
4378         rlength = hdr->payload_length;
4379         mlength = min(rlength, md->md_length);
4380
4381         if (mlength < rlength &&
4382             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4383                 CNETERR("%s: Dropping REPLY from %s length %d "
4384                         "for MD %#llx would overflow (%d)\n",
4385                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4386                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4387                         mlength);
4388                 lnet_res_unlock(cpt);
4389                 return -ENOENT; /* -ve: OK but no match */
4390         }
4391
4392         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4393                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4394                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4395
4396         lnet_msg_attach_md(msg, md, 0, mlength);
4397
4398         if (mlength != 0)
4399                 lnet_setpayloadbuffer(msg);
4400
4401         lnet_res_unlock(cpt);
4402
4403         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4404
4405         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4406         return 0;
4407 }
4408
4409 static int
4410 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4411 {
4412         struct lnet_hdr *hdr = &msg->msg_hdr;
4413         struct lnet_processid src = {};
4414         struct lnet_libmd *md;
4415         int cpt;
4416
4417         src.nid = hdr->src_nid;
4418         src.pid = hdr->src_pid;
4419
4420         /* Convert ack fields to host byte order */
4421         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4422         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4423
4424         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4425         lnet_res_lock(cpt);
4426
4427         /* NB handles only looked up by creator (no flips) */
4428         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4429         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4430                 /* Don't moan; this is expected */
4431                 CDEBUG(D_NET,
4432                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4433                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4434                        (md == NULL) ? "invalid" : "inactive",
4435                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4436                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4437                 if (md != NULL && md->md_me != NULL)
4438                         CERROR("Source MD also attached to portal %d\n",
4439                                md->md_me->me_portal);
4440
4441                 lnet_res_unlock(cpt);
4442                 return -ENOENT;                  /* -ve! */
4443         }
4444
4445         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4446                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4447                hdr->msg.ack.dst_wmd.wh_object_cookie);
4448
4449         lnet_msg_attach_md(msg, md, 0, 0);
4450
4451         lnet_res_unlock(cpt);
4452
4453         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4454
4455         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4456         return 0;
4457 }
4458
4459 /**
4460  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4461  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4462  * \retval -ve                  error code
4463  */
4464 int
4465 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4466 {
4467         int     rc = 0;
4468
4469         if (!the_lnet.ln_routing)
4470                 return -ECANCELED;
4471
4472         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4473             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4474                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4475                         msg->msg_rx_ready_delay = 1;
4476                 } else {
4477                         lnet_net_unlock(msg->msg_rx_cpt);
4478                         rc = lnet_ni_eager_recv(ni, msg);
4479                         lnet_net_lock(msg->msg_rx_cpt);
4480                 }
4481         }
4482
4483         if (rc == 0)
4484                 rc = lnet_post_routed_recv_locked(msg, 0);
4485         return rc;
4486 }
4487
4488 int
4489 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4490 {
4491         int     rc;
4492
4493         switch (msg->msg_type) {
4494         case LNET_MSG_ACK:
4495                 rc = lnet_parse_ack(ni, msg);
4496                 break;
4497         case LNET_MSG_PUT:
4498                 rc = lnet_parse_put(ni, msg);
4499                 break;
4500         case LNET_MSG_GET:
4501                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4502                 break;
4503         case LNET_MSG_REPLY:
4504                 rc = lnet_parse_reply(ni, msg);
4505                 break;
4506         default: /* prevent an unused label if !kernel */
4507                 LASSERT(0);
4508                 return -EPROTO;
4509         }
4510
4511         LASSERT(rc == 0 || rc == -ENOENT);
4512         return rc;
4513 }
4514
4515 char *
4516 lnet_msgtyp2str(int type)
4517 {
4518         switch (type) {
4519         case LNET_MSG_ACK:
4520                 return "ACK";
4521         case LNET_MSG_PUT:
4522                 return "PUT";
4523         case LNET_MSG_GET:
4524                 return "GET";
4525         case LNET_MSG_REPLY:
4526                 return "REPLY";
4527         case LNET_MSG_HELLO:
4528                 return "HELLO";
4529         default:
4530                 return "<UNKNOWN>";
4531         }
4532 }
4533 EXPORT_SYMBOL(lnet_msgtyp2str);
4534
4535 int
4536 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4537            struct lnet_nid *from_nid, void *private, int rdma_req)
4538 {
4539         struct lnet_peer_ni *lpni;
4540         struct lnet_msg *msg;
4541         __u32 payload_length;
4542         lnet_pid_t dest_pid;
4543         struct lnet_nid dest_nid;
4544         struct lnet_nid src_nid;
4545         bool push = false;
4546         int for_me;
4547         __u32 type;
4548         int rc = 0;
4549         int cpt;
4550         time64_t now = ktime_get_seconds();
4551
4552         LASSERT(!in_interrupt());
4553
4554         type = hdr->type;
4555         src_nid = hdr->src_nid;
4556         dest_nid = hdr->dest_nid;
4557         dest_pid = hdr->dest_pid;
4558         payload_length = hdr->payload_length;
4559
4560         for_me = nid_same(&ni->ni_nid, &dest_nid);
4561         cpt = lnet_nid2cpt(from_nid, ni);
4562
4563         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4564                 libcfs_nidstr(&dest_nid),
4565                 libcfs_nidstr(&ni->ni_nid),
4566                 libcfs_nidstr(&src_nid),
4567                 lnet_msgtyp2str(type),
4568                 (for_me) ? "for me" : "routed");
4569
4570         switch (type) {
4571         case LNET_MSG_ACK:
4572         case LNET_MSG_GET:
4573                 if (payload_length > 0) {
4574                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4575                                libcfs_nidstr(from_nid),
4576                                libcfs_nidstr(&src_nid),
4577                                lnet_msgtyp2str(type), payload_length);
4578                         return -EPROTO;
4579                 }
4580                 break;
4581
4582         case LNET_MSG_PUT:
4583         case LNET_MSG_REPLY:
4584                 if (payload_length >
4585                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4586                         CERROR("%s, src %s: bad %s payload %d "
4587                                "(%d max expected)\n",
4588                                libcfs_nidstr(from_nid),
4589                                libcfs_nidstr(&src_nid),
4590                                lnet_msgtyp2str(type),
4591                                payload_length,
4592                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4593                         return -EPROTO;
4594                 }
4595                 break;
4596
4597         default:
4598                 CERROR("%s, src %s: Bad message type 0x%x\n",
4599                        libcfs_nidstr(from_nid),
4600                        libcfs_nidstr(&src_nid), type);
4601                 return -EPROTO;
4602         }
4603
4604         /* Only update net_last_alive for incoming GETs on the reserved portal
4605          * (i.e. incoming lnet/discovery pings).
4606          * This avoids situations where the router's own traffic results in NI
4607          * status changes
4608          */
4609         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4610             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4611             !lnet_islocalnid(&src_nid) &&
4612             ni->ni_net->net_last_alive != now) {
4613                 lnet_ni_lock(ni);
4614                 spin_lock(&ni->ni_net->net_lock);
4615                 ni->ni_net->net_last_alive = now;
4616                 spin_unlock(&ni->ni_net->net_lock);
4617                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4618                 lnet_ni_unlock(ni);
4619         }
4620
4621         if (push)
4622                 lnet_push_update_to_peers(1);
4623
4624         /* Regard a bad destination NID as a protocol error.  Senders should
4625          * know what they're doing; if they don't they're misconfigured, buggy
4626          * or malicious so we chop them off at the knees :) */
4627
4628         if (!for_me) {
4629                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4630                         /* should have gone direct */
4631                         CERROR("%s, src %s: Bad dest nid %s "
4632                                "(should have been sent direct)\n",
4633                                 libcfs_nidstr(from_nid),
4634                                 libcfs_nidstr(&src_nid),
4635                                 libcfs_nidstr(&dest_nid));
4636                         return -EPROTO;
4637                 }
4638
4639                 if (lnet_islocalnid(&dest_nid)) {
4640                         /* dest is another local NI; sender should have used
4641                          * this node's NID on its own network */
4642                         CERROR("%s, src %s: Bad dest nid %s "
4643                                "(it's my nid but on a different network)\n",
4644                                 libcfs_nidstr(from_nid),
4645                                 libcfs_nidstr(&src_nid),
4646                                 libcfs_nidstr(&dest_nid));
4647                         return -EPROTO;
4648                 }
4649
4650                 if (rdma_req && type == LNET_MSG_GET) {
4651                         CERROR("%s, src %s: Bad optimized GET for %s "
4652                                "(final destination must be me)\n",
4653                                 libcfs_nidstr(from_nid),
4654                                 libcfs_nidstr(&src_nid),
4655                                 libcfs_nidstr(&dest_nid));
4656                         return -EPROTO;
4657                 }
4658
4659                 if (!the_lnet.ln_routing) {
4660                         CERROR("%s, src %s: Dropping message for %s "
4661                                "(routing not enabled)\n",
4662                                 libcfs_nidstr(from_nid),
4663                                 libcfs_nidstr(&src_nid),
4664                                 libcfs_nidstr(&dest_nid));
4665                         goto drop;
4666                 }
4667         }
4668
4669         /* Message looks OK; we're not going to return an error, so we MUST
4670          * call back lnd_recv() come what may... */
4671
4672         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4673             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4674                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4675                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4676                        lnet_msgtyp2str(type));
4677                 goto drop;
4678         }
4679
4680         if (!list_empty(&the_lnet.ln_drop_rules) &&
4681             lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) {
4682                 CDEBUG(D_NET,
4683                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4684                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4685                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4686                 goto drop;
4687         }
4688
4689         msg = lnet_msg_alloc();
4690         if (msg == NULL) {
4691                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4692                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4693                        lnet_msgtyp2str(type));
4694                 goto drop;
4695         }
4696
4697         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4698          * pointers NULL etc */
4699
4700         msg->msg_type = type;
4701         msg->msg_private = private;
4702         msg->msg_receiving = 1;
4703         msg->msg_rdma_get = rdma_req;
4704         msg->msg_len = msg->msg_wanted = payload_length;
4705         msg->msg_offset = 0;
4706         msg->msg_hdr = *hdr;
4707         /* for building message event */
4708         msg->msg_from = *from_nid;
4709         if (!for_me) {
4710                 msg->msg_target.pid = dest_pid;
4711                 msg->msg_target.nid = dest_nid;
4712                 msg->msg_routing = 1;
4713         }
4714
4715         lnet_net_lock(cpt);
4716         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4717         if (IS_ERR(lpni)) {
4718                 lnet_net_unlock(cpt);
4719                 rc = PTR_ERR(lpni);
4720                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4721                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4722                        lnet_msgtyp2str(type), rc);
4723                 lnet_msg_free(msg);
4724                 if (rc == -ESHUTDOWN)
4725                         /* We are shutting down.  Don't do anything more */
4726                         return rc;
4727                 goto drop;
4728         }
4729
4730         /* If this message was forwarded to us from a router then we may need
4731          * to update router aliveness or check for an asymmetrical route
4732          * (or both)
4733          */
4734         if (((lnet_drop_asym_route && for_me) ||
4735              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4736             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4737                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4738                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4739                 struct lnet_route *route;
4740                 bool found = false;
4741
4742                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4743                         if (route->lr_net == src_net_id) {
4744                                 found = true;
4745                                 /* If we're transitioning the gateway from
4746                                  * dead -> alive, and discovery is disabled
4747                                  * locally or on the gateway, then we need to
4748                                  * update the cached route aliveness for each
4749                                  * route to the src_nid's net.
4750                                  *
4751                                  * Otherwise, we're only checking for
4752                                  * symmetrical route, and we can break the
4753                                  * loop
4754                                  */
4755                                 if (!gw->lp_alive &&
4756                                     lnet_is_discovery_disabled(gw))
4757                                         lnet_set_route_aliveness(route, true);
4758                                 else
4759                                         break;
4760                         }
4761                 }
4762                 if (lnet_drop_asym_route && for_me && !found) {
4763                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4764                         lnet_peer_ni_decref_locked(lpni);
4765                         lnet_net_unlock(cpt);
4766                         /* we would not use from_nid to route a message to
4767                          * src_nid
4768                          * => asymmetric routing detected but forbidden
4769                          */
4770                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4771                                libcfs_nidstr(from_nid),
4772                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4773                         lnet_msg_free(msg);
4774                         goto drop;
4775                 }
4776                 if (!gw->lp_alive) {
4777                         struct lnet_peer_net *lpn;
4778                         struct lnet_peer_ni *lpni2;
4779
4780                         gw->lp_alive = true;
4781                         /* Mark all remote NIs on src_nid's net UP */
4782                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4783                         if (lpn)
4784                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4785                                                     lpni_peer_nis)
4786                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4787                 }
4788         }
4789
4790         lpni->lpni_last_alive = now;
4791
4792         msg->msg_rxpeer = lpni;
4793         msg->msg_rxni = ni;
4794         lnet_ni_addref_locked(ni, cpt);
4795         /* Multi-Rail: Primary NID of source. */
4796         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
4797
4798         /*
4799          * mark the status of this lpni as UP since we received a message
4800          * from it. The ping response reports back the ns_status which is
4801          * marked on the remote as up or down and we cache it here.
4802          */
4803         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4804
4805         lnet_msg_commit(msg, cpt);
4806
4807         /* message delay simulation */
4808         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4809                      lnet_delay_rule_match_locked(hdr, msg))) {
4810                 lnet_net_unlock(cpt);
4811                 return 0;
4812         }
4813
4814         if (!for_me) {
4815                 rc = lnet_parse_forward_locked(ni, msg);
4816                 lnet_net_unlock(cpt);
4817
4818                 if (rc < 0)
4819                         goto free_drop;
4820
4821                 if (rc == LNET_CREDIT_OK) {
4822                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4823                                      0, payload_length, payload_length);
4824                 }
4825                 return 0;
4826         }
4827
4828         lnet_net_unlock(cpt);
4829
4830         rc = lnet_parse_local(ni, msg);
4831         if (rc != 0)
4832                 goto free_drop;
4833         return 0;
4834
4835  free_drop:
4836         LASSERT(msg->msg_md == NULL);
4837         lnet_finalize(msg, rc);
4838
4839  drop:
4840         lnet_drop_message(ni, cpt, private, payload_length, type);
4841         return 0;
4842 }
4843 EXPORT_SYMBOL(lnet_parse);
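/*
 * Illustrative sketch (not part of this file): how an LND might hand an
 * incoming message to LNet.  "mylnd_rx", its fields and mylnd_rx_done()
 * are hypothetical stand-ins for LND state; only the lnet_parse()
 * signature above is real.
 *
 *	static void mylnd_handle_rx(struct lnet_ni *ni, struct mylnd_rx *rx)
 *	{
 *		int rc;
 *
 *		// rx is passed as 'private' and handed back in lnd_recv()
 *		rc = lnet_parse(ni, &rx->rx_hdr, &rx->rx_from_nid,
 *				rx, rx->rx_is_rdma);
 *		if (rc < 0)
 *			// -EPROTO/-ESHUTDOWN: lnd_recv() will not be called
 *			mylnd_rx_done(rx, rc);
 *	}
 */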
4844
4845 void
4846 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4847 {
4848         struct lnet_msg *msg;
4849
4850         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4851                                                msg_list)) != NULL) {
4852                 struct lnet_processid id = {};
4853
4854                 list_del(&msg->msg_list);
4855
4856                 id.nid = msg->msg_hdr.src_nid;
4857                 id.pid = msg->msg_hdr.src_pid;
4858
4859                 LASSERT(msg->msg_md == NULL);
4860                 LASSERT(msg->msg_rx_delayed);
4861                 LASSERT(msg->msg_rxpeer != NULL);
4862                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4863
4864                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4865                       " offset %d length %d: %s\n",
4866                       libcfs_idstr(&id),
4867                       msg->msg_hdr.msg.put.ptl_index,
4868                       msg->msg_hdr.msg.put.match_bits,
4869                       msg->msg_hdr.msg.put.offset,
4870                       msg->msg_hdr.payload_length, reason);
4871
4872                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4873                  * called lnet_drop_message(), so I just hang onto msg as well
4874                  * until that's done */
4875
4876                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4877                                   msg->msg_private, msg->msg_len,
4878                                   msg->msg_type);
4879
4880                 msg->msg_no_resend = true;
4881                 /*
4882                  * NB: the message will not generate an event because it has
4883                  * no attached MD, but we still pass an error code so that
4884                  * lnet_msg_decommit() can skip counter updates and other checks.
4885                  */
4886                 lnet_finalize(msg, -ENOENT);
4887         }
4888 }
4889
4890 void
4891 lnet_recv_delayed_msg_list(struct list_head *head)
4892 {
4893         struct lnet_msg *msg;
4894
4895         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
4896                                                msg_list)) != NULL) {
4897                 struct lnet_processid id;
4898
4899                 list_del(&msg->msg_list);
4900
4901                 /* md won't disappear under me, since each msg
4902                  * holds a ref on it */
4903
4904                 id.nid = msg->msg_hdr.src_nid;
4905                 id.pid = msg->msg_hdr.src_pid;
4906
4907                 LASSERT(msg->msg_rx_delayed);
4908                 LASSERT(msg->msg_md != NULL);
4909                 LASSERT(msg->msg_rxpeer != NULL);
4910                 LASSERT(msg->msg_rxni != NULL);
4911                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4912
4913                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4914                        "match %llu offset %d length %d.\n",
4915                         libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index,
4916                         msg->msg_hdr.msg.put.match_bits,
4917                         msg->msg_hdr.msg.put.offset,
4918                         msg->msg_hdr.payload_length);
4919
4920                 lnet_recv_put(msg->msg_rxni, msg);
4921         }
4922 }
4923
4924 static void
4925 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4926                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4927 {
4928         s64 timeout_ns;
4929         struct lnet_rsp_tracker *local_rspt;
4930
4931         /*
4932          * MD has a refcount taken by message so it's not going away.
4933          * The MD however can be looked up. We need to secure the access
4934          * to the md_rspt_ptr by taking the res_lock.
4935          * The rspt can be accessed without protection until it is added
4936          * to the list.
4937          */
4938
4939         lnet_res_lock(cpt);
4940         local_rspt = md->md_rspt_ptr;
4941         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4942         if (local_rspt != NULL) {
4943                 /*
4944                  * we already have an rspt attached to the md, so we'll
4945                  * update the deadline on that one.
4946                  */
4947                 lnet_rspt_free(rspt, cpt);
4948         } else {
4949                 /* new md */
4950                 rspt->rspt_mdh = mdh;
4951                 rspt->rspt_cpt = cpt;
4952                 /* store the rspt so we can access it when we get the REPLY */
4953                 md->md_rspt_ptr = rspt;
4954                 local_rspt = rspt;
4955         }
4956         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4957
4958         /*
4959          * add to the list of tracked responses. It's added to the tail of
4960          * the list so that older entries expire first.
4961          */
4962         lnet_net_lock(cpt);
4963         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4964         lnet_net_unlock(cpt);
4965         lnet_res_unlock(cpt);
4966 }
4967
4968 /**
4969  * Initiate an asynchronous PUT operation.
4970  *
4971  * There are several events associated with a PUT: completion of the send on
4972  * the initiator node (LNET_EVENT_SEND), and when the send completes
4973  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4974  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4975  * used at the target node to indicate the completion of incoming data
4976  * delivery.
4977  *
4978  * The local events will be logged in the EQ associated with the MD pointed to
4979  * by the \a mdh handle. Using an MD without an associated EQ results in these
4980  * events being discarded. In this case, the caller must have another
4981  * mechanism (e.g., a higher level protocol) for determining when it is safe
4982  * to modify the memory region associated with the MD.
4983  *
4984  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4985  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4986  *
4987  * \param self Indicates the NID of a local interface through which to send
4988  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4989  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4990  * must be "free floating" (See LNetMDBind()).
4991  * \param ack Controls whether an acknowledgment is requested.
4992  * Acknowledgments are only sent when they are requested by the initiating
4993  * process and the target MD enables them.
4994  * \param target A process identifier for the target process.
4995  * \param portal The index in the \a target's portal table.
4996  * \param match_bits The match bits to use for MD selection at the target
4997  * process.
4998  * \param offset The offset into the target MD (only used when the target
4999  * MD has the LNET_MD_MANAGE_REMOTE option set).
5000  * \param hdr_data 64 bits of user data that can be included in the message
5001  * header. This data is written to an event queue entry at the target if an
5002  * EQ is present on the matching MD.
5003  *
5004  * \retval  0      Success, and only in this case events will be generated
5005  * and logged to EQ (if it exists).
5006  * \retval -EIO    Simulated failure.
5007  * \retval -ENOMEM Memory allocation failure.
5008  * \retval -ENOENT Invalid MD object.
5009  *
5010  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
5011  */
5012 int
5013 LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
5014         struct lnet_processid *target, unsigned int portal,
5015         __u64 match_bits, unsigned int offset,
5016         __u64 hdr_data)
5017 {
5018         struct lnet_msg *msg;
5019         struct lnet_libmd *md;
5020         int cpt;
5021         int rc;
5022         struct lnet_rsp_tracker *rspt = NULL;
5023
5024         LASSERT(the_lnet.ln_refcount > 0);
5025
5026         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5027             fail_peer(&target->nid, 1)) {               /* shall we now? */
5028                 CERROR("Dropping PUT to %s: simulated failure\n",
5029                        libcfs_idstr(target));
5030                 return -EIO;
5031         }
5032
5033         msg = lnet_msg_alloc();
5034         if (msg == NULL) {
5035                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5036                        libcfs_idstr(target));
5037                 return -ENOMEM;
5038         }
5039         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5040
5041         cpt = lnet_cpt_of_cookie(mdh.cookie);
5042
5043         if (ack == LNET_ACK_REQ) {
5044                 rspt = lnet_rspt_alloc(cpt);
5045                 if (!rspt) {
5046                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5047                                libcfs_idstr(target));
5048                         lnet_msg_free(msg);
5049                         return -ENOMEM;
5050                 }
5050                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5051         }
5052
5053         lnet_res_lock(cpt);
5054
5055         md = lnet_handle2md(&mdh);
5056         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5057                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5058                        match_bits, portal, libcfs_idstr(target),
5059                        md == NULL ? -1 : md->md_threshold);
5060                 if (md != NULL && md->md_me != NULL)
5061                         CERROR("Source MD also attached to portal %d\n",
5062                                md->md_me->me_portal);
5063                 lnet_res_unlock(cpt);
5064
5065                 if (rspt)
5066                         lnet_rspt_free(rspt, cpt);
5067
5068                 lnet_msg_free(msg);
5069                 return -ENOENT;
5070         }
5071
5072         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5073
5074         lnet_msg_attach_md(msg, md, 0, 0);
5075
5076         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5077
5078         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5079         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5080         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5081         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5082
5083         /* NB handles only looked up by creator (no flips) */
5084         if (ack == LNET_ACK_REQ) {
5085                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5086                         the_lnet.ln_interface_cookie;
5087                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5088                         md->md_lh.lh_cookie;
5089         } else {
5090                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5091                         LNET_WIRE_HANDLE_COOKIE_NONE;
5092                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5093                         LNET_WIRE_HANDLE_COOKIE_NONE;
5094         }
5095
5096         lnet_res_unlock(cpt);
5097
5098         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5099
5100         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5101                                                    md->md_options))
5102                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5103         else if (rspt)
5104                 lnet_rspt_free(rspt, cpt);
5105
5106         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5107                                  CFS_FAIL_ONCE))
5108                 rc = -EIO;
5109         else
5110                 rc = lnet_send(self, msg, NULL);
5111
5112         if (rc != 0) {
5113                 CNETERR("Error sending PUT to %s: %d\n",
5114                         libcfs_idstr(target), rc);
5115                 msg->msg_no_resend = true;
5116                 lnet_finalize(msg, rc);
5117         }
5118
5119         /* completion will be signalled by an event */
5120         return 0;
5121 }
5122 EXPORT_SYMBOL(LNetPut);
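/*
 * Usage sketch (hypothetical caller, not from this file): a PUT with an
 * ACK requested.  "md_handle" is assumed to describe memory bound with
 * LNetMDBind(); "peer", PORTAL and MBITS are caller-defined.  Passing
 * NULL for 'self' assumes the caller lets LNet pick the source NI (cf.
 * the LNET_NID_ANY note above).  Completion is signalled by
 * LNET_EVENT_SEND and, if the target accepts the PUT, LNET_EVENT_ACK
 * on the MD's handler.
 *
 *	int rc = LNetPut(NULL, md_handle, LNET_ACK_REQ, &peer,
 *			 PORTAL, MBITS, 0, 0);
 *	if (rc != 0)
 *		CERROR("PUT to %s failed: %d\n", libcfs_idstr(&peer), rc);
 */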
5123
5124 /*
5125  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5126  * returns a msg for the LND to pass to lnet_finalize() when the sink
5127  * data has been received.
5128  *
5129  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5130  * lnet_finalize() is called on it, so the LND must call this first
5131  */
5132 struct lnet_msg *
5133 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5134 {
5135         struct lnet_msg *msg = lnet_msg_alloc();
5136         struct lnet_libmd *getmd = getmsg->msg_md;
5137         struct lnet_processid *peer_id = &getmsg->msg_target;
5138         int cpt;
5139
5140         LASSERT(!getmsg->msg_target_is_router);
5141         LASSERT(!getmsg->msg_routing);
5142
5143         if (msg == NULL) {
5144                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5145                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5146                 goto drop;
5147         }
5148
5149         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5150         lnet_res_lock(cpt);
5151
5152         LASSERT(getmd->md_refcount > 0);
5153
5154         if (getmd->md_threshold == 0) {
5155                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5156                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5157                         getmd);
5158                 lnet_res_unlock(cpt);
5159                 goto drop;
5160         }
5161
5162         LASSERT(getmd->md_offset == 0);
5163
5164         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5165                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5166
5167         /* setup information for lnet_build_msg_event */
5168         msg->msg_initiator =
5169                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5170         msg->msg_from = peer_id->nid;
5171         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5172         msg->msg_hdr.src_nid = peer_id->nid;
5173         msg->msg_hdr.payload_length = getmd->md_length;
5174         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5175
5176         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5177         lnet_res_unlock(cpt);
5178
5179         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5180
5181         lnet_net_lock(cpt);
5182         lnet_msg_commit(msg, cpt);
5183         lnet_net_unlock(cpt);
5184
5185         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5186
5187         return msg;
5188
5189  drop:
5190         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5191
5192         lnet_net_lock(cpt);
5193         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5194         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5195         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5196                 getmd->md_length;
5197         lnet_net_unlock(cpt);
5198
5199         if (msg != NULL)
5200                 lnet_msg_free(msg);
5201
5202         return NULL;
5203 }
5204 EXPORT_SYMBOL(lnet_create_reply_msg);
5205
5206 void
5207 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5208                        unsigned int len)
5209 {
5210         /* Set the REPLY length, now the RDMA that elides the REPLY message has
5211          * completed and I know it. */
5212         LASSERT(reply != NULL);
5213         LASSERT(reply->msg_type == LNET_MSG_GET);
5214         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5215
5216         /* NB I trusted my peer to RDMA.  If it tells me it has written
5217          * beyond the end of my buffer, I might as well be dead. */
5218         LASSERT(len <= reply->msg_ev.mlength);
5219
5220         reply->msg_ev.mlength = len;
5221 }
5222 EXPORT_SYMBOL(lnet_set_reply_msg_len);
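/*
 * Call-order sketch for an LND using the two helpers above (hypothetical
 * LND code; "nob_received" and "status" are stand-ins).  The REPLY msg
 * must be created before the original GET is finalized, because
 * lnet_finalize() frees 'getmsg'.
 *
 *	struct lnet_msg *reply;
 *
 *	reply = lnet_create_reply_msg(ni, getmsg);	// NULL on failure
 *	lnet_finalize(getmsg, reply ? 0 : -ENOMEM);	// may free getmsg
 *	...
 *	// later, once the RDMA of the sink data has completed:
 *	if (reply != NULL) {
 *		lnet_set_reply_msg_len(ni, reply, nob_received);
 *		lnet_finalize(reply, status);
 *	}
 */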
5223
5224 /**
5225  * Initiate an asynchronous GET operation.
5226  *
5227  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5228  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5229  * the target node in the REPLY has been written to local MD.
5230  *
5231  * On the target node, an LNET_EVENT_GET is logged when the GET request
5232  * arrives and is accepted into a MD.
5233  *
5234  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5235  * \param mdh A handle for the MD that describes the memory into which the
5236  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5237  *
5238  * \retval  0      Success, and only in this case events will be generated
5239  * and logged to EQ (if it exists) of the MD.
5240  * \retval -EIO    Simulated failure.
5241  * \retval -ENOMEM Memory allocation failure.
5242  * \retval -ENOENT Invalid MD object.
5243  */
5244 int
5245 LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh,
5246         struct lnet_processid *target, unsigned int portal,
5247         __u64 match_bits, unsigned int offset, bool recovery)
5248 {
5249         struct lnet_msg *msg;
5250         struct lnet_libmd *md;
5251         struct lnet_rsp_tracker *rspt;
5252         int cpt;
5253         int rc;
5254
5255         LASSERT(the_lnet.ln_refcount > 0);
5256
5257         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5258             fail_peer(&target->nid, 1)) {               /* shall we now? */
5260                 CERROR("Dropping GET to %s: simulated failure\n",
5261                        libcfs_idstr(target));
5262                 return -EIO;
5263         }
5264
5265         msg = lnet_msg_alloc();
5266         if (!msg) {
5267                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5268                        libcfs_idstr(target));
5269                 return -ENOMEM;
5270         }
5271
5272         cpt = lnet_cpt_of_cookie(mdh.cookie);
5273
5274         rspt = lnet_rspt_alloc(cpt);
5275         if (!rspt) {
5276                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5277                        libcfs_idstr(target));
5278                 lnet_msg_free(msg);
5279                 return -ENOMEM;
5280         }
5280         INIT_LIST_HEAD(&rspt->rspt_on_list);
5281
5282         msg->msg_recovery = recovery;
5283
5284         lnet_res_lock(cpt);
5285
5286         md = lnet_handle2md(&mdh);
5287         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5288                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5289                        match_bits, portal, libcfs_idstr(target),
5290                        md == NULL ? -1 : md->md_threshold);
5291                 if (md != NULL && md->md_me != NULL)
5292                         CERROR("REPLY MD also attached to portal %d\n",
5293                                md->md_me->me_portal);
5294
5295                 lnet_res_unlock(cpt);
5296
5297                 lnet_msg_free(msg);
5298                 lnet_rspt_free(rspt, cpt);
5299                 return -ENOENT;
5300         }
5301
5302         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5303
5304         lnet_msg_attach_md(msg, md, 0, 0);
5305
5306         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5307
5308         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5309         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5310         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5311         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5312
5313         /* NB handles only looked up by creator (no flips) */
5314         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5315                 the_lnet.ln_interface_cookie;
5316         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5317                 md->md_lh.lh_cookie;
5318
5319         lnet_res_unlock(cpt);
5320
5321         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5322
5323         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5324                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5325         else
5326                 lnet_rspt_free(rspt, cpt);
5327
5328         rc = lnet_send(self, msg, NULL);
5329         if (rc < 0) {
5330                 CNETERR("Error sending GET to %s: %d\n",
5331                         libcfs_idstr(target), rc);
5332                 msg->msg_no_resend = true;
5333                 lnet_finalize(msg, rc);
5334         }
5335
5336         /* completion will be signalled by an event */
5337         return 0;
5338 }
5339 EXPORT_SYMBOL(LNetGet);
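/*
 * Usage sketch (hypothetical caller, not from this file): fetch remote
 * data into a locally bound MD.  "md_handle", "peer", PORTAL and MBITS
 * are caller-defined; the arrival of the data is signalled by
 * LNET_EVENT_REPLY on the MD's handler.
 *
 *	int rc = LNetGet(NULL, md_handle, &peer, PORTAL, MBITS, 0, false);
 *	if (rc != 0)
 *		CERROR("GET from %s failed: %d\n", libcfs_idstr(&peer), rc);
 */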
5340
5341 /**
5342  * Calculate distance to node at \a dstnid.
5343  *
5344  * \param dstnid Target NID.
5345  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5346  * is saved here.
5347  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5348  * here.
5349  *
5350  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5351  * local_nid_dist_zero is set, which is the default.
5352  * \retval positive Distance to target NID, i.e. the number of hops plus one.
5353  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5354  */
5355 int
5356 LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp)
5357 {
5358         struct lnet_ni *ni = NULL;
5359         struct lnet_remotenet *rnet;
5360         __u32 dstnet = LNET_NID_NET(dstnid);
5361         int hops;
5362         int cpt;
5363         __u32 order = 2;
5364         struct list_head *rn_list;
5365         struct lnet_ni *matched_dstnet = NULL;
5366
5367         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5368          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5369          * keep order 0 free for 0@lo and order 1 free for a local NID
5370          * match
5371          * WARNING: dstnid and srcnid might point to same place.
5372          * Don't set *srcnid until late.
5373          */
5374
5375         LASSERT(the_lnet.ln_refcount > 0);
5376
5377         cpt = lnet_net_lock_current();
5378
5379         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5380                 if (nid_same(&ni->ni_nid, dstnid)) {
5381                         if (orderp != NULL) {
5382                                 if (nid_is_lo0(dstnid))
5383                                         *orderp = 0;
5384                                 else
5385                                         *orderp = 1;
5386                         }
5387                         if (srcnid)
5388                                 *srcnid = *dstnid;
5389                         lnet_net_unlock(cpt);
5390
5391                         return local_nid_dist_zero ? 0 : 1;
5392                 }
5393
5394                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5395                         matched_dstnet = ni;
5396                         /* We matched the destination net, but we may have
5397                          * additional local NIs to inspect.
5398                          *
5399                          * We record the order as appropriate, but it
5400                          * may be overwritten if we match a local NI above.
5401                          */
5402
5403                         if (orderp) {
5404                                 /* Check if ni was originally created in
5405                                  * current net namespace.
5406                                  * If not, assign order above 0xffff0000,
5407                                  * to make this ni not a priority.
5408                                  */
5409                                 if (current->nsproxy &&
5410                                     !net_eq(ni->ni_net_ns,
5411                                             current->nsproxy->net_ns))
5412                                         *orderp = order + 0xffff0000;
5413                                 else
5414                                         *orderp = order;
5415                         }
5416                 }
5417
5418                 order++;
5419         }
5420
5421         if (matched_dstnet) {
5422                 if (srcnid)
5423                         *srcnid = matched_dstnet->ni_nid;
5424                 lnet_net_unlock(cpt);
5425                 return 1;
5426         }
5427
5428         rn_list = lnet_net2rnethash(dstnet);
5429         list_for_each_entry(rnet, rn_list, lrn_list) {
5430                 if (rnet->lrn_net == dstnet) {
5431                         struct lnet_route *route;
5432                         struct lnet_route *shortest = NULL;
5433                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5434                         __u32 route_hops;
5435
5436                         LASSERT(!list_empty(&rnet->lrn_routes));
5437
5438                         list_for_each_entry(route, &rnet->lrn_routes,
5439                                             lr_list) {
5440                                 route_hops = route->lr_hops;
5441                                 if (route_hops == LNET_UNDEFINED_HOPS)
5442                                         route_hops = 1;
5443                                 if (shortest == NULL ||
5444                                     route_hops < shortest_hops) {
5445                                         shortest = route;
5446                                         shortest_hops = route_hops;
5447                                 }
5448                         }
5449
5450                         LASSERT(shortest != NULL);
5451                         hops = shortest_hops;
5452                         if (srcnid) {
5453                                 struct lnet_net *net;
5454                                 net = lnet_get_net_locked(shortest->lr_lnet);
5455                                 LASSERT(net);
5456                                 ni = lnet_get_next_ni_locked(net, NULL);
5457                                 *srcnid = ni->ni_nid;
5458                         }
5459                         if (orderp != NULL)
5460                                 *orderp = order;
5461                         lnet_net_unlock(cpt);
5462                         return hops + 1;
5463                 }
5464                 order++;
5465         }
5466
5467         lnet_net_unlock(cpt);
5468         return -EHOSTUNREACH;
5469 }
5470 EXPORT_SYMBOL(LNetDist);
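/*
 * Usage sketch (hypothetical caller): interpreting LNetDist() results.
 *
 *	struct lnet_nid src;
 *	__u32 order;
 *	int dist = LNetDist(&dst, &src, &order);
 *
 *	if (dist < 0)
 *		CERROR("%s unreachable\n", libcfs_nidstr(&dst));
 *	else if (dist == 0)
 *		CDEBUG(D_NET, "local interface\n");	// local_nid_dist_zero
 *	else
 *		CDEBUG(D_NET, "%d hop(s) away via %s\n",
 *		       dist - 1, libcfs_nidstr(&src));
 */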