1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         struct lnet_nid sd_dst_nid;
59         struct lnet_nid sd_src_nid;
60         struct lnet_nid sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicitly enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
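
/*
 * Worked example (illustrative, not part of the original file): how the
 * checks above combine the MD options with the lnet_response_tracking
 * module parameter (1 tracks GET responses, 2 tracks PUT responses,
 * 3 tracks both, per the code above).
 *
 *	lnet_response_tracking_enabled(LNET_MSG_GET, 0)
 *		true iff lnet_response_tracking is 1 or 3
 *	lnet_response_tracking_enabled(LNET_MSG_PUT, 0)
 *		true iff lnet_response_tracking is 2 or 3
 *	lnet_response_tracking_enabled(LNET_MSG_PUT, LNET_MD_NO_TRACK_RESPONSE)
 *		false, regardless of the module parameter
 */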
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
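
/*
 * Usage sketch (illustrative, not part of the original file): callers pass
 * the element's stats block, the message type and the event class. This is,
 * for example, how the drop counters are bumped later in this file when a
 * send is dropped:
 *
 *	lnet_incr_stats(&msg->msg_txpeer->lpni_stats, msg->msg_type,
 *			LNET_STATS_TYPE_DROP);
 */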
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
192 int
193 lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         struct lnet_nid nid;
199         LIST_HEAD(cull);
200
201         lnet_nid4_to_nid(nid4, &nid);
202         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
203         if (threshold != 0) {
204                 /* Adding a new entry */
205                 LIBCFS_ALLOC(tp, sizeof(*tp));
206                 if (tp == NULL)
207                         return -ENOMEM;
208
209                 tp->tp_nid = nid;
210                 tp->tp_threshold = threshold;
211
212                 lnet_net_lock(0);
213                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
214                 lnet_net_unlock(0);
215                 return 0;
216         }
217
218         lnet_net_lock(0);
219
220         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
221                 tp = list_entry(el, struct lnet_test_peer, tp_list);
222
223                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
224                     LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
225                     nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
226                         list_move(&tp->tp_list, &cull);
227                 }
228         }
229
230         lnet_net_unlock(0);
231
232         while (!list_empty(&cull)) {
233                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
234
235                 list_del(&tp->tp_list);
236                 LIBCFS_FREE(tp, sizeof(*tp));
237         }
238         return 0;
239 }
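
/*
 * Usage sketch (illustrative, not part of the original file): the threshold
 * argument selects the operation, per the code above.
 *
 *	lnet_fail_nid(nid4, 3);			  fail the next 3 messages involving nid4
 *	lnet_fail_nid(nid4, LNET_MD_THRESH_INF);  fail every message involving nid4
 *	lnet_fail_nid(nid4, 0);			  remove entries matching nid4
 *	lnet_fail_nid(LNET_NID_ANY, 0);		  remove all entries
 */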
240
241 static int
242 fail_peer(struct lnet_nid *nid, int outgoing)
243 {
244         struct lnet_test_peer *tp;
245         struct list_head *el;
246         struct list_head *next;
247         LIST_HEAD(cull);
248         int fail = 0;
249
250         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
251         lnet_net_lock(0);
252
253         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
254                 tp = list_entry(el, struct lnet_test_peer, tp_list);
255
256                 if (tp->tp_threshold == 0) {
257                         /* zombie entry */
258                         if (outgoing) {
259                                 /* only cull zombies on outgoing tests,
260                                  * since we may be at interrupt priority on
261                                  * incoming messages. */
262                                 list_move(&tp->tp_list, &cull);
263                         }
264                         continue;
265                 }
266
267                 if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
268                     nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
269                         fail = 1;
270
271                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
272                                 tp->tp_threshold--;
273                                 if (outgoing &&
274                                     tp->tp_threshold == 0) {
275                                         /* see above */
276                                         list_move(&tp->tp_list, &cull);
277                                 }
278                         }
279                         break;
280                 }
281         }
282
283         lnet_net_unlock(0);
284
285         while (!list_empty(&cull)) {
286                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
287                 list_del(&tp->tp_list);
288
289                 LIBCFS_FREE(tp, sizeof(*tp));
290         }
291
292         return fail;
293 }
294
295 unsigned int
296 lnet_iov_nob(unsigned int niov, struct kvec *iov)
297 {
298         unsigned int nob = 0;
299
300         LASSERT(niov == 0 || iov != NULL);
301         while (niov-- > 0)
302                 nob += (iov++)->iov_len;
303
304         return (nob);
305 }
306 EXPORT_SYMBOL(lnet_iov_nob);
307
308 void
309 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
310                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
311                   unsigned int nob)
312 {
313         /* NB diov, siov are READ-ONLY */
314         unsigned int this_nob;
315
316         if (nob == 0)
317                 return;
318
319         /* skip complete frags before 'doffset' */
320         LASSERT(ndiov > 0);
321         while (doffset >= diov->iov_len) {
322                 doffset -= diov->iov_len;
323                 diov++;
324                 ndiov--;
325                 LASSERT(ndiov > 0);
326         }
327
328         /* skip complete frags before 'soffset' */
329         LASSERT(nsiov > 0);
330         while (soffset >= siov->iov_len) {
331                 soffset -= siov->iov_len;
332                 siov++;
333                 nsiov--;
334                 LASSERT(nsiov > 0);
335         }
336
337         do {
338                 LASSERT(ndiov > 0);
339                 LASSERT(nsiov > 0);
340                 this_nob = min3((unsigned int)diov->iov_len - doffset,
341                                 (unsigned int)siov->iov_len - soffset,
342                                 nob);
343
344                 memcpy((char *)diov->iov_base + doffset,
345                        (char *)siov->iov_base + soffset, this_nob);
346                 nob -= this_nob;
347
348                 if (diov->iov_len > doffset + this_nob) {
349                         doffset += this_nob;
350                 } else {
351                         diov++;
352                         ndiov--;
353                         doffset = 0;
354                 }
355
356                 if (siov->iov_len > soffset + this_nob) {
357                         soffset += this_nob;
358                 } else {
359                         siov++;
360                         nsiov--;
361                         soffset = 0;
362                 }
363         } while (nob > 0);
364 }
365 EXPORT_SYMBOL(lnet_copy_iov2iov);
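
/*
 * Usage sketch (illustrative, not part of the original file; buffer contents
 * are hypothetical): copying a byte range between two kvec scatter lists,
 * with the source read starting at an offset.
 *
 *	char a[4] = "abcd", b[4] = "efgh", out[6];
 *	struct kvec src[2] = {
 *		{ .iov_base = a, .iov_len = sizeof(a) },
 *		{ .iov_base = b, .iov_len = sizeof(b) },
 *	};
 *	struct kvec dst[1] = {
 *		{ .iov_base = out, .iov_len = sizeof(out) },
 *	};
 *
 *	lnet_copy_iov2iov(1, dst, 0, 2, src, 2, 6);
 *
 * 'out' now holds "cdefgh": the last 2 bytes of 'a' followed by all of 'b'.
 */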
366
367 unsigned int
368 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
369 {
370         unsigned int  nob = 0;
371
372         LASSERT(niov == 0 || kiov != NULL);
373         while (niov-- > 0)
374                 nob += (kiov++)->bv_len;
375
376         return (nob);
377 }
378 EXPORT_SYMBOL(lnet_kiov_nob);
379
380 void
381 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
382                     unsigned int doffset,
383                     unsigned int nsiov, struct bio_vec *siov,
384                     unsigned int soffset,
385                     unsigned int nob)
386 {
387         /* NB diov, siov are READ-ONLY */
388         unsigned int    this_nob;
389         char           *daddr = NULL;
390         char           *saddr = NULL;
391
392         if (nob == 0)
393                 return;
394
395         LASSERT (!in_interrupt ());
396
397         LASSERT (ndiov > 0);
398         while (doffset >= diov->bv_len) {
399                 doffset -= diov->bv_len;
400                 diov++;
401                 ndiov--;
402                 LASSERT(ndiov > 0);
403         }
404
405         LASSERT(nsiov > 0);
406         while (soffset >= siov->bv_len) {
407                 soffset -= siov->bv_len;
408                 siov++;
409                 nsiov--;
410                 LASSERT(nsiov > 0);
411         }
412
413         do {
414                 LASSERT(ndiov > 0);
415                 LASSERT(nsiov > 0);
416                 this_nob = min3(diov->bv_len - doffset,
417                                 siov->bv_len - soffset,
418                                 nob);
419
420                 if (daddr == NULL)
421                         daddr = ((char *)kmap(diov->bv_page)) +
422                                 diov->bv_offset + doffset;
423                 if (saddr == NULL)
424                         saddr = ((char *)kmap(siov->bv_page)) +
425                                 siov->bv_offset + soffset;
426
427                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
428                  * However, in practice at least one of the kiovs will be
429                  * already-mapped kernel pages and the map/unmap will be a no-op. */
430
431                 memcpy (daddr, saddr, this_nob);
432                 nob -= this_nob;
433
434                 if (diov->bv_len > doffset + this_nob) {
435                         daddr += this_nob;
436                         doffset += this_nob;
437                 } else {
438                         kunmap(diov->bv_page);
439                         daddr = NULL;
440                         diov++;
441                         ndiov--;
442                         doffset = 0;
443                 }
444
445                 if (siov->bv_len > soffset + this_nob) {
446                         saddr += this_nob;
447                         soffset += this_nob;
448                 } else {
449                         kunmap(siov->bv_page);
450                         saddr = NULL;
451                         siov++;
452                         nsiov--;
453                         soffset = 0;
454                 }
455         } while (nob > 0);
456
457         if (daddr != NULL)
458                 kunmap(diov->bv_page);
459         if (saddr != NULL)
460                 kunmap(siov->bv_page);
461 }
462 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
463
464 void
465 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
466                     unsigned int nkiov, struct bio_vec *kiov,
467                     unsigned int kiovoffset,
468                     unsigned int nob)
469 {
470         /* NB iov, kiov are READ-ONLY */
471         unsigned int    this_nob;
472         char           *addr = NULL;
473
474         if (nob == 0)
475                 return;
476
477         LASSERT (!in_interrupt ());
478
479         LASSERT (niov > 0);
480         while (iovoffset >= iov->iov_len) {
481                 iovoffset -= iov->iov_len;
482                 iov++;
483                 niov--;
484                 LASSERT(niov > 0);
485         }
486
487         LASSERT(nkiov > 0);
488         while (kiovoffset >= kiov->bv_len) {
489                 kiovoffset -= kiov->bv_len;
490                 kiov++;
491                 nkiov--;
492                 LASSERT(nkiov > 0);
493         }
494
495         do {
496                 LASSERT(niov > 0);
497                 LASSERT(nkiov > 0);
498                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
499                                 (unsigned int)kiov->bv_len - kiovoffset,
500                                 nob);
501
502                 if (addr == NULL)
503                         addr = ((char *)kmap(kiov->bv_page)) +
504                                 kiov->bv_offset + kiovoffset;
505
506                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
507                 nob -= this_nob;
508
509                 if (iov->iov_len > iovoffset + this_nob) {
510                         iovoffset += this_nob;
511                 } else {
512                         iov++;
513                         niov--;
514                         iovoffset = 0;
515                 }
516
517                 if (kiov->bv_len > kiovoffset + this_nob) {
518                         addr += this_nob;
519                         kiovoffset += this_nob;
520                 } else {
521                         kunmap(kiov->bv_page);
522                         addr = NULL;
523                         kiov++;
524                         nkiov--;
525                         kiovoffset = 0;
526                 }
527
528         } while (nob > 0);
529
530         if (addr != NULL)
531                 kunmap(kiov->bv_page);
532 }
533 EXPORT_SYMBOL(lnet_copy_kiov2iov);
534
535 void
536 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
537                    unsigned int kiovoffset,
538                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
539                    unsigned int nob)
540 {
541         /* NB kiov, iov are READ-ONLY */
542         unsigned int    this_nob;
543         char           *addr = NULL;
544
545         if (nob == 0)
546                 return;
547
548         LASSERT (!in_interrupt ());
549
550         LASSERT (nkiov > 0);
551         while (kiovoffset >= kiov->bv_len) {
552                 kiovoffset -= kiov->bv_len;
553                 kiov++;
554                 nkiov--;
555                 LASSERT(nkiov > 0);
556         }
557
558         LASSERT(niov > 0);
559         while (iovoffset >= iov->iov_len) {
560                 iovoffset -= iov->iov_len;
561                 iov++;
562                 niov--;
563                 LASSERT(niov > 0);
564         }
565
566         do {
567                 LASSERT(nkiov > 0);
568                 LASSERT(niov > 0);
569                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
570                                 (unsigned int)iov->iov_len - iovoffset,
571                                 nob);
572
573                 if (addr == NULL)
574                         addr = ((char *)kmap(kiov->bv_page)) +
575                                 kiov->bv_offset + kiovoffset;
576
577                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
578                 nob -= this_nob;
579
580                 if (kiov->bv_len > kiovoffset + this_nob) {
581                         addr += this_nob;
582                         kiovoffset += this_nob;
583                 } else {
584                         kunmap(kiov->bv_page);
585                         addr = NULL;
586                         kiov++;
587                         nkiov--;
588                         kiovoffset = 0;
589                 }
590
591                 if (iov->iov_len > iovoffset + this_nob) {
592                         iovoffset += this_nob;
593                 } else {
594                         iov++;
595                         niov--;
596                         iovoffset = 0;
597                 }
598         } while (nob > 0);
599
600         if (addr != NULL)
601                 kunmap(kiov->bv_page);
602 }
603 EXPORT_SYMBOL(lnet_copy_iov2kiov);
604
605 int
606 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
607                   int src_niov, struct bio_vec *src,
608                   unsigned int offset, unsigned int len)
609 {
610         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
611          * for exactly 'len' bytes, and return the number of entries.
612          * NB not destructive to 'src' */
613         unsigned int    frag_len;
614         unsigned int    niov;
615
616         if (len == 0)                           /* no data => */
617                 return (0);                     /* no frags */
618
619         LASSERT(src_niov > 0);
620         while (offset >= src->bv_len) {      /* skip initial frags */
621                 offset -= src->bv_len;
622                 src_niov--;
623                 src++;
624                 LASSERT(src_niov > 0);
625         }
626
627         niov = 1;
628         for (;;) {
629                 LASSERT(src_niov > 0);
630                 LASSERT((int)niov <= dst_niov);
631
632                 frag_len = src->bv_len - offset;
633                 dst->bv_page = src->bv_page;
634                 dst->bv_offset = src->bv_offset + offset;
635
636                 if (len <= frag_len) {
637                         dst->bv_len = len;
638                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
639                         return niov;
640                 }
641
642                 dst->bv_len = frag_len;
643                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
644
645                 len -= frag_len;
646                 dst++;
647                 src++;
648                 niov++;
649                 src_niov--;
650                 offset = 0;
651         }
652 }
653 EXPORT_SYMBOL(lnet_extract_kiov);
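
/*
 * Usage sketch (illustrative, not part of the original file): describing a
 * sub-range of an existing bio_vec array without copying any data. Assuming
 * 'src' covers a message in 4 page-sized frags, the call below fills 'dst'
 * with the frags spanning 'len' bytes starting PAGE_SIZE + 100 bytes into
 * the message and returns the number of 'dst' entries used.
 *
 *	struct bio_vec dst[LNET_MAX_IOV];
 *	int used;
 *
 *	used = lnet_extract_kiov(LNET_MAX_IOV, dst, 4, src,
 *				 PAGE_SIZE + 100, len);
 */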
654
655 void
656 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
657              int delayed, unsigned int offset, unsigned int mlen,
658              unsigned int rlen)
659 {
660         unsigned int niov = 0;
661         struct kvec *iov = NULL;
662         struct bio_vec  *kiov = NULL;
663         int rc;
664
665         LASSERT (!in_interrupt ());
666         LASSERT (mlen == 0 || msg != NULL);
667
668         if (msg != NULL) {
669                 LASSERT(msg->msg_receiving);
670                 LASSERT(!msg->msg_sending);
671                 LASSERT(rlen == msg->msg_len);
672                 LASSERT(mlen <= msg->msg_len);
673                 LASSERT(msg->msg_offset == offset);
674                 LASSERT(msg->msg_wanted == mlen);
675
676                 msg->msg_receiving = 0;
677
678                 if (mlen != 0) {
679                         niov = msg->msg_niov;
680                         kiov = msg->msg_kiov;
681
682                         LASSERT (niov > 0);
683                         LASSERT ((iov == NULL) != (kiov == NULL));
684                 }
685         }
686
687         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
688                                              niov, kiov, offset, mlen,
689                                              rlen);
690         if (rc < 0)
691                 lnet_finalize(msg, rc);
692 }
693
694 static void
695 lnet_setpayloadbuffer(struct lnet_msg *msg)
696 {
697         struct lnet_libmd *md = msg->msg_md;
698
699         LASSERT(msg->msg_len > 0);
700         LASSERT(!msg->msg_routing);
701         LASSERT(md != NULL);
702         LASSERT(msg->msg_niov == 0);
703         LASSERT(msg->msg_kiov == NULL);
704
705         msg->msg_niov = md->md_niov;
706         msg->msg_kiov = md->md_kiov;
707 }
708
709 void
710 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
711                unsigned int offset, unsigned int len)
712 {
713         msg->msg_type = type;
714         msg->msg_target = *target;
715         msg->msg_len = len;
716         msg->msg_offset = offset;
717
718         if (len != 0)
719                 lnet_setpayloadbuffer(msg);
720
721         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
722         msg->msg_hdr.type           = type;
723         /* dest_nid will be overwritten by lnet_select_pathway() */
724         msg->msg_hdr.dest_nid = target->nid;
725         msg->msg_hdr.dest_pid = target->pid;
726         /* src_nid will be set later */
727         msg->msg_hdr.src_pid        = the_lnet.ln_pid;
728         msg->msg_hdr.payload_length = len;
729 }
730
731 void
732 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
733 {
734         void *priv = msg->msg_private;
735         int rc;
736
737         LASSERT(!in_interrupt());
738         LASSERT(nid_is_lo0(&ni->ni_nid) ||
739                 (msg->msg_txcredit && msg->msg_peertxcredit));
740
741         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
742         if (rc < 0) {
743                 msg->msg_no_resend = true;
744                 lnet_finalize(msg, rc);
745         }
746 }
747
748 static int
749 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
750 {
751         int     rc;
752
753         LASSERT(!msg->msg_sending);
754         LASSERT(msg->msg_receiving);
755         LASSERT(!msg->msg_rx_ready_delay);
756         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
757
758         msg->msg_rx_ready_delay = 1;
759         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
760                                                   &msg->msg_private);
761         if (rc != 0) {
762                 CERROR("recv from %s / send to %s aborted: "
763                        "eager_recv failed %d\n",
764                        libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
765                        libcfs_idstr(&msg->msg_target), rc);
766                 LASSERT(rc < 0); /* required by my callers */
767         }
768
769         return rc;
770 }
771
772 static bool
773 lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
774 {
775         time64_t deadline;
776
777         deadline = lpni->lpni_last_alive +
778                    lpni->lpni_net->net_tunables.lct_peer_timeout;
779
780         /*
781          * assume peer_ni is alive as long as we're within the configured
782          * peer timeout
783          */
784         if (deadline > now)
785                 return false;
786
787         return true;
788 }
789
790 /* NB: returns 1 when alive, 0 when dead, negative when error;
791  *     may drop the lnet_net_lock */
792 static int
793 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
794                        struct lnet_msg *msg)
795 {
796         time64_t now = ktime_get_seconds();
797
798         if (!lnet_peer_aliveness_enabled(lpni))
799                 return -ENODEV;
800
801         /*
802          * If we're resending a message, let's attempt to send it even if
803          * the peer is down to fulfill our resend quota on the message
804          */
805         if (msg->msg_retry_count > 0)
806                 return 1;
807
808         /* try to send recovery messages regardless */
809         if (msg->msg_recovery)
810                 return 1;
811
812         /* always send any responses */
813         if (lnet_msg_is_response(msg))
814                 return 1;
815
816         /* always send non-routed messages */
817         if (!msg->msg_routing)
818                 return 1;
819
820         if (!lnet_is_peer_deadline_passed(lpni, now))
821                 return 1;
822
823         return lnet_is_peer_ni_alive(lpni);
824 }
825
826 /**
827  * \param msg The message to be sent.
828  * \param do_send True if lnet_ni_send() should be called in this function.
829  *        lnet_send() is going to lnet_net_unlock immediately after this, so
830  *        it sets do_send FALSE and skips the unlock/send/lock sequence.
831  *
832  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
833  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
834  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
835  * \retval -ECANCELED If the MD of the message has been unlinked.
836  */
837 static int
838 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
839 {
840         struct lnet_peer_ni     *lp = msg->msg_txpeer;
841         struct lnet_ni          *ni = msg->msg_txni;
842         int                     cpt = msg->msg_tx_cpt;
843         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
844
845         /* non-lnet_send() callers have checked before */
846         LASSERT(!do_send || msg->msg_tx_delayed);
847         LASSERT(!msg->msg_receiving);
848         LASSERT(msg->msg_tx_committed);
849
850         /* can't get here if we're sending to the loopback interface */
851         if (the_lnet.ln_loni)
852                 LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));
853
854         /* NB 'lp' is always the next hop */
855         if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
856             lnet_peer_alive_locked(ni, lp, msg) == 0) {
857                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
858                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
859                         msg->msg_len;
860                 lnet_net_unlock(cpt);
861                 if (msg->msg_txpeer)
862                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
863                                         msg->msg_type,
864                                         LNET_STATS_TYPE_DROP);
865                 if (msg->msg_txni)
866                         lnet_incr_stats(&msg->msg_txni->ni_stats,
867                                         msg->msg_type,
868                                         LNET_STATS_TYPE_DROP);
869
870                 CNETERR("Dropping message for %s: peer not alive\n",
871                         libcfs_idstr(&msg->msg_target));
872                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
873                 if (do_send)
874                         lnet_finalize(msg, -EHOSTUNREACH);
875
876                 lnet_net_lock(cpt);
877                 return -EHOSTUNREACH;
878         }
879
880         if (msg->msg_md != NULL &&
881             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
882                 lnet_net_unlock(cpt);
883
884                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
885                         "called on the MD/ME.\n",
886                         libcfs_idstr(&msg->msg_target));
887                 if (do_send) {
888                         msg->msg_no_resend = true;
889                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
890                                msg, libcfs_idstr(&msg->msg_target));
891                         lnet_finalize(msg, -ECANCELED);
892                 }
893
894                 lnet_net_lock(cpt);
895                 return -ECANCELED;
896         }
897
898         if (!msg->msg_peertxcredit) {
899                 spin_lock(&lp->lpni_lock);
900                 LASSERT((lp->lpni_txcredits < 0) ==
901                         !list_empty(&lp->lpni_txq));
902
903                 msg->msg_peertxcredit = 1;
904                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
905                 lp->lpni_txcredits--;
906
907                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
908                         lp->lpni_mintxcredits = lp->lpni_txcredits;
909
910                 if (lp->lpni_txcredits < 0) {
911                         msg->msg_tx_delayed = 1;
912                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
913                         spin_unlock(&lp->lpni_lock);
914                         return LNET_CREDIT_WAIT;
915                 }
916                 spin_unlock(&lp->lpni_lock);
917         }
918
919         if (!msg->msg_txcredit) {
920                 LASSERT((tq->tq_credits < 0) ==
921                         !list_empty(&tq->tq_delayed));
922
923                 msg->msg_txcredit = 1;
924                 tq->tq_credits--;
925                 atomic_dec(&ni->ni_tx_credits);
926
927                 if (tq->tq_credits < tq->tq_credits_min)
928                         tq->tq_credits_min = tq->tq_credits;
929
930                 if (tq->tq_credits < 0) {
931                         msg->msg_tx_delayed = 1;
932                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
933                         return LNET_CREDIT_WAIT;
934                 }
935         }
936
937         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
938             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
939                 msg->msg_tx_delayed = 1;
940                 return LNET_CREDIT_WAIT;
941         }
942
943         /* unset the msg_tx_delayed flag as we're going to send it now */
944         msg->msg_tx_delayed = 0;
945
946         if (do_send) {
947                 lnet_net_unlock(cpt);
948                 lnet_ni_send(ni, msg);
949                 lnet_net_lock(cpt);
950         }
951         return LNET_CREDIT_OK;
952 }
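
/*
 * Caller-side sketch (illustrative, not part of the original file): how the
 * return values documented above are typically handled by the send path.
 *
 *	rc = lnet_post_send_locked(msg, 0);
 *	if (rc == LNET_CREDIT_OK)
 *		(credits were taken; the message is or may now be sent)
 *	else if (rc == LNET_CREDIT_WAIT)
 *		(the message was queued until credits are returned)
 *	else
 *		(rc is -EHOSTUNREACH or -ECANCELED and the send is aborted)
 */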
953
954
955 static struct lnet_rtrbufpool *
956 lnet_msg2bufpool(struct lnet_msg *msg)
957 {
958         struct lnet_rtrbufpool  *rbp;
959         int                     cpt;
960
961         LASSERT(msg->msg_rx_committed);
962
963         cpt = msg->msg_rx_cpt;
964         rbp = &the_lnet.ln_rtrpools[cpt][0];
965
966         LASSERT(msg->msg_len <= LNET_MTU);
967         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
968                 rbp++;
969                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
970         }
971
972         return rbp;
973 }
974
975 static int
976 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
977 {
978         /* lnet_parse() is going to lnet_net_unlock immediately after this, so
979          * it sets do_recv FALSE and we skip the unlock/receive/lock sequence.
980          * Return LNET_CREDIT_WAIT if the msg blocked, and LNET_CREDIT_OK if it
981          * was received or is OK to receive. */
982         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
983         struct lnet_peer *lp;
984         struct lnet_rtrbufpool *rbp;
985         struct lnet_rtrbuf *rb;
986
987         LASSERT(msg->msg_kiov == NULL);
988         LASSERT(msg->msg_niov == 0);
989         LASSERT(msg->msg_routing);
990         LASSERT(msg->msg_receiving);
991         LASSERT(!msg->msg_sending);
992         LASSERT(lpni->lpni_peer_net);
993         LASSERT(lpni->lpni_peer_net->lpn_peer);
994
995         lp = lpni->lpni_peer_net->lpn_peer;
996
997         /* non-lnet_parse callers only receive delayed messages */
998         LASSERT(!do_recv || msg->msg_rx_delayed);
999
1000         if (!msg->msg_peerrtrcredit) {
1001                 /* lpni_lock protects the credit manipulation */
1002                 spin_lock(&lpni->lpni_lock);
1003
1004                 msg->msg_peerrtrcredit = 1;
1005                 lpni->lpni_rtrcredits--;
1006                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
1007                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
1008
1009                 if (lpni->lpni_rtrcredits < 0) {
1010                         spin_unlock(&lpni->lpni_lock);
1011                         /* must have checked eager_recv before here */
1012                         LASSERT(msg->msg_rx_ready_delay);
1013                         msg->msg_rx_delayed = 1;
1014                         /* lp_lock protects the lp_rtrq */
1015                         spin_lock(&lp->lp_lock);
1016                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1017                         spin_unlock(&lp->lp_lock);
1018                         return LNET_CREDIT_WAIT;
1019                 }
1020                 spin_unlock(&lpni->lpni_lock);
1021         }
1022
1023         rbp = lnet_msg2bufpool(msg);
1024
1025         if (!msg->msg_rtrcredit) {
1026                 msg->msg_rtrcredit = 1;
1027                 rbp->rbp_credits--;
1028                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1029                         rbp->rbp_mincredits = rbp->rbp_credits;
1030
1031                 if (rbp->rbp_credits < 0) {
1032                         /* must have checked eager_recv before here */
1033                         LASSERT(msg->msg_rx_ready_delay);
1034                         msg->msg_rx_delayed = 1;
1035                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1036                         return LNET_CREDIT_WAIT;
1037                 }
1038         }
1039
1040         LASSERT(!list_empty(&rbp->rbp_bufs));
1041         rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
1042         list_del(&rb->rb_list);
1043
1044         msg->msg_niov = rbp->rbp_npages;
1045         msg->msg_kiov = &rb->rb_kiov[0];
1046
1047         /* unset the msg_rx_delayed flag since we're receiving the message */
1048         msg->msg_rx_delayed = 0;
1049
1050         if (do_recv) {
1051                 int cpt = msg->msg_rx_cpt;
1052
1053                 lnet_net_unlock(cpt);
1054                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1055                              0, msg->msg_len, msg->msg_len);
1056                 lnet_net_lock(cpt);
1057         }
1058         return LNET_CREDIT_OK;
1059 }
1060
1061 void
1062 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1063 {
1064         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1065         struct lnet_ni          *txni = msg->msg_txni;
1066         struct lnet_msg         *msg2;
1067
1068         if (msg->msg_txcredit) {
1069                 struct lnet_ni       *ni = msg->msg_txni;
1070                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1071
1072                 /* give back NI txcredits */
1073                 msg->msg_txcredit = 0;
1074
1075                 LASSERT((tq->tq_credits < 0) ==
1076                         !list_empty(&tq->tq_delayed));
1077
1078                 tq->tq_credits++;
1079                 atomic_inc(&ni->ni_tx_credits);
1080                 if (tq->tq_credits <= 0) {
1081                         msg2 = list_entry(tq->tq_delayed.next,
1082                                           struct lnet_msg, msg_list);
1083                         list_del(&msg2->msg_list);
1084
1085                         LASSERT(msg2->msg_txni == ni);
1086                         LASSERT(msg2->msg_tx_delayed);
1087                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1088
1089                         (void) lnet_post_send_locked(msg2, 1);
1090                 }
1091         }
1092
1093         if (msg->msg_peertxcredit) {
1094                 /* give back peer txcredits */
1095                 msg->msg_peertxcredit = 0;
1096
1097                 spin_lock(&txpeer->lpni_lock);
1098                 LASSERT((txpeer->lpni_txcredits < 0) ==
1099                         !list_empty(&txpeer->lpni_txq));
1100
1101                 txpeer->lpni_txqnob -=  msg->msg_len +
1102                                         sizeof(struct lnet_hdr_nid4);
1103                 LASSERT(txpeer->lpni_txqnob >= 0);
1104
1105                 txpeer->lpni_txcredits++;
1106                 if (txpeer->lpni_txcredits <= 0) {
1107                         int msg2_cpt;
1108
1109                         msg2 = list_entry(txpeer->lpni_txq.next,
1110                                               struct lnet_msg, msg_list);
1111                         list_del(&msg2->msg_list);
1112                         spin_unlock(&txpeer->lpni_lock);
1113
1114                         LASSERT(msg2->msg_txpeer == txpeer);
1115                         LASSERT(msg2->msg_tx_delayed);
1116
1117                         msg2_cpt = msg2->msg_tx_cpt;
1118
1119                         /*
1120                          * The msg_cpt can be different from the msg2_cpt
1121                          * so we need to make sure we lock the correct cpt
1122                          * for msg2.
1123                          * Once we call lnet_post_send_locked() it is no
1124                          * longer safe to access msg2, since it could've
1125                          * been freed by lnet_finalize(), but we still
1126                          * need to relock the correct cpt, so we cache the
1127                          * msg2_cpt for the purpose of the check that
1128                          * follows the call to lnet_post_send_locked().
1129                          */
1130                         if (msg2_cpt != msg->msg_tx_cpt) {
1131                                 lnet_net_unlock(msg->msg_tx_cpt);
1132                                 lnet_net_lock(msg2_cpt);
1133                         }
1134                         (void) lnet_post_send_locked(msg2, 1);
1135                         if (msg2_cpt != msg->msg_tx_cpt) {
1136                                 lnet_net_unlock(msg2_cpt);
1137                                 lnet_net_lock(msg->msg_tx_cpt);
1138                         }
1139                 } else {
1140                         spin_unlock(&txpeer->lpni_lock);
1141                 }
1142         }
1143
1144         if (txni != NULL) {
1145                 msg->msg_txni = NULL;
1146                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1147         }
1148
1149         if (txpeer != NULL) {
1150                 msg->msg_txpeer = NULL;
1151                 lnet_peer_ni_decref_locked(txpeer);
1152         }
1153 }
1154
1155 void
1156 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1157 {
1158         struct lnet_msg *msg;
1159
1160         if (list_empty(&rbp->rbp_msgs))
1161                 return;
1162         msg = list_entry(rbp->rbp_msgs.next,
1163                          struct lnet_msg, msg_list);
1164         list_del(&msg->msg_list);
1165
1166         (void)lnet_post_routed_recv_locked(msg, 1);
1167 }
1168
1169 void
1170 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1171 {
1172         struct lnet_msg *msg;
1173         struct lnet_msg *tmp;
1174
1175         lnet_net_unlock(cpt);
1176
1177         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1178                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1179                              0, 0, 0, msg->msg_hdr.payload_length);
1180                 list_del_init(&msg->msg_list);
1181                 msg->msg_no_resend = true;
1182                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1183                 lnet_finalize(msg, -ECANCELED);
1184         }
1185
1186         lnet_net_lock(cpt);
1187 }
1188
1189 void
1190 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1191 {
1192         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1193         struct lnet_peer *lp;
1194         struct lnet_ni *rxni = msg->msg_rxni;
1195         struct lnet_msg *msg2;
1196
1197         if (msg->msg_rtrcredit) {
1198                 /* give back global router credits */
1199                 struct lnet_rtrbuf *rb;
1200                 struct lnet_rtrbufpool *rbp;
1201
1202                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1203                  * there until it gets one allocated, or aborts the wait
1204                  * itself */
1205                 LASSERT(msg->msg_kiov != NULL);
1206
1207                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1208                 rbp = rb->rb_pool;
1209
1210                 msg->msg_kiov = NULL;
1211                 msg->msg_rtrcredit = 0;
1212
1213                 LASSERT(rbp == lnet_msg2bufpool(msg));
1214
1215                 LASSERT((rbp->rbp_credits > 0) ==
1216                         !list_empty(&rbp->rbp_bufs));
1217
1218                 /* If routing is now turned off, we just drop this buffer and
1219                  * don't bother trying to return credits.  */
1220                 if (!the_lnet.ln_routing) {
1221                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1222                         goto routing_off;
1223                 }
1224
1225                 /* It is possible that a user has lowered the desired number of
1226                  * buffers in this pool.  Make sure we never put back
1227                  * more buffers than the stated number. */
1228                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1229                         /* Discard this buffer so we don't have too
1230                          * many. */
1231                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1232                         rbp->rbp_nbuffers--;
1233                 } else {
1234                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1235                         rbp->rbp_credits++;
1236                         if (rbp->rbp_credits <= 0)
1237                                 lnet_schedule_blocked_locked(rbp);
1238                 }
1239         }
1240
1241 routing_off:
1242         if (msg->msg_peerrtrcredit) {
1243                 LASSERT(rxpeerni);
1244                 LASSERT(rxpeerni->lpni_peer_net);
1245                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1246
1247                 /* give back peer router credits */
1248                 msg->msg_peerrtrcredit = 0;
1249
1250                 spin_lock(&rxpeerni->lpni_lock);
1251                 rxpeerni->lpni_rtrcredits++;
1252                 spin_unlock(&rxpeerni->lpni_lock);
1253
1254                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1255                 spin_lock(&lp->lp_lock);
1256
1257                 /* drop all messages which are queued to be routed on that
1258                  * peer. */
1259                 if (!the_lnet.ln_routing) {
1260                         LIST_HEAD(drop);
1261                         list_splice_init(&lp->lp_rtrq, &drop);
1262                         spin_unlock(&lp->lp_lock);
1263                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1264                 } else if (!list_empty(&lp->lp_rtrq)) {
1265                         int msg2_cpt;
1266
1267                         msg2 = list_entry(lp->lp_rtrq.next,
1268                                           struct lnet_msg, msg_list);
1269                         list_del(&msg2->msg_list);
1270                         msg2_cpt = msg2->msg_rx_cpt;
1271                         spin_unlock(&lp->lp_lock);
1272                         /*
1273                          * messages on the lp_rtrq can be from any NID in
1274                          * the peer, which means they might have different
1275                          * cpts. We need to make sure we lock the right
1276                          * one.
1277                          */
1278                         if (msg2_cpt != msg->msg_rx_cpt) {
1279                                 lnet_net_unlock(msg->msg_rx_cpt);
1280                                 lnet_net_lock(msg2_cpt);
1281                         }
1282                         (void) lnet_post_routed_recv_locked(msg2, 1);
1283                         if (msg2_cpt != msg->msg_rx_cpt) {
1284                                 lnet_net_unlock(msg2_cpt);
1285                                 lnet_net_lock(msg->msg_rx_cpt);
1286                         }
1287                 } else {
1288                         spin_unlock(&lp->lp_lock);
1289                 }
1290         }
1291         if (rxni != NULL) {
1292                 msg->msg_rxni = NULL;
1293                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1294         }
1295         if (rxpeerni != NULL) {
1296                 msg->msg_rxpeer = NULL;
1297                 lnet_peer_ni_decref_locked(rxpeerni);
1298         }
1299 }
1300
1301 static struct lnet_peer_ni *
1302 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1303                     struct lnet_peer *peer,
1304                     struct lnet_peer_ni *best_lpni,
1305                     struct lnet_peer_net *peer_net)
1306 {
1307         /*
1308          * Look at the peer NIs for the destination peer that connect
1309          * to the chosen net. If a peer_ni is preferred when using the
1310          * best_ni to communicate, we use that one. If there is no
1311          * preferred peer_ni, or there are multiple preferred peer_ni,
1312          * the available transmit credits are used. If the transmit
1313          * credits are equal, we round-robin over the peer_ni.
1314          */
1315         struct lnet_peer_ni *lpni = NULL;
1316         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1317                 INT_MIN;
1318         int best_lpni_healthv = (best_lpni) ?
1319                 atomic_read(&best_lpni->lpni_healthv) : 0;
1320         bool best_lpni_is_preferred = false;
1321         bool lpni_is_preferred;
1322         int lpni_healthv;
1323         __u32 lpni_sel_prio;
1324         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1325
1326         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1327                 /*
1328                  * if the best_ni we've chosen already has this lpni
1329                  * preferred, then let's use it
1330                  */
1331                 if (best_ni) {
1332                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(
1333                                 lpni, &best_ni->ni_nid);
1334                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1335                                libcfs_nidstr(&best_ni->ni_nid),
1336                                lpni_is_preferred);
1337                 } else {
1338                         lpni_is_preferred = false;
1339                 }
1340
1341                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1342                 lpni_sel_prio = lpni->lpni_sel_priority;
1343
1344                 if (best_lpni)
1345                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1346                                 libcfs_nidstr(&lpni->lpni_nid),
1347                                 libcfs_nidstr(&best_lpni->lpni_nid),
1348                                 lpni_healthv, best_lpni_healthv,
1349                                 lpni_sel_prio, best_sel_prio,
1350                                 lpni->lpni_txcredits, best_lpni_credits,
1351                                 lpni->lpni_seq, best_lpni->lpni_seq);
1352                 else
1353                         goto select_lpni;
1354
1355                 /* pick the healthiest peer ni */
1356                 if (lpni_healthv < best_lpni_healthv)
1357                         continue;
1358                 else if (lpni_healthv > best_lpni_healthv) {
1359                         if (best_lpni_is_preferred)
1360                                 best_lpni_is_preferred = false;
1361                         goto select_lpni;
1362                 }
1363
1364                 if (lpni_sel_prio > best_sel_prio)
1365                         continue;
1366                 else if (lpni_sel_prio < best_sel_prio) {
1367                         if (best_lpni_is_preferred)
1368                                 best_lpni_is_preferred = false;
1369                         goto select_lpni;
1370                 }
1371
1372                 /* if this is a preferred peer use it */
1373                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1374                         best_lpni_is_preferred = true;
1375                         goto select_lpni;
1376                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1377                         /* this is not the preferred peer so let's ignore
1378                          * it.
1379                          */
1380                         continue;
1381                 }
1382
1383                 if (lpni->lpni_txcredits < best_lpni_credits)
1384                         /* We already have a peer that has more credits
1385                          * available than this one. No need to consider
1386                          * this peer further.
1387                          */
1388                         continue;
1389                 else if (lpni->lpni_txcredits > best_lpni_credits)
1390                         goto select_lpni;
1391
1392                 /* The best peer found so far and the current peer
1393                  * have the same number of available credits let's
1394                  * make sure to select between them using Round Robin
1395                  */
1396                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1397                         continue;
1398 select_lpni:
1399                 best_lpni_is_preferred = lpni_is_preferred;
1400                 best_lpni_healthv = lpni_healthv;
1401                 best_sel_prio = lpni_sel_prio;
1402                 best_lpni = lpni;
1403                 best_lpni_credits = lpni->lpni_txcredits;
1404         }
1405
1406         /* if we still can't find a peer ni then we can't reach it */
1407         if (!best_lpni) {
1408                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1409                         LNET_NIDNET(dst_nid);
1410                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1411                                 libcfs_net2str(net_id));
1412                 return NULL;
1413         }
1414
1415         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1416                libcfs_nidstr(&best_lpni->lpni_nid));
1417
1418         return best_lpni;
1419 }
1420
1421 /*
1422  * Prerequisite: the best_ni should already be set in the sd
1423  * Find the best lpni.
1424  * If the net id is provided then restrict lpni selection on
1425  * that particular net.
1426  * Otherwise find any reachable lpni. When dealing with an MR
1427  * gateway that has multiple lpnis we can use, we want to
1428  * select the best one from the list of reachable
1429  * ones.
1430  */
1431 static inline struct lnet_peer_ni *
1432 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1433                     struct lnet_peer *peer, __u32 net_id)
1434 {
1435         struct lnet_peer_net *peer_net;
1436
1437         /* find the best_lpni on any local network */
1438         if (net_id == LNET_NET_ANY) {
1439                 struct lnet_peer_ni *best_lpni = NULL;
1440                 struct lnet_peer_net *lpn;
1441                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1442                         /* no net specified find any reachable peer ni */
1443                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1444                                 continue;
1445                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1446                                                         best_lpni, lpn);
1447                 }
1448
1449                 return best_lpni;
1450         }
1451         /* restrict on the specified net */
1452         peer_net = lnet_peer_get_net_locked(peer, net_id);
1453         if (peer_net)
1454                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1455
1456         return NULL;
1457 }
1458
1459 static int
1460 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1461 {
1462         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1463                 return 1;
1464
1465         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1466                 return -1;
1467
1468         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1469                 return 1;
1470
1471         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1472                 return -1;
1473
1474         return 0;
1475 }
1476
1477 /* Compare route priorities and hop counts */
1478 static int
1479 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1480 {
1481         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1482         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1483
1484         if (r1->lr_priority < r2->lr_priority)
1485                 return 1;
1486
1487         if (r1->lr_priority > r2->lr_priority)
1488                 return -1;
1489
1490         if (r1_hops < r2_hops)
1491                 return 1;
1492
1493         if (r1_hops > r2_hops)
1494                 return -1;
1495
1496         return 0;
1497 }
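/*
 * For illustration (hypothetical values): a route with lr_priority 0
 * beats one with lr_priority 1 regardless of hop count, since lower
 * priority values are preferred. Only when priorities match do hop
 * counts break the tie, with LNET_UNDEFINED_HOPS treated as 1 hop:
 *
 *	r1 {prio 0, hops 3} vs r2 {prio 1, hops 1} -> 1 (r1 preferred)
 *	r1 {prio 0, hops 3} vs r2 {prio 0, hops 1} -> -1 (r2 preferred)
 */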
1498
1499 static struct lnet_route *
1500 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1501                        struct lnet_peer_ni *remote_lpni,
1502                        struct lnet_route **prev_route,
1503                        struct lnet_peer_ni **gwni)
1504 {
1505         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1506         struct lnet_route *best_route;
1507         struct lnet_route *last_route;
1508         struct lnet_route *route;
1509         int rc;
1510         bool best_rte_is_preferred = false;
1511         struct lnet_nid *gw_pnid;
1512
1513         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1514                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1515
1516         best_route = last_route = NULL;
1517         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1518                 if (!lnet_is_route_alive(route))
1519                         continue;
1520                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1521
1522                 /* no protection on the fields below, but it's harmless */
1523                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1524                         last_route = route;
1525
1526                 /* if the best route found is in the preferred list then
1527                  * tag it as preferred and use it later on. But if we
1528                  * didn't find any routes which are on the preferred list
1529                  * then just use the best route possible.
1530                  */
1531                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1532
1533                 if (!best_route || (rc && !best_rte_is_preferred)) {
1534                         /* Restrict the selection of the router NI on the
1535                          * src_net provided. If the src_net is LNET_NID_ANY,
1536                          * then select the best interface available.
1537                          */
1538                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1539                                                    route->lr_gateway,
1540                                                    src_net);
1541                         if (!lpni) {
1542                                 CDEBUG(D_NET,
1543                                        "Gateway %s does not have a peer NI on net %s\n",
1544                                        libcfs_nidstr(gw_pnid),
1545                                        libcfs_net2str(src_net));
1546                                 continue;
1547                         }
1548                 }
1549
1550                 if (rc && !best_rte_is_preferred) {
1551                         /* This is the first preferred route we found,
1552                          * so it beats any route found previously
1553                          */
1554                         best_route = route;
1555                         if (!last_route)
1556                                 last_route = route;
1557                         best_gw_ni = lpni;
1558                         best_rte_is_preferred = true;
1559                         CDEBUG(D_NET, "preferred gw = %s\n",
1560                                libcfs_nidstr(gw_pnid));
1561                         continue;
1562                 } else if ((!rc) && best_rte_is_preferred)
1563                         /* The best route we found so far is in the preferred
1564                          * list, so it beats any non-preferred route
1565                          */
1566                         continue;
1567
1568                 if (!best_route) {
1569                         best_route = last_route = route;
1570                         best_gw_ni = lpni;
1571                         continue;
1572                 }
1573
1574                 rc = lnet_compare_routes(route, best_route);
1575                 if (rc == -1)
1576                         continue;
1577
1578                 /* Restrict the selection of the router NI on the
1579                  * src_net provided. If the src_net is LNET_NID_ANY,
1580                  * then select the best interface available.
1581                  */
1582                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1583                                            route->lr_gateway,
1584                                            src_net);
1585                 if (!lpni) {
1586                         CDEBUG(D_NET,
1587                                "Gateway %s does not have a peer NI on net %s\n",
1588                                libcfs_nidstr(gw_pnid),
1589                                libcfs_net2str(src_net));
1590                         continue;
1591                 }
1592
1593                 if (rc == 1) {
1594                         best_route = route;
1595                         best_gw_ni = lpni;
1596                         continue;
1597                 }
1598
1599                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1600                 if (rc == -1)
1601                         continue;
1602
1603                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1604                         best_route = route;
1605                         best_gw_ni = lpni;
1606                         continue;
1607                 }
1608         }
1609
1610         *prev_route = last_route;
1611         *gwni = best_gw_ni;
1612
1613         return best_route;
1614 }
1615
1616 static inline unsigned int
1617 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1618 {
1619         if (dev_idx == UINT_MAX)
1620                 return UINT_MAX;
1621
1622         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1623             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1624                 return UINT_MAX;
1625
1626         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1627 }
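/*
 * Note: UINT_MAX serves both as the "no device index" input and the "no
 * priority available" result, so callers can compare returned values
 * directly; a lower value indicates the NI's device is a better match
 * for the MD's memory (e.g. a NIC nearer the device holding an
 * RDMA-only page).
 */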
1628
1629 static struct lnet_ni *
1630 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1631                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1632                  struct lnet_msg *msg, int md_cpt)
1633 {
1634         struct lnet_libmd *md = msg->msg_md;
1635         unsigned int offset = msg->msg_offset;
1636         unsigned int shortest_distance;
1637         struct lnet_ni *ni = NULL;
1638         int best_credits;
1639         int best_healthv;
1640         __u32 best_sel_prio;
1641         unsigned int best_dev_prio;
1642         unsigned int dev_idx = UINT_MAX;
1643         struct page *page = lnet_get_first_page(md, offset);
1644         msg->msg_rdma_force = lnet_is_rdma_only_page(page);
1645
1646         if (msg->msg_rdma_force)
1647                 dev_idx = lnet_get_dev_idx(page);
1648
1649         /*
1650          * If there is no peer_ni that we can send to on this network,
1651          * then there is no point in looking for a new best_ni here.
1652          */
1653         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1654                 return best_ni;
1655
1656         if (best_ni == NULL) {
1657                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1658                 shortest_distance = UINT_MAX;
1659                 best_dev_prio = UINT_MAX;
1660                 best_credits = INT_MIN;
1661                 best_healthv = 0;
1662         } else {
1663                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1664                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1665                                                      best_ni->ni_dev_cpt);
1666                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1667                 best_healthv = atomic_read(&best_ni->ni_healthv);
1668                 best_sel_prio = best_ni->ni_sel_priority;
1669         }
1670
1671         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1672                 unsigned int distance;
1673                 int ni_credits;
1674                 int ni_healthv;
1675                 int ni_fatal;
1676                 __u32 ni_sel_prio;
1677                 unsigned int ni_dev_prio;
1678
1679                 ni_credits = atomic_read(&ni->ni_tx_credits);
1680                 ni_healthv = atomic_read(&ni->ni_healthv);
1681                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1682                 ni_sel_prio = ni->ni_sel_priority;
1683
1684                 /*
1685                  * calculate the distance from the CPT on which
1686                  * the message memory is allocated to the CPT of
1687                  * the NI's physical device
1688                  */
1689                 distance = cfs_cpt_distance(lnet_cpt_table(),
1690                                             md_cpt,
1691                                             ni->ni_dev_cpt);
1692
1693                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1694
1695                 /*
1696                  * All distances smaller than the NUMA range
1697                  * are treated equally.
1698                  */
1699                 if (distance < lnet_numa_range)
1700                         distance = lnet_numa_range;
1701
1702                 /*
1703                  * Select on health, selection policy, direct dma prio,
1704                  * shorter distance, available credits, then round-robin.
1705                  */
1706                 if (ni_fatal)
1707                         continue;
1708
1709                 if (best_ni)
1710                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1711                                libcfs_nidstr(&ni->ni_nid), ni_credits, distance,
1712                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1713                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1714                                : "not selected", best_credits, shortest_distance,
1715                                (best_ni) ? best_ni->ni_seq : 0,
1716                                best_sel_prio, best_dev_prio, best_healthv);
1717                 else
1718                         goto select_ni;
1719
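                /* Tie-break ladder: each criterion below applies only
                 * when all previous ones compare equal; "continue" keeps
                 * the current best_ni while "goto select_ni" replaces it.
                 */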
1720                 if (ni_healthv < best_healthv)
1721                         continue;
1722                 else if (ni_healthv > best_healthv)
1723                         goto select_ni;
1724
1725                 if (ni_sel_prio > best_sel_prio)
1726                         continue;
1727                 else if (ni_sel_prio < best_sel_prio)
1728                         goto select_ni;
1729
1730                 if (ni_dev_prio > best_dev_prio)
1731                         continue;
1732                 else if (ni_dev_prio < best_dev_prio)
1733                         goto select_ni;
1734
1735                 if (distance > shortest_distance)
1736                         continue;
1737                 else if (distance < shortest_distance)
1738                         goto select_ni;
1739
1740                 if (ni_credits < best_credits)
1741                         continue;
1742                 else if (ni_credits > best_credits)
1743                         goto select_ni;
1744
1745                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1746                         continue;
1747
1748 select_ni:
1749                 best_sel_prio = ni_sel_prio;
1750                 best_dev_prio = ni_dev_prio;
1751                 shortest_distance = distance;
1752                 best_healthv = ni_healthv;
1753                 best_ni = ni;
1754                 best_credits = ni_credits;
1755         }
1756
1757         CDEBUG(D_NET, "selected best_ni %s\n",
1758                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1759
1760         return best_ni;
1761 }
1762
1763 static bool
1764 lnet_reserved_msg(struct lnet_msg *msg)
1765 {
1766         if (msg->msg_type == LNET_MSG_PUT) {
1767                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1768                         return true;
1769         } else if (msg->msg_type == LNET_MSG_GET) {
1770                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1771                         return true;
1772         }
1773         return false;
1774 }
1775
1776 /*
1777  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1778  * because such traffic is required to perform discovery. We therefore
1779  * exclude all GET and PUT on that portal. We also exclude all ACK and
1780  * REPLY traffic, but that is because the portal is not tracked in the
1781  * message structure for these message types. We could restrict this
1782  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1783  */
1784 static bool
1785 lnet_msg_discovery(struct lnet_msg *msg)
1786 {
1787         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1788 }
1789
1790 #define SRC_SPEC        0x0001
1791 #define SRC_ANY         0x0002
1792 #define LOCAL_DST       0x0004
1793 #define REMOTE_DST      0x0008
1794 #define MR_DST          0x0010
1795 #define NMR_DST         0x0020
1796 #define SND_RESP        0x0040
1797
1798 /* The following two defines are used for return codes */
1799 #define REPEAT_SEND     0x1000
1800 #define PASS_THROUGH    0x2000
1801
1802 /* The different cases lnet_select pathway needs to handle */
1803 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1804 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1805 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1806 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1807 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1808 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1809 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1810 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
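/*
 * For illustration (hypothetical message): a PUT sent with an explicit
 * source NID to a multi-rail peer on a directly connected network is
 * classified as SRC_SPEC_LOCAL_MR_DST, i.e.
 * (SRC_SPEC | LOCAL_DST | MR_DST) == 0x0015.
 */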
1811
1812 static int
1813 lnet_handle_lo_send(struct lnet_send_data *sd)
1814 {
1815         struct lnet_msg *msg = sd->sd_msg;
1816         int cpt = sd->sd_cpt;
1817
1818         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1819                 return -ESHUTDOWN;
1820
1821         /* No send credit hassles with LOLND */
1822         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1823         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1824         if (!msg->msg_routing)
1825                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1826         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1827         lnet_msg_commit(msg, cpt);
1828         msg->msg_txni = the_lnet.ln_loni;
1829
1830         return LNET_CREDIT_OK;
1831 }
1832
1833 static int
1834 lnet_handle_send(struct lnet_send_data *sd)
1835 {
1836         struct lnet_ni *best_ni = sd->sd_best_ni;
1837         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1838         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1839         struct lnet_msg *msg = sd->sd_msg;
1840         int cpt2;
1841         __u32 send_case = sd->sd_send_case;
1842         int rc;
1843         __u32 routing = send_case & REMOTE_DST;
1844         struct lnet_rsp_tracker *rspt;
1845
1846         /* Increment sequence number of the selected peer, peer net,
1847          * local ni and local net so that we pick the next ones
1848          * in Round Robin.
1849          */
1850         best_lpni->lpni_peer_net->lpn_seq++;
1851         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1852         best_ni->ni_net->net_seq++;
1853         best_ni->ni_seq = best_ni->ni_net->net_seq;
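        /* For example, once these increments land, a sibling peer NI
         * with a lower lpni_seq on the same peer net will win the
         * round-robin tie-break in lnet_select_peer_ni() on the next
         * send, all other criteria being equal.
         */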
1854
1855         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1856                libcfs_nidstr(&best_ni->ni_nid),
1857                best_ni->ni_seq, best_ni->ni_net->net_seq,
1858                atomic_read(&best_ni->ni_tx_credits),
1859                best_ni->ni_sel_priority,
1860                libcfs_nidstr(&best_lpni->lpni_nid),
1861                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1862                best_lpni->lpni_txcredits,
1863                best_lpni->lpni_sel_priority);
1864
1865         /*
1866          * grab a reference on the peer_ni so it sticks around even if
1867          * we need to drop and relock the lnet_net_lock below.
1868          */
1869         lnet_peer_ni_addref_locked(best_lpni);
1870
1871         /*
1872          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1873          * message. This ensures that we get a CPT that is correct for
1874          * the NI when the NI has been restricted to a subset of all CPTs.
1875          * If the selected CPT differs from the one currently locked, we
1876          * must unlock and relock the lnet_net_lock(), and then check whether
1877          * the configuration has changed. We don't have a hold on the best_ni
1878          * yet, and it may have vanished.
1879          */
1880         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1881         if (sd->sd_cpt != cpt2) {
1882                 __u32 seq = lnet_get_dlc_seq_locked();
1883                 lnet_net_unlock(sd->sd_cpt);
1884                 sd->sd_cpt = cpt2;
1885                 lnet_net_lock(sd->sd_cpt);
1886                 if (seq != lnet_get_dlc_seq_locked()) {
1887                         lnet_peer_ni_decref_locked(best_lpni);
1888                         return REPEAT_SEND;
1889                 }
1890         }
1891
1892         /*
1893          * store the best_lpni in the message right away to avoid having
1894          * to do the same operation under different conditions
1895          */
1896         msg->msg_txpeer = best_lpni;
1897         msg->msg_txni = best_ni;
1898
1899         /*
1900          * grab a reference for the best_ni since now it's in use in this
1901          * send. The reference will be dropped in lnet_finalize()
1902          */
1903         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1904
1905         /*
1906          * Always set the target.nid to the best peer picked. Either the
1907          * NID will be one of the peer NIDs selected, or the same NID as
1908          * what was originally set in the target or it will be the NID of
1909          * a router if this message should be routed
1910          */
1911         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1912
1913         /*
1914          * lnet_msg_commit assigns the correct cpt to the message, which
1915          * is used to decrement the correct refcount on the ni when it's
1916          * time to return the credits
1917          */
1918         lnet_msg_commit(msg, sd->sd_cpt);
1919
1920         /*
1921          * If we are routing the message then we keep the src_nid that was
1922          * set by the originator. If we are not routing then we are the
1923          * originator and set it here.
1924          */
1925         if (!msg->msg_routing)
1926                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1927
1928         if (routing) {
1929                 msg->msg_target_is_router = 1;
1930                 msg->msg_target.pid = LNET_PID_LUSTRE;
1931                 /*
1932                  * since we're routing we want to ensure that the
1933                  * msg_hdr.dest_nid is set to the final destination. When
1934                  * the router receives this message it knows how to route
1935                  * it.
1936                  *
1937                  * final_dst_lpni is set at the beginning of the
1938                  * lnet_select_pathway() function and is never changed.
1939                  * It's safe to use it here.
1940                  */
1941                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1942         } else {
1943                 /*
1944                  * if we're not routing set the dest_nid to the best peer
1945                  * ni NID that we picked earlier in the algorithm.
1946                  */
1947                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1948         }
1949
1950         /*
1951          * if we have response tracker block update it with the next hop
1952          * nid
1953          */
1954         if (msg->msg_md) {
1955                 rspt = msg->msg_md->md_rspt_ptr;
1956                 if (rspt) {
1957                         rspt->rspt_next_hop_nid =
1958                                 msg->msg_txpeer->lpni_nid;
1959                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1960                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1961                 }
1962         }
1963
1964         rc = lnet_post_send_locked(msg, 0);
1965
1966         if (!rc)
1967                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1968                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1969                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1970                        libcfs_nidstr(&sd->sd_src_nid),
1971                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1972                        libcfs_nidstr(&sd->sd_dst_nid),
1973                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1974                        libcfs_nidstr(&sd->sd_rtr_nid),
1975                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1976
1977         return rc;
1978 }
1979
1980 static inline void
1981 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1982                          struct lnet_msg *msg)
1983 {
1984         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1985             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1986                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1987                        libcfs_nidstr(&lni->ni_nid),
1988                        libcfs_nidstr(&lpni->lpni_nid));
1989                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
1990         }
1991 }
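/*
 * Note that the pinning above happens only the first time we talk to a
 * non-MR peer (lpni_pref_nnids == 0) and never for ACK/REPLY traffic,
 * so an established non-MR source-NID association is not rewritten.
 */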
1992
1993 /*
1994  * Source Specified
1995  * Local Destination
1996  * non-mr peer
1997  *
1998  * use the source and destination NIDs as the pathway
1999  */
2000 static int
2001 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2002 {
2003         /* the destination lpni is set before we get here. */
2004
2005         /* find local NI */
2006         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2007         if (!sd->sd_best_ni) {
2008                 CERROR("Can't send to %s: src %s is not a local nid\n",
2009                        libcfs_nidstr(&sd->sd_dst_nid),
2010                        libcfs_nidstr(&sd->sd_src_nid));
2011                 return -EINVAL;
2012         }
2013
2014         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2015
2016         return lnet_handle_send(sd);
2017 }
2018
2019 /*
2020  * Source Specified
2021  * Local Destination
2022  * MR Peer
2023  *
2024  * Don't run the selection algorithm on the peer NIs. By specifying the
2025  * local NID, we're also saying that we should always use the destination NID
2026  * provided. This handles the case where we should be using the same
2027  * destination NID for all the messages which belong to the same RPC
2028  * request.
2029  */
2030 static int
2031 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2032 {
2033         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2034         if (!sd->sd_best_ni) {
2035                 CERROR("Can't send to %s: src %s is not a local nid\n",
2036                        libcfs_nidstr(&sd->sd_dst_nid),
2037                        libcfs_nidstr(&sd->sd_src_nid));
2038                 return -EINVAL;
2039         }
2040
2041         if (sd->sd_best_lpni &&
2042             nid_same(&sd->sd_best_lpni->lpni_nid,
2043                       &the_lnet.ln_loni->ni_nid))
2044                 return lnet_handle_lo_send(sd);
2045         else if (sd->sd_best_lpni)
2046                 return lnet_handle_send(sd);
2047
2048         CERROR("can't send to %s. no NI on %s\n",
2049                libcfs_nidstr(&sd->sd_dst_nid),
2050                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2051
2052         return -EHOSTUNREACH;
2053 }
2054
2055 struct lnet_ni *
2056 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2057                               struct lnet_peer *peer,
2058                               struct lnet_peer_net *peer_net,
2059                               struct lnet_msg *msg,
2060                               int cpt)
2061 {
2062         struct lnet_net *local_net;
2063         struct lnet_ni *best_ni;
2064
2065         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2066         if (!local_net)
2067                 return NULL;
2068
2069         /*
2070          * Iterate through the NIs in this local net and select
2071          * the NI to send from. The selection is determined by
2072          * these criteria in the following priority:
2073          *      1. NI health and selection policy priority
2074          *      2. Device priority and NUMA distance
2075          *      3. NI available credits, then Round Robin
2076          */
2077         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2078                                    peer, peer_net, msg, cpt);
2079
2080         return best_ni;
2081 }
2082
2083 static int
2084 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2085                              int cpt)
2086 {
2087         struct lnet_peer *peer;
2088         struct lnet_peer_ni *new_lpni;
2089         int rc;
2090
2091         lnet_peer_ni_addref_locked(lpni);
2092
2093         peer = lpni->lpni_peer_net->lpn_peer;
2094
2095         if (lnet_peer_gw_discovery(peer)) {
2096                 lnet_peer_ni_decref_locked(lpni);
2097                 return 0;
2098         }
2099
2100         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2101                 lnet_peer_ni_decref_locked(lpni);
2102                 return 0;
2103         }
2104
2105         rc = lnet_discover_peer_locked(lpni, cpt, false);
2106         if (rc) {
2107                 lnet_peer_ni_decref_locked(lpni);
2108                 return rc;
2109         }
2110
2111         new_lpni = lnet_find_peer_ni_locked(lnet_nid_to_nid4(&lpni->lpni_nid));
2112         if (!new_lpni) {
2113                 lnet_peer_ni_decref_locked(lpni);
2114                 return -ENOENT;
2115         }
2116
2117         peer = new_lpni->lpni_peer_net->lpn_peer;
2118         spin_lock(&peer->lp_lock);
2119         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2120                 /* The peer NI did not change and the peer is up to date.
2121                  * Nothing more to do.
2122                  */
2123                 spin_unlock(&peer->lp_lock);
2124                 lnet_peer_ni_decref_locked(lpni);
2125                 lnet_peer_ni_decref_locked(new_lpni);
2126                 return 0;
2127         }
2128         spin_unlock(&peer->lp_lock);
2129
2130         /* Either the peer NI changed during discovery, or the peer isn't up
2131          * to date. In both cases we want to queue the message on the
2132          * (possibly new) peer's pending queue and queue the peer for discovery
2133          */
2134         msg->msg_sending = 0;
2135         msg->msg_txpeer = NULL;
2136         lnet_net_unlock(cpt);
2137         lnet_peer_queue_message(peer, msg);
2138         lnet_net_lock(cpt);
2139
2140         lnet_peer_ni_decref_locked(lpni);
2141         lnet_peer_ni_decref_locked(new_lpni);
2142
2143         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2144                msg, libcfs_nidstr(&peer->lp_primary_nid));
2145
2146         return LNET_DC_WAIT;
2147 }
2148
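/*
 * Find a route to dst_nid and pick the gateway to use as the next hop.
 * On success returns 0 with *gw_lpni and *gw_peer set and
 * sd->sd_best_ni selected. Returns a negative errno if no usable route
 * or interface exists, and may return the positive LNET_DC_WAIT if the
 * message was queued pending gateway discovery.
 */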
2149 static int
2150 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2151                              struct lnet_nid *dst_nid,
2152                              struct lnet_peer_ni **gw_lpni,
2153                              struct lnet_peer **gw_peer)
2154 {
2155         int rc;
2156         struct lnet_peer *gw;
2157         struct lnet_peer *lp;
2158         struct lnet_peer_net *lpn;
2159         struct lnet_peer_net *best_lpn = NULL;
2160         struct lnet_remotenet *rnet, *best_rnet = NULL;
2161         struct lnet_route *best_route = NULL;
2162         struct lnet_route *last_route = NULL;
2163         struct lnet_peer_ni *lpni = NULL;
2164         struct lnet_peer_ni *gwni = NULL;
2165         bool route_found = false;
2166         struct lnet_nid *src_nid =
2167                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2168                 ? &sd->sd_src_nid
2169                 : &sd->sd_best_ni->ni_nid;
2170         int best_lpn_healthv = 0;
2171         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2172
2173         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2174                src_nid ? libcfs_nidstr(src_nid) : "ANY");
2175
2176         /* If a router nid was specified then we are replying to a GET or
2177          * sending an ACK. In this case we use the gateway associated with the
2178          * specified router nid.
2179          */
2180         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2181                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2182                 if (gwni) {
2183                         gw = gwni->lpni_peer_net->lpn_peer;
2184                         lnet_peer_ni_decref_locked(gwni);
2185                         if (gw->lp_rtr_refcount)
2186                                 route_found = true;
2187                 } else {
2188                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2189                                libcfs_nidstr(&sd->sd_rtr_nid));
2190                 }
2191         }
2192
2193         if (!route_found) {
2194                 if (sd->sd_msg->msg_routing || (src_nid && !LNET_NID_IS_ANY(src_nid))) {
2195                         /* If I'm routing this message then I need to find the
2196                          * next hop based on the destination NID
2197                          *
2198                          * We also find next hop based on the destination NID
2199                          * if the source NI was specified
2200                          */
2201                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2202                         if (!best_rnet) {
2203                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2204                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2205                                                 "any local NI" :
2206                                                 libcfs_nidstr(src_nid),
2207                                        libcfs_nidstr(&sd->sd_dst_nid));
2208                                 return -EHOSTUNREACH;
2209                         }
2210                 } else {
2211                         /* we've already looked up the initial lpni using
2212                          * dst_nid
2213                          */
2214                         lpni = sd->sd_best_lpni;
2215                         /* the peer tree must be in existence */
2216                         LASSERT(lpni && lpni->lpni_peer_net &&
2217                                 lpni->lpni_peer_net->lpn_peer);
2218                         lp = lpni->lpni_peer_net->lpn_peer;
2219
2220                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2221                                 /* is this remote network reachable?  */
2222                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2223                                 if (!rnet)
2224                                         continue;
2225
2226                                 if (!best_lpn) {
2227                                         best_lpn = lpn;
2228                                         best_rnet = rnet;
2229                                 }
2230
2231                                 /* select the preferred peer net */
2232                                 if (best_lpn_healthv > lpn->lpn_healthv)
2233                                         continue;
2234                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2235                                         goto use_lpn;
2236
2237                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2238                                         continue;
2239                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2240                                         goto use_lpn;
2241
2242                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2243                                         continue;
2244 use_lpn:
2245                                 best_lpn_healthv = lpn->lpn_healthv;
2246                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2247                                 best_lpn = lpn;
2248                                 best_rnet = rnet;
2249                         }
2250
2251                         if (!best_lpn) {
2252                                 CERROR("peer %s has no available nets\n",
2253                                        libcfs_nidstr(&sd->sd_dst_nid));
2254                                 return -EHOSTUNREACH;
2255                         }
2256
2257                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2258                                                                lnet_nid_to_nid4(&sd->sd_dst_nid),
2259                                                                lp,
2260                                                                best_lpn->lpn_net_id);
2261                         if (!sd->sd_best_lpni) {
2262                                 CERROR("peer %s is unreachable\n",
2263                                        libcfs_nidstr(&sd->sd_dst_nid));
2264                                 return -EHOSTUNREACH;
2265                         }
2266
2267                         /* We're attempting to round robin over the remote peer
2268                          * NI's so update the final destination we selected
2269                          */
2270                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2271
2272                         /* Increment the sequence number of the remote lpni so
2273                          * we can round robin over the different interfaces of
2274                          * the remote lpni
2275                          */
2276                         sd->sd_best_lpni->lpni_seq++;
2277                 }
2278
2279                 /*
2280                  * find the best route. Restrict the selection on the net of the
2281                  * local NI if we've already picked the local NI to send from.
2282                  * Otherwise, let's pick any route we can find and then find
2283                  * a local NI we can reach the route's gateway on. Any route we
2284                  * select will be reachable by virtue of the restriction we have
2285                  * when adding a route.
2286                  */
2287                 best_route = lnet_find_route_locked(best_rnet,
2288                                                     LNET_NID_NET(src_nid),
2289                                                     sd->sd_best_lpni,
2290                                                     &last_route, &gwni);
2291
2292                 if (!best_route) {
2293                         CERROR("no route to %s from %s\n",
2294                                libcfs_nidstr(dst_nid),
2295                                libcfs_nidstr(src_nid));
2296                         return -EHOSTUNREACH;
2297                 }
2298
2299                 if (!gwni) {
2300                         CERROR("Internal Error. Route expected to %s from %s\n",
2301                                libcfs_nidstr(dst_nid),
2302                                libcfs_nidstr(src_nid));
2303                         return -EFAULT;
2304                 }
2305
2306                 gw = best_route->lr_gateway;
2307                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2308         }
2309
2310         /*
2311          * If the router checker is not active then discover the gateway here.
2312          * This ensures we are able to take advantage of multi-rail routing, but
2313          * if the router checker is active then we do not unnecessarily delay
2314          * messages while the gateway is being checked by the dedicated monitor
2315          * thread.
2316          *
2317          * NB: We're only checking the alive_router_check_interval here, rather
2318          * than calling lnet_router_checker_active(), because the other
2319          * conditions that are checked by that function are either
2320          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2321          * is not empty)
2322          */
2323         if (alive_router_check_interval <= 0) {
2324                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2325                 if (rc)
2326                         return rc;
2327         }
2328
2329         if (!sd->sd_best_ni) {
2330                 lpn = gwni->lpni_peer_net;
2331                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2332                                                                sd->sd_msg,
2333                                                                sd->sd_md_cpt);
2334                 if (!sd->sd_best_ni) {
2335                         CERROR("Internal Error. Expected local ni on %s but non found: %s\n",
2336                                libcfs_net2str(lpn->lpn_net_id),
2337                                libcfs_nidstr(&sd->sd_src_nid));
2338                         return -EFAULT;
2339                 }
2340         }
2341
2342         *gw_lpni = gwni;
2343         *gw_peer = gw;
2344
2345         /*
2346          * increment the sequence numbers since now we're sure we're
2347          * going to use this path
2348          */
2349         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2350                 LASSERT(best_route && last_route);
2351                 best_route->lr_seq = last_route->lr_seq + 1;
2352                 if (best_lpn)
2353                         best_lpn->lpn_seq++;
2354         }
2355
2356         return 0;
2357 }
2358
2359 /*
2360  * Handle two cases:
2361  *
2362  * Case 1:
2363  *  Source specified
2364  *  Remote destination
2365  *  Non-MR destination
2366  *
2367  * Case 2:
2368  *  Source specified
2369  *  Remote destination
2370  *  MR destination
2371  *
2372  * The handling of these two cases is similar. Even though the destination
2373  * can be MR or non-MR, we'll deal directly with the router.
2374  */
2375 static int
2376 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2377 {
2378         int rc;
2379         struct lnet_peer_ni *gw_lpni = NULL;
2380         struct lnet_peer *gw_peer = NULL;
2381
2382         /* find local NI */
2383         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2384         if (!sd->sd_best_ni) {
2385                 CERROR("Can't send to %s: src %s is not a local nid\n",
2386                        libcfs_nidstr(&sd->sd_dst_nid),
2387                        libcfs_nidstr(&sd->sd_src_nid));
2388                 return -EINVAL;
2389         }
2390
2391         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2392                                           &gw_lpni, &gw_peer);
2393         if (rc)
2394                 return rc;
2395
2396         if (sd->sd_send_case & NMR_DST)
2397                 /*
2398                  * since the final destination is non-MR let's set its preferred
2399                  * NID before we send
2400                  */
2401                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2402                                          sd->sd_msg);
2403
2404         /*
2405          * We're going to send to the gw found so let's set its
2406          * info
2407          */
2408         sd->sd_peer = gw_peer;
2409         sd->sd_best_lpni = gw_lpni;
2410
2411         return lnet_handle_send(sd);
2412 }
2413
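/*
 * Pick the best local NI for sending to @peer by scoring each peer net
 * that is also configured locally: health is compared first, then
 * peer-net and local-net selection priorities, then sequence numbers
 * for round robin. For discovery messages a configured lp_disc_net_id
 * short-circuits the scan.
 */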
2414 struct lnet_ni *
2415 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2416                                struct lnet_msg *msg, bool discovery)
2417 {
2418         struct lnet_peer_net *lpn = NULL;
2419         struct lnet_peer_net *best_lpn = NULL;
2420         struct lnet_net *net = NULL;
2421         struct lnet_net *best_net = NULL;
2422         struct lnet_ni *best_ni = NULL;
2423         int best_lpn_healthv = 0;
2424         int best_net_healthv = 0;
2425         int net_healthv;
2426         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2427         __u32 lpn_sel_prio;
2428         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2429         __u32 net_sel_prio;
2430         bool exit = false;
2431
2432         /*
2433          * The peer can have multiple interfaces, some of them can be on
2434          * the local network and others on a routed network. We should
2435          * prefer the local network. However if the local network is not
2436          * available then we need to try the routed network
2437          */
2438
2439         /* go through all the peer nets and find the best_ni */
2440         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2441                 /*
2442                  * The peer's list of nets can contain non-local nets. We
2443                  * want to only examine the local ones.
2444                  */
2445                 net = lnet_get_net_locked(lpn->lpn_net_id);
2446                 if (!net)
2447                         continue;
2448
2449                 lpn_sel_prio = lpn->lpn_sel_priority;
2450                 net_healthv = lnet_get_net_healthv_locked(net);
2451                 net_sel_prio = net->net_sel_priority;
2452
2453                 /*
2454                  * if this is a discovery message and lp_disc_net_id is
2455                  * specified then use that net to send the discovery on.
2456                  */
2457                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2458                     discovery) {
2459                         exit = true;
2460                         goto select_lpn;
2461                 }
2462
2463                 if (!best_lpn)
2464                         goto select_lpn;
2465
2466                 /* always select the lpn with the best health */
2467                 if (best_lpn_healthv > lpn->lpn_healthv)
2468                         continue;
2469                 else if (best_lpn_healthv < lpn->lpn_healthv)
2470                         goto select_lpn;
2471
2472                 /* select the preferred peer and local nets */
2473                 if (best_lpn_sel_prio < lpn_sel_prio)
2474                         continue;
2475                 else if (best_lpn_sel_prio > lpn_sel_prio)
2476                         goto select_lpn;
2477
2478                 if (best_net_healthv > net_healthv)
2479                         continue;
2480                 else if (best_net_healthv < net_healthv)
2481                         goto select_lpn;
2482
2483                 if (best_net_sel_prio < net_sel_prio)
2484                         continue;
2485                 else if (best_net_sel_prio > net_sel_prio)
2486                         goto select_lpn;
2487
2488                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2489                         continue;
2490                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2491                         goto select_lpn;
2492
2493                 /* round robin over the local networks */
2494                 if (best_net->net_seq <= net->net_seq)
2495                         continue;
2496
2497 select_lpn:
2498                 best_net_healthv = net_healthv;
2499                 best_net_sel_prio = net_sel_prio;
2500                 best_lpn_healthv = lpn->lpn_healthv;
2501                 best_lpn_sel_prio = lpn_sel_prio;
2502                 best_lpn = lpn;
2503                 best_net = net;
2504
2505                 if (exit)
2506                         break;
2507         }
2508
2509         if (best_lpn) {
2510                 /* Select the best NI on the same net as best_lpn chosen
2511                  * above
2512                  */
2513                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2514                                                         msg, md_cpt);
2515         }
2516
2517         return best_ni;
2518 }
2519
2520 static struct lnet_ni *
2521 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2522 {
2523         struct lnet_ni *best_ni = NULL;
2524         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2525         struct lnet_peer_ni *lpni_entry;
2526
2527         /*
2528          * We must use a consistent source address when sending to a
2529          * non-MR peer. However, a non-MR peer can have multiple NIDs
2530          * on multiple networks, and we may even need to talk to this
2531          * peer on multiple networks -- certain types of
2532          * load-balancing configuration do this.
2533          *
2534          * So we need to pick the NI the peer prefers for this
2535          * particular network.
2536          */
2537         LASSERT(peer_net);
2538         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2539                             lpni_peer_nis) {
2540                 if (lpni_entry->lpni_pref_nnids == 0)
2541                         continue;
2542                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2543                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2544                                                 cpt);
2545                 break;
2546         }
2547
2548         return best_ni;
2549 }
2550
2551 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2552 static int
2553 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2554 {
2555         struct lnet_ni *best_ni = NULL;
2556
2557         /*
2558          * We must use a consistent source address when sending to a
2559          * non-MR peer. However, a non-MR peer can have multiple NIDs
2560          * on multiple networks, and we may even need to talk to this
2561          * peer on multiple networks -- certain types of
2562          * load-balancing configuration do this.
2563          *
2564          * So we need to pick the NI the peer prefers for this
2565          * particular network.
2566          *
2567          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2568          * traffic doesn't care which source NI is used, and we don't actually
2569          * want to restrict local recovery pings to a single source NI.
2570          */
2571         if (!lnet_reserved_msg(sd->sd_msg))
2572                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2573                                                                sd->sd_cpt);
2574
2575         if (!best_ni)
2576                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2577                                                 sd->sd_best_lpni->lpni_peer_net,
2578                                                 sd->sd_msg,
2579                                                 sd->sd_md_cpt);
2580
2581         /* If there is no best_ni we don't have a route */
2582         if (!best_ni) {
2583                 CERROR("no path to %s from net %s\n",
2584                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2585                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2586                 return -EHOSTUNREACH;
2587         }
2588
2589         sd->sd_best_ni = best_ni;
2590
2591         /* Set preferred NI if necessary. */
2592         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2593
2594         return 0;
2595 }
2596
2597
2598 /*
2599  * Source not specified
2600  * Local destination
2601  * Non-MR Peer
2602  *
2603  * always use the same source NID for NMR peers
2604  * If we've talked to that peer before then we already have a preferred
2605  * source NI associated with it. Otherwise, we select a preferred local NI
2606  * and store it in the peer
2607  */
2608 static int
2609 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2610 {
2611         int rc = 0;
2612
2613         /* sd->sd_best_lpni is already set to the final destination */
2614
2615         /*
2616          * At this point we should've created the peer ni and peer. If we
2617          * can't find it, then something went wrong. Instead of asserting,
2618          * output a relevant message and fail the send.
2619          */
2620         if (!sd->sd_best_lpni) {
2621                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2622                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2623                        libcfs_nidstr(&sd->sd_dst_nid));
2624                 return -EFAULT;
2625         }
2626
2627         if (sd->sd_msg->msg_routing) {
2628                 /* If I'm forwarding this message then I can choose any NI
2629                  * on the destination peer net
2630                  */
2631                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2632                                                                sd->sd_peer,
2633                                                                sd->sd_best_lpni->lpni_peer_net,
2634                                                                sd->sd_msg,
2635                                                                sd->sd_md_cpt);
2636                 if (!sd->sd_best_ni) {
2637                         CERROR("Unable to forward message to %s. No local NI available\n",
2638                                libcfs_nidstr(&sd->sd_dst_nid));
2639                         rc = -EHOSTUNREACH;
2640                 }
2641         } else
2642                 rc = lnet_select_preferred_best_ni(sd);
2643
2644         if (!rc)
2645                 rc = lnet_handle_send(sd);
2646
2647         return rc;
2648 }
2649
2650 static int
2651 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2652 {
2653         /*
2654          * NOTE we've already handled the remote peer case. So we only
2655          * need to worry about the local case here.
2656          *
2657          * if we're sending a response, ACK or reply, we need to send it
2658          * to the destination NID given to us. At this point we already
2659          * have the peer_ni we're supposed to send to, so just find the
2660          * best_ni on the peer net and use that. Since we're sending to an
2661          * MR peer, we can just run the selection algorithm on our
2662          * local NIs and pick the best one.
2663          */
2664         if (sd->sd_send_case & SND_RESP) {
2665                 sd->sd_best_ni =
2666                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2667                                                 sd->sd_best_lpni->lpni_peer_net,
2668                                                 sd->sd_msg,
2669                                                 sd->sd_md_cpt);
2670
2671                 if (!sd->sd_best_ni) {
2672                         /*
2673                          * We're not going to deal with not able to send
2674                          * a response to the provided final destination
2675                          */
2676                         CERROR("Can't send response to %s. No local NI available\n",
2677                                 libcfs_nidstr(&sd->sd_dst_nid));
2678                         return -EHOSTUNREACH;
2679                 }
2680
2681                 return lnet_handle_send(sd);
2682         }
2683
2684         /*
2685          * If we get here that means we're sending a fresh request, PUT or
2686          * GET, so we need to run our standard selection algorithm.
2687          * First find the best local interface that's on any of the peer's
2688          * networks.
2689          */
2690         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2691                                         sd->sd_md_cpt,
2692                                         sd->sd_msg,
2693                                         lnet_msg_discovery(sd->sd_msg));
2694         if (sd->sd_best_ni) {
2695                 sd->sd_best_lpni =
2696                   lnet_find_best_lpni(sd->sd_best_ni,
2697                                       lnet_nid_to_nid4(&sd->sd_dst_nid),
2698                                       sd->sd_peer,
2699                                       sd->sd_best_ni->ni_net->net_id);
2700
2701                 /*
2702                  * if we're successful in selecting a peer_ni on the local
2703                  * network, then send to it. Otherwise fall through and
2704                  * try and see if we can reach it over another routed
2705                  * network
2706                  */
2707                 if (sd->sd_best_lpni &&
2708                     nid_same(&sd->sd_best_lpni->lpni_nid,
2709                              &the_lnet.ln_loni->ni_nid)) {
2710                         /*
2711                          * in case we initially started with a routed
2712                          * destination, let's reset to local
2713                          */
2714                         sd->sd_send_case &= ~REMOTE_DST;
2715                         sd->sd_send_case |= LOCAL_DST;
2716                         return lnet_handle_lo_send(sd);
2717                 } else if (sd->sd_best_lpni) {
2718                         /*
2719                          * in case we initially started with a routed
2720                          * destination, let's reset to local
2721                          */
2722                         sd->sd_send_case &= ~REMOTE_DST;
2723                         sd->sd_send_case |= LOCAL_DST;
2724                         return lnet_handle_send(sd);
2725                 }
2726
2727                 CERROR("Internal Error. Expected to have a best_lpni: "
2728                        "%s -> %s\n",
2729                        libcfs_nidstr(&sd->sd_src_nid),
2730                        libcfs_nidstr(&sd->sd_dst_nid));
2731
2732                 return -EFAULT;
2733         }
2734
2735         /*
2736          * Peer doesn't have a local network. Let's see if there is
2737          * a remote network we can reach it on.
2738          */
2739         return PASS_THROUGH;
2740 }
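/*
 * PASS_THROUGH above tells the caller, lnet_handle_any_mr_dst(), that
 * no local path was found and the routed-destination logic should take
 * over; it never escapes the selection state machine.
 */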
2741
2742 /*
2743  * Case 1:
2744  *      Source NID not specified
2745  *      Local destination
2746  *      MR peer
2747  *
2748  * Case 2:
2749  *      Source NID not specified
2750  *      Remote destination
2751  *      MR peer
2752  *
2753  * In both of these cases if we're sending a response, ACK or REPLY, then
2754  * we need to send to the destination NID provided.
2755  *
2756  * In the remote case let's deal with MR routers.
2757  *
2758  */
2759
2760 static int
2761 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2762 {
2763         int rc = 0;
2764         struct lnet_peer *gw_peer = NULL;
2765         struct lnet_peer_ni *gw_lpni = NULL;
2766
2767         /*
2768          * handle sending a response to a remote peer here so we don't
2769          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2770          */
2771         if (sd->sd_send_case & REMOTE_DST &&
2772             sd->sd_send_case & SND_RESP) {
2773                 struct lnet_peer_ni *gw;
2774                 struct lnet_peer *gw_peer;
2775
2776                 rc = lnet_handle_find_routed_path(
2777                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2778                 if (rc < 0) {
2779                         CERROR("Can't send response to %s. No route available\n",
2780                                libcfs_nidstr(&sd->sd_dst_nid));
2781                         return -EHOSTUNREACH;
2782                 } else if (rc > 0) {
2783                         return rc;
2784                 }
2785
2786                 sd->sd_best_lpni = gw;
2787                 sd->sd_peer = gw_peer;
2788
2789                 return lnet_handle_send(sd);
2790         }
2791
2792         /*
2793          * Even though the NID for the peer might not be on a local network,
2794          * since the peer is MR there could be other interfaces on the
2795          * local network. In that case we'd still like to prefer the local
2796          * network over the routed network. If we're unable to do that
2797          * then we select the best router among the different routed networks,
2798          * and if the router is MR then we can deal with it as such.
2799          */
2800         rc = lnet_handle_any_mr_dsta(sd);
2801         if (rc != PASS_THROUGH)
2802                 return rc;
2803
2804         /*
2805          * Now that we must route to the destination, we must consider the
2806          * MR case, where the destination has multiple interfaces, some of
2807          * which we can route to and others we do not. For this reason we
2808          * need to select the destination which we can route to and if
2809          * there are multiple, we need to round robin.
2810          */
2811         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2812                                           &gw_lpni, &gw_peer);
2813         if (rc)
2814                 return rc;
2815
2816         sd->sd_send_case &= ~LOCAL_DST;
2817         sd->sd_send_case |= REMOTE_DST;
2818
2819         sd->sd_peer = gw_peer;
2820         sd->sd_best_lpni = gw_lpni;
2821
2822         return lnet_handle_send(sd);
2823 }
2824
2825 /*
2826  * Source not specified
2827  * Remote destination
2828  * Non-MR peer
2829  *
2830  * Must send to the specified peer NID using the same source NID that
2831  * we've used before. If this is the first time we talk to that peer,
2832  * then find the source NI and assign it as the preferred NI for that peer.
2833  */
2834 static int
2835 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2836 {
2837         int rc;
2838         struct lnet_peer_ni *gw_lpni = NULL;
2839         struct lnet_peer *gw_peer = NULL;
2840
2841         /*
2842          * Let's see if we have a preferred NI to talk to this NMR peer
2843          */
2844         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2845                                                               sd->sd_cpt);
2846
2847         /*
2848          * find the router and that'll find the best NI if we didn't find
2849          * it already.
2850          */
2851         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2852                                           &gw_peer);
2853         if (rc)
2854                 return rc;
2855
2856         /*
2857          * set the best_ni we've chosen as the preferred one for
2858          * this peer
2859          */
2860         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2861
2862         /* we'll be sending to the gw */
2863         sd->sd_best_lpni = gw_lpni;
2864         sd->sd_peer = gw_peer;
2865
2866         return lnet_handle_send(sd);
2867 }
2868
2869 static int
2870 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2871 {
2872         /*
2873          * turn off the SND_RESP bit.
2874          * It will be checked in the case handling
2875          */
2876         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2877
2878         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2879                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2880                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2881                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2882                 libcfs_nidstr(&sd->sd_dst_nid),
2883                 (send_case & LOCAL_DST) ? "local" : "routed");
2884
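        /*
         * NB: each case label below is the composition of one flag from
         * each group set in lnet_select_pathway(): SRC_SPEC/SRC_ANY,
         * LOCAL_DST/REMOTE_DST and MR_DST/NMR_DST, so the switch
         * dispatches on the exact combination computed there.
         */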
2885         switch (send_case) {
2886         /*
2887          * For all cases where the source is specified, we should always
2888          * use the destination NID, whether it's an MR destination or not,
2889          * since we're continuing a series of related messages for the
2890          * same RPC
2891          */
2892         case SRC_SPEC_LOCAL_NMR_DST:
2893                 return lnet_handle_spec_local_nmr_dst(sd);
2894         case SRC_SPEC_LOCAL_MR_DST:
2895                 return lnet_handle_spec_local_mr_dst(sd);
2896         case SRC_SPEC_ROUTER_NMR_DST:
2897         case SRC_SPEC_ROUTER_MR_DST:
2898                 return lnet_handle_spec_router_dst(sd);
2899         case SRC_ANY_LOCAL_NMR_DST:
2900                 return lnet_handle_any_local_nmr_dst(sd);
2901         case SRC_ANY_LOCAL_MR_DST:
2902         case SRC_ANY_ROUTER_MR_DST:
2903                 return lnet_handle_any_mr_dst(sd);
2904         case SRC_ANY_ROUTER_NMR_DST:
2905                 return lnet_handle_any_router_nmr_dst(sd);
2906         default:
2907                 CERROR("Unknown send case\n");
2908                 return -1;
2909         }
2910 }
2911
2912 static int
2913 lnet_select_pathway(struct lnet_nid *src_nid,
2914                     struct lnet_nid *dst_nid,
2915                     struct lnet_msg *msg,
2916                     struct lnet_nid *rtr_nid)
2917 {
2918         struct lnet_peer_ni *lpni;
2919         struct lnet_peer *peer;
2920         struct lnet_send_data send_data;
2921         int cpt, rc;
2922         int md_cpt;
2923         __u32 send_case = 0;
2924         bool final_hop;
2925         bool mr_forwarding_allowed;
2926
2927         memset(&send_data, 0, sizeof(send_data));
2928
2929         /*
2930          * get an initial CPT to use for locking. The idea here is not to
2931          * serialize the calls to select_pathway, so that as many
2932          * operations can run concurrently as possible. To do that we use
2933          * the CPT where this call is being executed. Later on when we
2934          * determine the CPT to use in lnet_message_commit, we switch the
2935          * lock and check if there was any configuration change.  If none,
2936          * then we proceed, if there is, then we restart the operation.
2937          */
2938         cpt = lnet_net_lock_current();
2939
2940         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2941         if (md_cpt == CFS_CPT_ANY)
2942                 md_cpt = cpt;
2943
2944 again:
2945
2946         /*
2947          * If we're being asked to send to the loopback interface, there
2948          * is no need to go through any selection. We can just shortcut
2949          * the entire process and send over lolnd
2950          */
2951         send_data.sd_msg = msg;
2952         send_data.sd_cpt = cpt;
2953         if (nid_is_lo0(dst_nid)) {
2954                 rc = lnet_handle_lo_send(&send_data);
2955                 lnet_net_unlock(cpt);
2956                 return rc;
2957         }
2958
2959         /*
2960          * find an existing peer_ni, or create one and mark it as having been
2961          * created due to network traffic. This call will create the
2962          * peer->peer_net->peer_ni tree.
2963          */
2964         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
2965         if (IS_ERR(lpni)) {
2966                 lnet_net_unlock(cpt);
2967                 return PTR_ERR(lpni);
2968         }
2969
2970         /*
2971          * Cache the original src_nid and rtr_nid. If we need to resend the
2972          * message then we'll need to know whether the src_nid was originally
2973          * specified for this message. If it was originally specified,
2974          * then we need to keep using the same src_nid since it's
2975          * continuing the same sequence of messages. Similarly, rtr_nid will
2976          * affect our choice of next hop.
2977          */
2978         if (src_nid)
2979                 msg->msg_src_nid_param = *src_nid;
2980         else
2981                 msg->msg_src_nid_param = LNET_ANY_NID;
2982         if (rtr_nid)
2983                 msg->msg_rtr_nid_param = *rtr_nid;
2984         else
2985                 msg->msg_rtr_nid_param = LNET_ANY_NID;
2986
2987         /*
2988          * If necessary, perform discovery on the peer that owns this peer_ni.
2989          * Note, this can result in the ownership of this peer_ni changing
2990          * to another peer object.
2991          */
2992         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2993         if (rc) {
2994                 lnet_peer_ni_decref_locked(lpni);
2995                 lnet_net_unlock(cpt);
2996                 return rc;
2997         }
2998         lnet_peer_ni_decref_locked(lpni);
2999
3000         peer = lpni->lpni_peer_net->lpn_peer;
3001
3002         /*
3003          * Identify the different send cases
3004          */
3005         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3006                 send_case |= SRC_ANY;
3007                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3008                         send_case |= LOCAL_DST;
3009                 else
3010                         send_case |= REMOTE_DST;
3011         } else {
3012                 send_case |= SRC_SPEC;
3013                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3014                         send_case |= LOCAL_DST;
3015                 else
3016                         send_case |= REMOTE_DST;
3017         }
3018
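        /*
         * We are the final hop if we are routing this message and the
         * destination NID sits on one of our local networks.
         */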
3019         final_hop = false;
3020         if (msg->msg_routing && (send_case & LOCAL_DST))
3021                 final_hop = true;
3022
3023         /* Determine whether to allow MR forwarding for this message.
3024          * NB: MR forwarding is allowed if the message originator and the
3025          * destination are both MR capable, and the destination lpni that was
3026          * originally chosen by the originator is unhealthy or down.
3027          * We check the MR capability of the destination further below
3028          */
3029         mr_forwarding_allowed = false;
3030         if (final_hop) {
3031                 struct lnet_peer *src_lp;
3032                 struct lnet_peer_ni *src_lpni;
3033
3034                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3035                                                    NULL, cpt);
3036                 /* We don't fail the send if we hit any errors here. We'll just
3037                  * try to send it via non-multi-rail criteria
3038                  */
3039                 if (!IS_ERR(src_lpni)) {
3040                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3041                         lnet_peer_ni_decref_locked(src_lpni);
3042                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3043                         if (lnet_peer_is_multi_rail(src_lp) &&
3044                             !lnet_is_peer_ni_alive(lpni))
3045                                 mr_forwarding_allowed = true;
3046
3047                 }
3048                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3049                        mr_forwarding_allowed ? "allowed" : "not allowed");
3050         }
3051
3052         /*
3053          * Deal with the peer as NMR in the following cases:
3054          * 1. the peer is NMR
3055          * 2. We're trying to recover a specific peer NI
3056          * 3. I'm a router sending to the final destination and MR forwarding is
3057          *    not allowed for this message (as determined above).
3058          *    In this case the source of the message would've
3059          *    already selected the final destination so my job
3060          *    is to honor the selection.
3061          */
3062         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3063             (final_hop && !mr_forwarding_allowed))
3064                 send_case |= NMR_DST;
3065         else
3066                 send_case |= MR_DST;
3067
3068         if (lnet_msg_is_response(msg))
3069                 send_case |= SND_RESP;
3070
3071         /* assign parameters to the send_data */
3072         if (rtr_nid)
3073                 send_data.sd_rtr_nid = *rtr_nid;
3074         else
3075                 send_data.sd_rtr_nid = LNET_ANY_NID;
3076         if (src_nid)
3077                 send_data.sd_src_nid = *src_nid;
3078         else
3079                 send_data.sd_src_nid = LNET_ANY_NID;
3080         send_data.sd_dst_nid = *dst_nid;
3081         send_data.sd_best_lpni = lpni;
3082         /*
3083          * keep a pointer to the final destination in case we're going to
3084          * route, so we'll need to access it later
3085          */
3086         send_data.sd_final_dst_lpni = lpni;
3087         send_data.sd_peer = peer;
3088         send_data.sd_md_cpt = md_cpt;
3089         send_data.sd_send_case = send_case;
3090
3091         rc = lnet_handle_send_case_locked(&send_data);
3092
3093         /*
3094          * Update the local cpt since send_data.sd_cpt might've been
3095          * updated as a result of calling lnet_handle_send_case_locked().
3096          */
3097         cpt = send_data.sd_cpt;
3098
3099         if (rc == REPEAT_SEND)
3100                 goto again;
3101
3102         lnet_net_unlock(cpt);
3103
3104         return rc;
3105 }
3106
3107 int
3108 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3109           struct lnet_nid *rtr_nid)
3110 {
3111         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3112         int rc;
3113
3114         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3115         LASSERT(msg->msg_txpeer == NULL);
3116         LASSERT(msg->msg_txni == NULL);
3117         LASSERT(!msg->msg_sending);
3118         LASSERT(!msg->msg_target_is_router);
3119         LASSERT(!msg->msg_receiving);
3120
3121         msg->msg_sending = 1;
3122
3123         LASSERT(!msg->msg_tx_committed);
3124
3125         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3126         if (rc < 0) {
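                /* Map the selection failure to a health status: an
                 * unreachable destination counts against the remote side,
                 * anything else against the local interface.
                 */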
3127                 if (rc == -EHOSTUNREACH)
3128                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3129                 else
3130                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3131                 return rc;
3132         }
3133
3134         if (rc == LNET_CREDIT_OK)
3135                 lnet_ni_send(msg->msg_txni, msg);
3136
3137         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3138         return 0;
3139 }
3140
3141 enum lnet_mt_event_type {
3142         MT_TYPE_LOCAL_NI = 0,
3143         MT_TYPE_PEER_NI
3144 };
3145
3146 struct lnet_mt_event_info {
3147         enum lnet_mt_event_type mt_type;
3148         struct lnet_nid mt_nid;
3149 };
3150
3151 /* called with res_lock held */
3152 void
3153 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3154 {
3155         struct lnet_rsp_tracker *rspt;
3156
3157         /*
3158          * msg has a refcount on the MD so the MD is not going away.
3159          * The rspt queue for the cpt is protected by
3160          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3161          */
3162         if (!md->md_rspt_ptr)
3163                 return;
3164
3165         rspt = md->md_rspt_ptr;
3166
3167         /* debug code */
3168         LASSERT(rspt->rspt_cpt == cpt);
3169
3170         md->md_rspt_ptr = NULL;
3171
3172         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3173                 /*
3174                  * The monitor thread has invalidated this handle because the
3175                  * response timed out, but it failed to lookup the MD. That
3176                  * means this response tracker is on the zombie list. We can
3177                  * safely remove it under the resource lock (held by caller) and
3178                  * free the response tracker block.
3179                  */
3180                 list_del(&rspt->rspt_on_list);
3181                 lnet_rspt_free(rspt, cpt);
3182         } else {
3183                 /*
3184                  * invalidate the handle to indicate that a response has been
3185                  * received, which will then lead the monitor thread to clean up
3186                  * the rspt block.
3187                  */
3188                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3189         }
3190 }
3191
3192 void
3193 lnet_clean_zombie_rstqs(void)
3194 {
3195         struct lnet_rsp_tracker *rspt, *tmp;
3196         int i;
3197
3198         cfs_cpt_for_each(i, lnet_cpt_table()) {
3199                 list_for_each_entry_safe(rspt, tmp,
3200                                          the_lnet.ln_mt_zombie_rstqs[i],
3201                                          rspt_on_list) {
3202                         list_del(&rspt->rspt_on_list);
3203                         lnet_rspt_free(rspt, i);
3204                 }
3205         }
3206
3207         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3208 }
3209
3210 static void
3211 lnet_finalize_expired_responses(void)
3212 {
3213         struct lnet_libmd *md;
3214         struct lnet_rsp_tracker *rspt, *tmp;
3215         ktime_t now;
3216         int i;
3217
3218         if (the_lnet.ln_mt_rstq == NULL)
3219                 return;
3220
3221         cfs_cpt_for_each(i, lnet_cpt_table()) {
3222                 LIST_HEAD(local_queue);
3223
3224                 lnet_net_lock(i);
3225                 if (!the_lnet.ln_mt_rstq[i]) {
3226                         lnet_net_unlock(i);
3227                         continue;
3228                 }
3229                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3230                 lnet_net_unlock(i);
3231
3232                 now = ktime_get();
3233
3234                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3235                         /*
3236                          * The rspt mdh will be invalidated when a response
3237                          * is received or whenever we want to discard the
3238                          * block. The monitor thread will walk the queue
3239                          * and clean up any rspts with an invalid mdh.
3240                          * The monitor thread will walk the queue until
3241                          * the first unexpired rspt block. This means that
3242                          * some rspt blocks which received their
3243                          * corresponding responses will linger in the
3244                          * queue until they are cleaned up eventually.
3245                          */
3246                         lnet_res_lock(i);
3247                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3248                                 lnet_res_unlock(i);
3249                                 list_del(&rspt->rspt_on_list);
3250                                 lnet_rspt_free(rspt, i);
3251                                 continue;
3252                         }
3253
3254                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3255                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3256                                 struct lnet_peer_ni *lpni;
3257                                 struct lnet_nid nid;
3258
3259                                 md = lnet_handle2md(&rspt->rspt_mdh);
3260                                 if (!md) {
3261                                         /* MD has been queued for unlink, but
3262                                          * rspt hasn't been detached (Note we've
3263                                          * checked above that the rspt_mdh is
3264                                          * valid). Since we cannot lookup the MD
3265                                          * we're unable to detach the rspt
3266                                          * ourselves. Thus, move the rspt to the
3267                                          * zombie list where we'll wait for
3268                                          * either:
3269                                          *   1. The remaining operations on the
3270                                          *   MD to complete. In this case the
3271                                          *   final operation will result in
3272                                          *   lnet_msg_detach_md()->
3273                                          *   lnet_detach_rsp_tracker() where
3274                                          *   we will clean up this response
3275                                          *   tracker.
3276                                          *   2. LNet to shutdown. In this case
3277                                          *   we'll wait until after all LND Nets
3278                                          *   have shutdown and then we can
3279                                          *   safely free any remaining response
3280                                          *   tracker blocks on the zombie list.
3281                                          * Note: We need to hold the resource
3282                                          * lock when adding to the zombie list
3283                                          * because we may have concurrent access
3284                                          * with lnet_detach_rsp_tracker().
3285                                          */
3286                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3287                                         list_move(&rspt->rspt_on_list,
3288                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3289                                         lnet_res_unlock(i);
3290                                         continue;
3291                                 }
3292                                 LASSERT(md->md_rspt_ptr == rspt);
3293                                 md->md_rspt_ptr = NULL;
3294                                 lnet_res_unlock(i);
3295
3296                                 LNetMDUnlink(rspt->rspt_mdh);
3297
3298                                 nid = rspt->rspt_next_hop_nid;
3299
3300                                 list_del(&rspt->rspt_on_list);
3301                                 lnet_rspt_free(rspt, i);
3302
3303                                 /* If we're shutting down we just want to clean
3304                                  * up the rspt blocks
3305                                  */
3306                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3307                                         continue;
3308
3309                                 lnet_net_lock(i);
3310                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3311                                 lnet_net_unlock(i);
3312
3313                                 CDEBUG(D_NET,
3314                                        "Response timeout: md = %p: nid = %s\n",
3315                                        md, libcfs_nidstr(&nid));
3316
3317                                 /*
3318                                  * If there is a timeout on the response
3319                                  * from the next hop decrement its health
3320                                  * value so that we don't use it
3321                                  */
3322                                 lnet_net_lock(0);
3323                                 lpni = lnet_peer_ni_find_locked(&nid);
3324                                 if (lpni) {
3325                                         lnet_handle_remote_failure_locked(lpni);
3326                                         lnet_peer_ni_decref_locked(lpni);
3327                                 }
3328                                 lnet_net_unlock(0);
3329                         } else {
3330                                 lnet_res_unlock(i);
3331                                 break;
3332                         }
3333                 }
3334
3335                 if (!list_empty(&local_queue)) {
3336                         lnet_net_lock(i);
3337                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3338                         lnet_net_unlock(i);
3339                 }
3340         }
3341 }
3342
3343 static void
3344 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3345 {
3346         struct lnet_msg *msg;
3347
3348         while (!list_empty(resendq)) {
3349                 struct lnet_peer_ni *lpni;
3350
3351                 msg = list_entry(resendq->next, struct lnet_msg,
3352                                  msg_list);
3353
3354                 list_del_init(&msg->msg_list);
3355
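                /* The net lock is dropped around lnet_finalize() and
                 * lnet_send() below, since both take LNet locks internally;
                 * it is re-acquired before the next iteration.
                 */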
3356                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3357                 if (!lpni) {
3358                         lnet_net_unlock(cpt);
3359                         CERROR("Expected that a peer is already created for %s\n",
3360                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3361                         msg->msg_no_resend = true;
3362                         lnet_finalize(msg, -EFAULT);
3363                         lnet_net_lock(cpt);
3364                 } else {
3365                         int rc;
3366
3367                         lnet_peer_ni_decref_locked(lpni);
3368
3369                         lnet_net_unlock(cpt);
3370                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3371                                libcfs_nidstr(&msg->msg_src_nid_param),
3372                                libcfs_idstr(&msg->msg_target),
3373                                lnet_msgtyp2str(msg->msg_type),
3374                                msg->msg_recovery,
3375                                msg->msg_retry_count);
3376                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3377                                        &msg->msg_rtr_nid_param);
3378                         if (rc) {
3379                                 CERROR("Error sending %s to %s: %d\n",
3380                                        lnet_msgtyp2str(msg->msg_type),
3381                                        libcfs_idstr(&msg->msg_target), rc);
3382                                 msg->msg_no_resend = true;
3383                                 lnet_finalize(msg, rc);
3384                         }
3385                         lnet_net_lock(cpt);
3386                         if (!rc)
3387                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3388                 }
3389         }
3390 }
3391
3392 static void
3393 lnet_resend_pending_msgs(void)
3394 {
3395         int i;
3396
3397         cfs_cpt_for_each(i, lnet_cpt_table()) {
3398                 lnet_net_lock(i);
3399                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3400                 lnet_net_unlock(i);
3401         }
3402 }
3403
3404 /* called with cpt and ni_lock held */
3405 static void
3406 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3407 {
3408         struct lnet_handle_md recovery_mdh;
3409
3410         LNetInvalidateMDHandle(&recovery_mdh);
3411
3412         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3413             force) {
3414                 recovery_mdh = ni->ni_ping_mdh;
3415                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3416         }
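        /* Unlink the MD with the ni and net locks dropped; both are
         * re-acquired below so the caller's locking state is preserved.
         */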
3417         lnet_ni_unlock(ni);
3418         lnet_net_unlock(cpt);
3419         if (!LNetMDHandleIsInvalid(recovery_mdh))
3420                 LNetMDUnlink(recovery_mdh);
3421         lnet_net_lock(cpt);
3422         lnet_ni_lock(ni);
3423 }
3424
3425 static void
3426 lnet_recover_local_nis(void)
3427 {
3428         struct lnet_mt_event_info *ev_info;
3429         LIST_HEAD(processed_list);
3430         LIST_HEAD(local_queue);
3431         struct lnet_handle_md mdh;
3432         struct lnet_ni *tmp;
3433         struct lnet_ni *ni;
3434         struct lnet_nid nid;
3435         int healthv;
3436         int rc;
3437         time64_t now;
3438
3439         /*
3440          * splice the recovery queue on a local queue. We will iterate
3441          * through the local queue and update it as needed. Once we're
3442          * done with the traversal, we'll splice the local queue back on
3443          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3444          * will be traversed in the next iteration.
3445          */
3446         lnet_net_lock(0);
3447         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3448                          &local_queue);
3449         lnet_net_unlock(0);
3450
3451         now = ktime_get_seconds();
3452
3453         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3454                 /*
3455                  * if an NI is being deleted or it is now healthy, there
3456                  * is no need to keep it around in the recovery queue.
3457                  * The monitor thread is the only thread responsible for
3458                  * removing the NI from the recovery queue.
3459                  * Multiple threads can be adding NIs to the recovery
3460                  * queue.
3461                  */
3462                 healthv = atomic_read(&ni->ni_healthv);
3463
3464                 lnet_net_lock(0);
3465                 lnet_ni_lock(ni);
3466                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3467                     healthv == LNET_MAX_HEALTH_VALUE) {
3468                         list_del_init(&ni->ni_recovery);
3469                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3470                         lnet_ni_unlock(ni);
3471                         lnet_ni_decref_locked(ni, 0);
3472                         lnet_net_unlock(0);
3473                         continue;
3474                 }
3475
3476                 /*
3477                  * if the local NI failed recovery we must unlink the md.
3478                  * But we want to keep the local_ni on the recovery queue
3479                  * so we can continue the attempts to recover it.
3480                  */
3481                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3482                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3483                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3484                 }
3485
3486
3487                 lnet_ni_unlock(ni);
3488
3489                 if (now < ni->ni_next_ping) {
3490                         lnet_net_unlock(0);
3491                         continue;
3492                 }
3493
3494                 lnet_net_unlock(0);
3495
3496                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3497                        libcfs_nidstr(&ni->ni_nid));
3498
3499                 lnet_ni_lock(ni);
3500                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3501                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3502                         lnet_ni_unlock(ni);
3503
3504                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3505                         if (!ev_info) {
3506                                 CERROR("out of memory. Can't recover %s\n",
3507                                        libcfs_nidstr(&ni->ni_nid));
3508                                 lnet_ni_lock(ni);
3509                                 ni->ni_recovery_state &=
3510                                   ~LNET_NI_RECOVERY_PENDING;
3511                                 lnet_ni_unlock(ni);
3512                                 continue;
3513                         }
3514
3515                         mdh = ni->ni_ping_mdh;
3516                         /*
3517                          * Invalidate the ni mdh in case it's deleted.
3518                          * We'll unlink the mdh in this case below.
3519                          */
3520                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3521                         nid = ni->ni_nid;
3522
3523                         /*
3524                          * remove the NI from the local queue and drop the
3525                          * reference count to it while we're recovering
3526                          * it. The reason is that the NI could be
3527                          * deleted, and if we do not drop our reference
3528                          * the deletion code will enter a loop waiting
3529                          * for the reference count to be dropped while
3530                          * holding the ln_mutex_lock(). When we look up
3531                          * the peer to send to in lnet_select_pathway()
3532                          * we will try to take the ln_mutex_lock() as
3533                          * well, leading to a deadlock. By dropping the
3534                          * refcount and removing the NI from the list,
3535                          * we allow it to be removed; we then use the
3536                          * cached NID to look it up again. If it is
3537                          * gone, we just continue examining the rest of
3538                          * the queue.
3539                          */
3540                         lnet_net_lock(0);
3541                         list_del_init(&ni->ni_recovery);
3542                         lnet_ni_decref_locked(ni, 0);
3543                         lnet_net_unlock(0);
3544
3545                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3546                         ev_info->mt_nid = nid;
3547                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3548                                             ev_info, the_lnet.ln_mt_handler,
3549                                             true);
3550                         /* lookup the nid again */
3551                         lnet_net_lock(0);
3552                         ni = lnet_nid_to_ni_locked(&nid, 0);
3553                         if (!ni) {
3554                                 /*
3555                                  * the NI has been deleted when we dropped
3556                                  * the ref count
3557                                  */
3558                                 lnet_net_unlock(0);
3559                                 LNetMDUnlink(mdh);
3560                                 continue;
3561                         }
3562                         ni->ni_ping_count++;
3563
3564                         ni->ni_ping_mdh = mdh;
3565                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3566                                                         now);
3567
3568                         if (rc) {
3569                                 lnet_ni_lock(ni);
3570                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3571                                 lnet_ni_unlock(ni);
3572                         }
3573                         lnet_net_unlock(0);
3574                 } else
3575                         lnet_ni_unlock(ni);
3576         }
3577
3578         /*
3579          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3580          * reexamined in the next iteration.
3581          */
3582         list_splice_init(&processed_list, &local_queue);
3583         lnet_net_lock(0);
3584         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3585         lnet_net_unlock(0);
3586 }
3587
3588 static int
3589 lnet_resendqs_create(void)
3590 {
3591         struct list_head **resendqs;
3592         resendqs = lnet_create_array_of_queues();
3593
3594         if (!resendqs)
3595                 return -ENOMEM;
3596
3597         lnet_net_lock(LNET_LOCK_EX);
3598         the_lnet.ln_mt_resendqs = resendqs;
3599         lnet_net_unlock(LNET_LOCK_EX);
3600
3601         return 0;
3602 }
3603
3604 static void
3605 lnet_clean_local_ni_recoveryq(void)
3606 {
3607         struct lnet_ni *ni;
3608
3609         /* This is only called when the monitor thread has stopped */
3610         lnet_net_lock(0);
3611
3612         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3613                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3614                                 struct lnet_ni, ni_recovery);
3615                 list_del_init(&ni->ni_recovery);
3616                 lnet_ni_lock(ni);
3617                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3618                 lnet_ni_unlock(ni);
3619                 lnet_ni_decref_locked(ni, 0);
3620         }
3621
3622         lnet_net_unlock(0);
3623 }
3624
3625 static void
3626 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3627                                      bool force)
3628 {
3629         struct lnet_handle_md recovery_mdh;
3630
3631         LNetInvalidateMDHandle(&recovery_mdh);
3632
3633         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3634                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3635                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3636         }
3637         spin_unlock(&lpni->lpni_lock);
3638         lnet_net_unlock(cpt);
3639         if (!LNetMDHandleIsInvalid(recovery_mdh))
3640                 LNetMDUnlink(recovery_mdh);
3641         lnet_net_lock(cpt);
3642         spin_lock(&lpni->lpni_lock);
3643 }
3644
3645 static void
3646 lnet_clean_peer_ni_recoveryq(void)
3647 {
3648         struct lnet_peer_ni *lpni, *tmp;
3649
3650         lnet_net_lock(LNET_LOCK_EX);
3651
3652         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3653                                  lpni_recovery) {
3654                 list_del_init(&lpni->lpni_recovery);
3655                 spin_lock(&lpni->lpni_lock);
3656                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3657                 spin_unlock(&lpni->lpni_lock);
3658                 lnet_peer_ni_decref_locked(lpni);
3659         }
3660
3661         lnet_net_unlock(LNET_LOCK_EX);
3662 }
3663
3664 static void
3665 lnet_clean_resendqs(void)
3666 {
3667         struct lnet_msg *msg, *tmp;
3668         LIST_HEAD(msgs);
3669         int i;
3670
3671         cfs_cpt_for_each(i, lnet_cpt_table()) {
3672                 lnet_net_lock(i);
3673                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3674                 lnet_net_unlock(i);
3675                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3676                         list_del_init(&msg->msg_list);
3677                         msg->msg_no_resend = true;
3678                         lnet_finalize(msg, -ESHUTDOWN);
3679                 }
3680         }
3681
3682         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3683 }
3684
3685 static void
3686 lnet_recover_peer_nis(void)
3687 {
3688         struct lnet_mt_event_info *ev_info;
3689         LIST_HEAD(processed_list);
3690         LIST_HEAD(local_queue);
3691         struct lnet_handle_md mdh;
3692         struct lnet_peer_ni *lpni;
3693         struct lnet_peer_ni *tmp;
3694         struct lnet_nid nid;
3695         int healthv;
3696         int rc;
3697         time64_t now;
3698
3699         /*
3700          * Always use cpt 0 for locking across all interactions with
3701          * ln_mt_peerNIRecovq
3702          */
3703         lnet_net_lock(0);
3704         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3705                          &local_queue);
3706         lnet_net_unlock(0);
3707
3708         now = ktime_get_seconds();
3709
3710         list_for_each_entry_safe(lpni, tmp, &local_queue,
3711                                  lpni_recovery) {
3712                 /*
3713                  * The same protection strategy is used here as is in the
3714                  * local recovery case.
3715                  */
3716                 lnet_net_lock(0);
3717                 healthv = atomic_read(&lpni->lpni_healthv);
3718                 spin_lock(&lpni->lpni_lock);
3719                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3720                     healthv == LNET_MAX_HEALTH_VALUE) {
3721                         list_del_init(&lpni->lpni_recovery);
3722                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3723                         spin_unlock(&lpni->lpni_lock);
3724                         lnet_peer_ni_decref_locked(lpni);
3725                         lnet_net_unlock(0);
3726                         continue;
3727                 }
3728
3729                 /*
3730                  * If the peer NI has failed recovery we must unlink the
3731                  * md. But we want to keep the peer ni on the recovery
3732                  * queue so we can try to continue recovering it
3733                  */
3734                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3735                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3736                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3737                 }
3738
3739                 spin_unlock(&lpni->lpni_lock);
3740
3741                 if (now < lpni->lpni_next_ping) {
3742                         lnet_net_unlock(0);
3743                         continue;
3744                 }
3745
3746                 lnet_net_unlock(0);
3747
3748                 /*
3749                  * NOTE: we're racing with peer deletion from user space.
3750                  * It's possible that a peer is deleted after we check its
3751                  * state. In this case the recovery can create a new peer
3752                  */
3753                 spin_lock(&lpni->lpni_lock);
3754                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3755                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3756                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3757                         spin_unlock(&lpni->lpni_lock);
3758
3759                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3760                         if (!ev_info) {
3761                                 CERROR("out of memory. Can't recover %s\n",
3762                                        libcfs_nidstr(&lpni->lpni_nid));
3763                                 spin_lock(&lpni->lpni_lock);
3764                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3765                                 spin_unlock(&lpni->lpni_lock);
3766                                 continue;
3767                         }
3768
3769                         /* look at the comments in lnet_recover_local_nis() */
3770                         mdh = lpni->lpni_recovery_ping_mdh;
3771                         nid = lpni->lpni_nid;
3772                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3773                         lnet_net_lock(0);
3774                         list_del_init(&lpni->lpni_recovery);
3775                         lnet_peer_ni_decref_locked(lpni);
3776                         lnet_net_unlock(0);
3777
3778                         ev_info->mt_type = MT_TYPE_PEER_NI;
3779                         ev_info->mt_nid = nid;
3780                         rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN,
3781                                             ev_info, the_lnet.ln_mt_handler,
3782                                             true);
3783                         lnet_net_lock(0);
3784                         /*
3785                          * lnet_peer_ni_find_locked() grabs a refcount for
3786                          * us. No need to take it explicitly.
3787                          */
3788                         lpni = lnet_peer_ni_find_locked(&nid);
3789                         if (!lpni) {
3790                                 lnet_net_unlock(0);
3791                                 LNetMDUnlink(mdh);
3792                                 continue;
3793                         }
3794
3795                         lpni->lpni_ping_count++;
3796
3797                         lpni->lpni_recovery_ping_mdh = mdh;
3798
3799                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3800                                                              &processed_list,
3801                                                              now);
3802                         if (rc) {
3803                                 spin_lock(&lpni->lpni_lock);
3804                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3805                                 spin_unlock(&lpni->lpni_lock);
3806                         }
3807
3808                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3809                         lnet_peer_ni_decref_locked(lpni);
3810                         lnet_net_unlock(0);
3811                 } else
3812                         spin_unlock(&lpni->lpni_lock);
3813         }
3814
3815         list_splice_init(&processed_list, &local_queue);
3816         lnet_net_lock(0);
3817         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3818         lnet_net_unlock(0);
3819 }
3820
3821 static int
3822 lnet_monitor_thread(void *arg)
3823 {
3824         time64_t rsp_timeout = 0;
3825         time64_t now;
3826
3827         wait_for_completion(&the_lnet.ln_started);
3828         /*
3829          * The monitor thread takes care of the following:
3830          *  1. Checks the aliveness of routers
3831          *  2. Checks if there are messages on the resend queue to resend
3832          *     them.
3833          *  3. Check if there are any NIs on the local recovery queue and
3834          *     pings them
3835          *  4. Checks if there are any NIs on the remote recovery queue
3836          *     and pings them.
3837          */
3838         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3839                 now = ktime_get_real_seconds();
3840
3841                 if (lnet_router_checker_active())
3842                         lnet_check_routers();
3843
3844                 lnet_resend_pending_msgs();
3845
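                /* Expired response trackers are reaped at most once every
                 * lnet_transaction_timeout / 2 seconds.
                 */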
3846                 if (now >= rsp_timeout) {
3847                         lnet_finalize_expired_responses();
3848                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3849                 }
3850
3851                 lnet_recover_local_nis();
3852                 lnet_recover_peer_nis();
3853
3854                 /*
3855                  * TODO: do we need to check whether we should sleep without
3856                  * a timeout? Technically, an active system will always
3857                  * have messages in flight, so such a check would always
3858                  * evaluate to false. And on an idle system, do we care
3859                  * if we wake up every second? That said, we have seen
3860                  * complaints that an idle thread wakes up
3861                  * unnecessarily.
3862                  */
3863                 wait_for_completion_interruptible_timeout(
3864                         &the_lnet.ln_mt_wait_complete,
3865                         cfs_time_seconds(1));
3866                 /* Must re-init the completion before testing anything,
3867                  * including ln_mt_state.
3868                  */
3869                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3870         }
3871
3872         /* Shutting down */
3873         lnet_net_lock(LNET_LOCK_EX);
3874         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3875         lnet_net_unlock(LNET_LOCK_EX);
3876
3877         /* signal that the monitor thread is exiting */
3878         up(&the_lnet.ln_mt_signal);
3879
3880         return 0;
3881 }
3882
3883 /*
3884  * lnet_send_ping
3885  * Sends a ping.
3886  * Returns == 0 if success
3887  * Returns > 0 if LNetMDBind or prior fails
3888  * Returns < 0 if LNetGet fails
3889  */
3890 int
3891 lnet_send_ping(struct lnet_nid *dest_nid,
3892                struct lnet_handle_md *mdh, int nnis,
3893                void *user_data, lnet_handler_t handler, bool recovery)
3894 {
3895         struct lnet_md md = { NULL };
3896         struct lnet_process_id id;
3897         struct lnet_ping_buffer *pbuf;
3898         int rc;
3899
3900         if (LNET_NID_IS_ANY(dest_nid)) {
3901                 rc = -EHOSTUNREACH;
3902                 goto fail_error;
3903         }
3904
3905         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3906         if (!pbuf) {
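                /* NB: positive errno here by design: the function header
                 * documents rc > 0 for failures prior to LNetGet().
                 */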
3907                 rc = ENOMEM;
3908                 goto fail_error;
3909         }
3910
3911         /* initialize md content */
3912         md.start     = &pbuf->pb_info;
3913         md.length    = LNET_PING_INFO_SIZE(nnis);
3914         md.threshold = 2; /* GET/REPLY */
3915         md.max_size  = 0;
3916         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3917         md.user_ptr  = user_data;
3918         md.handler   = handler;
3919
3920         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3921         if (rc) {
3922                 lnet_ping_buffer_decref(pbuf);
3923                 CERROR("Can't bind MD: %d\n", rc);
3924                 rc = -rc; /* change the rc to positive */
3925                 goto fail_error;
3926         }
3927         id.pid = LNET_PID_LUSTRE;
3928         id.nid = lnet_nid_to_nid4(dest_nid);
3929
3930         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3931                      LNET_RESERVED_PORTAL,
3932                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3933
3934         if (rc)
3935                 goto fail_unlink_md;
3936
3937         return 0;
3938
3939 fail_unlink_md:
3940         LNetMDUnlink(*mdh);
3941         LNetInvalidateMDHandle(mdh);
3942 fail_error:
3943         return rc;
3944 }
3945
3946 static void
3947 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3948                            int status, bool send, bool unlink_event)
3949 {
3950         struct lnet_nid *nid = &ev_info->mt_nid;
3951
3952         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3953                 struct lnet_ni *ni;
3954
3955                 lnet_net_lock(0);
3956                 ni = lnet_nid_to_ni_locked(nid, 0);
3957                 if (!ni) {
3958                         lnet_net_unlock(0);
3959                         return;
3960                 }
3961                 lnet_ni_lock(ni);
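                /* Clear the pending flag on a reply/unlink event or on a
                 * failed send; a successful send leaves it set until the
                 * reply (or unlink) arrives.
                 */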
3962                 if (!send || (send && status != 0))
3963                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3964                 if (status)
3965                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3966                 lnet_ni_unlock(ni);
3967                 lnet_net_unlock(0);
3968
3969                 if (status != 0) {
3970                         CERROR("local NI (%s) recovery failed with %d\n",
3971                                libcfs_nidstr(nid), status);
3972                         return;
3973                 }
3974                 /*
3975                  * need to increment healthv for the ni here, because in
3976                  * the lnet_finalize() path we don't have access to this
3977                  * NI. And in order to get access to it, we'll need to
3978                  * carry forward too much information.
3979                  * In the peer case, it'll naturally be incremented
3980                  */
3981                 if (!unlink_event)
3982                         lnet_inc_healthv(&ni->ni_healthv,
3983                                          lnet_health_sensitivity);
3984         } else {
3985                 struct lnet_peer_ni *lpni;
3986                 int cpt;
3987
3988                 cpt = lnet_net_lock_current();
3989                 lpni = lnet_peer_ni_find_locked(nid);
3990                 if (!lpni) {
3991                         lnet_net_unlock(cpt);
3992                         return;
3993                 }
3994                 spin_lock(&lpni->lpni_lock);
3995                 if (!send || (send && status != 0))
3996                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3997                 if (status)
3998                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3999                 spin_unlock(&lpni->lpni_lock);
4000                 lnet_peer_ni_decref_locked(lpni);
4001                 lnet_net_unlock(cpt);
4002
4003                 if (status != 0)
4004                         CERROR("peer NI (%s) recovery failed with %d\n",
4005                                libcfs_nidstr(nid), status);
4006         }
4007 }
4008
4009 void
4010 lnet_mt_event_handler(struct lnet_event *event)
4011 {
4012         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4013         struct lnet_ping_buffer *pbuf;
4014
4015         /* TODO: remove assert */
4016         LASSERT(event->type == LNET_EVENT_REPLY ||
4017                 event->type == LNET_EVENT_SEND ||
4018                 event->type == LNET_EVENT_UNLINK);
4019
4020         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4021                event->status);
4022
4023         switch (event->type) {
4024         case LNET_EVENT_UNLINK:
4025                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4026                        libcfs_nidstr(&ev_info->mt_nid));
4027                 fallthrough;
4028         case LNET_EVENT_REPLY:
4029                 lnet_handle_recovery_reply(ev_info, event->status, false,
4030                                            event->type == LNET_EVENT_UNLINK);
4031                 break;
4032         case LNET_EVENT_SEND:
4033                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4034                                libcfs_nidstr(&ev_info->mt_nid),
4035                                (event->status) ? "unsuccessfully" :
4036                                "successfully", event->status);
4037                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4038                 break;
4039         default:
4040                 CERROR("Unexpected event: %d\n", event->type);
4041                 break;
4042         }
4043         if (event->unlinked) {
4044                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4045                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4046                 lnet_ping_buffer_decref(pbuf);
4047         }
4048 }
4049
4050 static int
4051 lnet_rsp_tracker_create(void)
4052 {
4053         struct list_head **rstqs;
4054         rstqs = lnet_create_array_of_queues();
4055
4056         if (!rstqs)
4057                 return -ENOMEM;
4058
4059         the_lnet.ln_mt_rstq = rstqs;
4060
4061         return 0;
4062 }
4063
4064 static void
4065 lnet_rsp_tracker_clean(void)
4066 {
4067         lnet_finalize_expired_responses();
4068
4069         cfs_percpt_free(the_lnet.ln_mt_rstq);
4070         the_lnet.ln_mt_rstq = NULL;
4071 }
4072
4073 int lnet_monitor_thr_start(void)
4074 {
4075         int rc = 0;
4076         struct task_struct *task;
4077
4078         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4079                 return -EALREADY;
4080
4081         rc = lnet_resendqs_create();
4082         if (rc)
4083                 return rc;
4084
4085         rc = lnet_rsp_tracker_create();
4086         if (rc)
4087                 goto clean_queues;
4088
4089         sema_init(&the_lnet.ln_mt_signal, 0);
4090
4091         lnet_net_lock(LNET_LOCK_EX);
4092         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4093         lnet_net_unlock(LNET_LOCK_EX);
4094         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4095         if (IS_ERR(task)) {
4096                 rc = PTR_ERR(task);
4097                 CERROR("Can't start monitor thread: %d\n", rc);
4098                 goto clean_thread;
4099         }
4100
4101         return 0;
4102
4103 clean_thread:
4104         lnet_net_lock(LNET_LOCK_EX);
4105         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4106         lnet_net_unlock(LNET_LOCK_EX);
4107         /* block until event callback signals exit */
4108         down(&the_lnet.ln_mt_signal);
4109         /* clean up */
4110         lnet_net_lock(LNET_LOCK_EX);
4111         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4112         lnet_net_unlock(LNET_LOCK_EX);
4113         lnet_rsp_tracker_clean();
4114         lnet_clean_local_ni_recoveryq();
4115         lnet_clean_peer_ni_recoveryq();
4116         lnet_clean_resendqs();
4117         the_lnet.ln_mt_handler = NULL;
4118         return rc;
4119 clean_queues:
4120         lnet_rsp_tracker_clean();
4121         lnet_clean_local_ni_recoveryq();
4122         lnet_clean_peer_ni_recoveryq();
4123         lnet_clean_resendqs();
4124         return rc;
4125 }
4126
4127 void lnet_monitor_thr_stop(void)
4128 {
4129         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4130                 return;
4131
4132         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4133         lnet_net_lock(LNET_LOCK_EX);
4134         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4135         lnet_net_unlock(LNET_LOCK_EX);
4136
4137         /* tell the monitor thread that we're shutting down */
4138         complete(&the_lnet.ln_mt_wait_complete);
4139
4140         /* block until monitor thread signals that it's done */
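        /* ln_api_mutex is dropped across the wait, presumably so the
         * exiting monitor thread is not blocked behind it.
         */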
4141         mutex_unlock(&the_lnet.ln_api_mutex);
4142         down(&the_lnet.ln_mt_signal);
4143         mutex_lock(&the_lnet.ln_api_mutex);
4144         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4145
4146         /* perform cleanup tasks */
4147         lnet_rsp_tracker_clean();
4148         lnet_clean_local_ni_recoveryq();
4149         lnet_clean_peer_ni_recoveryq();
4150         lnet_clean_resendqs();
4151 }
4152
4153 void
4154 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4155                   __u32 msg_type)
4156 {
4157         lnet_net_lock(cpt);
4158         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4159         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4160         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4161         lnet_net_unlock(cpt);
4162
4163         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4164 }
4165
4166 static void
4167 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4168 {
4169         struct lnet_hdr *hdr = &msg->msg_hdr;
4170
4171         if (msg->msg_wanted != 0)
4172                 lnet_setpayloadbuffer(msg);
4173
4174         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4175
4176         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4177          * it back into the ACK during lnet_finalize() */
4178         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4179                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4180
4181         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4182                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4183 }
4184
4185 static int
4186 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4187 {
4188         struct lnet_hdr         *hdr = &msg->msg_hdr;
4189         struct lnet_match_info  info;
4190         int                     rc;
4191         bool                    ready_delay;
4192
4193         /* Convert put fields to host byte order */
4194         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4195         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4196         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4197
4198         /* Primary peer NID. */
4199         info.mi_id.nid  = msg->msg_initiator;
4200         info.mi_id.pid  = hdr->src_pid;
4201         info.mi_opc     = LNET_MD_OP_PUT;
4202         info.mi_portal  = hdr->msg.put.ptl_index;
4203         info.mi_rlength = hdr->payload_length;
4204         info.mi_roffset = hdr->msg.put.offset;
4205         info.mi_mbits   = hdr->msg.put.match_bits;
4206         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4207
4208         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4209         ready_delay = msg->msg_rx_ready_delay;
4210
4211  again:
4212         rc = lnet_ptl_match_md(&info, msg);
4213         switch (rc) {
4214         default:
4215                 LBUG();
4216
4217         case LNET_MATCHMD_OK:
4218                 lnet_recv_put(ni, msg);
4219                 return 0;
4220
4221         case LNET_MATCHMD_NONE:
4222                 if (ready_delay)
4223                         /* no eager_recv, or it has already been called; the
4224                          * message should be on the delayed list */
4225                         return 0;
4226
4227                 rc = lnet_ni_eager_recv(ni, msg);
4228                 if (rc == 0) {
4229                         ready_delay = true;
4230                         goto again;
4231                 }
4232                 fallthrough;
4233
4234         case LNET_MATCHMD_DROP:
4235                 CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
4237                         libcfs_idstr(&info.mi_id), info.mi_portal,
4238                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4239
4240                 return -ENOENT; /* -ve: OK but no match */
4241         }
4242 }
4243
4244 static int
4245 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4246 {
4247         struct lnet_match_info info;
4248         struct lnet_hdr *hdr = &msg->msg_hdr;
4249         struct lnet_processid source_id;
4250         struct lnet_handle_wire reply_wmd;
4251         int rc;
4252
4253         /* Convert get fields to host byte order */
4254         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4255         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4256         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4257         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4258
4259         source_id.nid = hdr->src_nid;
4260         source_id.pid = hdr->src_pid;
4261         /* Primary peer NID */
4262         info.mi_id.nid  = msg->msg_initiator;
4263         info.mi_id.pid  = hdr->src_pid;
4264         info.mi_opc     = LNET_MD_OP_GET;
4265         info.mi_portal  = hdr->msg.get.ptl_index;
4266         info.mi_rlength = hdr->msg.get.sink_length;
4267         info.mi_roffset = hdr->msg.get.src_offset;
4268         info.mi_mbits   = hdr->msg.get.match_bits;
4269         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4270
4271         rc = lnet_ptl_match_md(&info, msg);
4272         if (rc == LNET_MATCHMD_DROP) {
4273                 CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
4275                         libcfs_idstr(&info.mi_id), info.mi_portal,
4276                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4277                 return -ENOENT; /* -ve: OK but no match */
4278         }
4279
4280         LASSERT(rc == LNET_MATCHMD_OK);
4281
4282         lnet_build_msg_event(msg, LNET_EVENT_GET);
4283
4284         reply_wmd = hdr->msg.get.return_wmd;
4285
4286         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4287                        msg->msg_offset, msg->msg_wanted);
4288
4289         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4290
4291         if (rdma_get) {
4292                 /* The LND completes the REPLY from her recv procedure */
4293                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4294                              msg->msg_offset, msg->msg_len, msg->msg_len);
4295                 return 0;
4296         }
4297
4298         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4299         msg->msg_receiving = 0;
4300
4301         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4302         if (rc < 0) {
4303                 /* didn't get as far as lnet_ni_send() */
4304                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4305                        libcfs_nidstr(&ni->ni_nid),
4306                        libcfs_idstr(&info.mi_id), rc);
4307
4308                 lnet_finalize(msg, rc);
4309         }
4310
4311         return 0;
4312 }
4313
4314 static int
4315 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4316 {
4317         void *private = msg->msg_private;
4318         struct lnet_hdr *hdr = &msg->msg_hdr;
4319         struct lnet_processid src = {};
4320         struct lnet_libmd *md;
4321         unsigned int rlength;
4322         unsigned int mlength;
4323         int cpt;
4324
4325         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4326         lnet_res_lock(cpt);
4327
4328         src.nid = hdr->src_nid;
4329         src.pid = hdr->src_pid;
4330
4331         /* NB handles only looked up by creator (no flips) */
4332         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4333         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4334                 CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
4336                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4337                         (md == NULL) ? "invalid" : "inactive",
4338                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4339                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4340                 if (md != NULL && md->md_me != NULL)
4341                         CERROR("REPLY MD also attached to portal %d\n",
4342                                md->md_me->me_portal);
4343
4344                 lnet_res_unlock(cpt);
4345                 return -ENOENT; /* -ve: OK but no match */
4346         }
4347
4348         LASSERT(md->md_offset == 0);
4349
4350         rlength = hdr->payload_length;
4351         mlength = min(rlength, md->md_length);
4352
4353         if (mlength < rlength &&
4354             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4355                 CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
4357                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4358                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4359                         mlength);
4360                 lnet_res_unlock(cpt);
4361                 return -ENOENT; /* -ve: OK but no match */
4362         }
4363
4364         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4365                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4366                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4367
4368         lnet_msg_attach_md(msg, md, 0, mlength);
4369
4370         if (mlength != 0)
4371                 lnet_setpayloadbuffer(msg);
4372
4373         lnet_res_unlock(cpt);
4374
4375         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4376
4377         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4378         return 0;
4379 }
4380
4381 static int
4382 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4383 {
4384         struct lnet_hdr *hdr = &msg->msg_hdr;
4385         struct lnet_processid src = {};
4386         struct lnet_libmd *md;
4387         int cpt;
4388
4389         src.nid = hdr->src_nid;
4390         src.pid = hdr->src_pid;
4391
4392         /* Convert ack fields to host byte order */
4393         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4394         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4395
4396         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4397         lnet_res_lock(cpt);
4398
4399         /* NB handles only looked up by creator (no flips) */
4400         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4401         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4402                 /* Don't moan; this is expected */
4403                 CDEBUG(D_NET,
4404                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4405                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4406                        (md == NULL) ? "invalid" : "inactive",
4407                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4408                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4409                 if (md != NULL && md->md_me != NULL)
4410                         CERROR("Source MD also attached to portal %d\n",
4411                                md->md_me->me_portal);
4412
4413                 lnet_res_unlock(cpt);
4414                 return -ENOENT;                  /* -ve! */
4415         }
4416
4417         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4418                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4419                hdr->msg.ack.dst_wmd.wh_object_cookie);
4420
4421         lnet_msg_attach_md(msg, md, 0, 0);
4422
4423         lnet_res_unlock(cpt);
4424
4425         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4426
4427         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4428         return 0;
4429 }
4430
4431 /**
4432  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4433  * \retval LNET_CREDIT_WAIT     If \a msg is blocked waiting for a router buffer
4434  * \retval -ve                  error code
4435  */
4436 int
4437 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4438 {
4439         int     rc = 0;
4440
4441         if (!the_lnet.ln_routing)
4442                 return -ECANCELED;
4443
4444         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4445             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4446                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4447                         msg->msg_rx_ready_delay = 1;
4448                 } else {
4449                         lnet_net_unlock(msg->msg_rx_cpt);
4450                         rc = lnet_ni_eager_recv(ni, msg);
4451                         lnet_net_lock(msg->msg_rx_cpt);
4452                 }
4453         }
4454
4455         if (rc == 0)
4456                 rc = lnet_post_routed_recv_locked(msg, 0);
4457         return rc;
4458 }
4459
4460 int
4461 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4462 {
4463         int     rc;
4464
4465         switch (msg->msg_type) {
4466         case LNET_MSG_ACK:
4467                 rc = lnet_parse_ack(ni, msg);
4468                 break;
4469         case LNET_MSG_PUT:
4470                 rc = lnet_parse_put(ni, msg);
4471                 break;
4472         case LNET_MSG_GET:
4473                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4474                 break;
4475         case LNET_MSG_REPLY:
4476                 rc = lnet_parse_reply(ni, msg);
4477                 break;
4478         default: /* should not happen: type was vetted by lnet_parse() */
4479                 LASSERT(0);
4480                 return -EPROTO;
4481         }
4482
4483         LASSERT(rc == 0 || rc == -ENOENT);
4484         return rc;
4485 }
4486
4487 char *
4488 lnet_msgtyp2str(int type)
4489 {
4490         switch (type) {
4491         case LNET_MSG_ACK:
4492                 return "ACK";
4493         case LNET_MSG_PUT:
4494                 return "PUT";
4495         case LNET_MSG_GET:
4496                 return "GET";
4497         case LNET_MSG_REPLY:
4498                 return "REPLY";
4499         case LNET_MSG_HELLO:
4500                 return "HELLO";
4501         default:
4502                 return "<UNKNOWN>";
4503         }
4504 }
4505 EXPORT_SYMBOL(lnet_msgtyp2str);
4506
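/*
 * lnet_parse() is the entry point an LND calls once it has received a
 * message header off the wire.  A minimal sketch of the calling pattern,
 * assuming a hypothetical LND receive path (the "rx"/"kmylnd_*" names
 * below are illustrative, not part of this file):
 *
 *	rc = lnet_parse(ni, &rx->rx_hdr, &rx->rx_peer_nid, rx, 0);
 *	if (rc < 0)
 *		kmylnd_close_conn(rx->rx_conn);	 // protocol error: drop peer
 *
 * On success LNet owns the message and will call back into the LND via
 * lnd_recv() to move the payload.
 */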
4507 int
4508 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4509            struct lnet_nid *from_nid, void *private, int rdma_req)
4510 {
4511         struct lnet_peer_ni *lpni;
4512         struct lnet_msg *msg;
4513         __u32 payload_length;
4514         lnet_pid_t dest_pid;
4515         struct lnet_nid dest_nid;
4516         struct lnet_nid src_nid;
4517         bool push = false;
4518         int for_me;
4519         __u32 type;
4520         int rc = 0;
4521         int cpt;
4522         time64_t now = ktime_get_seconds();
4523
4524         LASSERT(!in_interrupt());
4525
4526         type = hdr->type;
4527         src_nid = hdr->src_nid;
4528         dest_nid = hdr->dest_nid;
4529         dest_pid = hdr->dest_pid;
4530         payload_length = hdr->payload_length;
4531
4532         for_me = nid_same(&ni->ni_nid, &dest_nid);
4533         cpt = lnet_nid2cpt(from_nid, ni);
4534
4535         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4536                 libcfs_nidstr(&dest_nid),
4537                 libcfs_nidstr(&ni->ni_nid),
4538                 libcfs_nidstr(&src_nid),
4539                 lnet_msgtyp2str(type),
4540                 (for_me) ? "for me" : "routed");
4541
4542         switch (type) {
4543         case LNET_MSG_ACK:
4544         case LNET_MSG_GET:
4545                 if (payload_length > 0) {
4546                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4547                                libcfs_nidstr(from_nid),
4548                                libcfs_nidstr(&src_nid),
4549                                lnet_msgtyp2str(type), payload_length);
4550                         return -EPROTO;
4551                 }
4552                 break;
4553
4554         case LNET_MSG_PUT:
4555         case LNET_MSG_REPLY:
4556                 if (payload_length >
4557                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4558                        CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
4560                                libcfs_nidstr(from_nid),
4561                                libcfs_nidstr(&src_nid),
4562                                lnet_msgtyp2str(type),
4563                                payload_length,
4564                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4565                         return -EPROTO;
4566                 }
4567                 break;
4568
4569         default:
4570                 CERROR("%s, src %s: Bad message type 0x%x\n",
4571                        libcfs_nidstr(from_nid),
4572                        libcfs_nidstr(&src_nid), type);
4573                 return -EPROTO;
4574         }
4575
4576         /* Only update net_last_alive for incoming GETs on the reserved portal
4577          * (i.e. incoming lnet/discovery pings).
4578          * This avoids situations where the router's own traffic results in NI
4579          * status changes
4580          */
4581         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4582             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4583             !lnet_islocalnid(&src_nid) &&
4584             ni->ni_net->net_last_alive != now) {
4585                 lnet_ni_lock(ni);
4586                 spin_lock(&ni->ni_net->net_lock);
4587                 ni->ni_net->net_last_alive = now;
4588                 spin_unlock(&ni->ni_net->net_lock);
4589                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4590                 lnet_ni_unlock(ni);
4591         }
4592
4593         if (push)
4594                 lnet_push_update_to_peers(1);
4595
4596         /* Regard a bad destination NID as a protocol error.  Senders should
4597          * know what they're doing; if they don't they're misconfigured, buggy
4598          * or malicious so we chop them off at the knees :) */
4599
4600         if (!for_me) {
4601                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4602                         /* should have gone direct */
4603                         CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
4605                                 libcfs_nidstr(from_nid),
4606                                 libcfs_nidstr(&src_nid),
4607                                 libcfs_nidstr(&dest_nid));
4608                         return -EPROTO;
4609                 }
4610
4611                 if (lnet_islocalnid(&dest_nid)) {
4612                         /* dest is another local NI; sender should have used
4613                          * this node's NID on its own network */
4614                         CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
4616                                 libcfs_nidstr(from_nid),
4617                                 libcfs_nidstr(&src_nid),
4618                                 libcfs_nidstr(&dest_nid));
4619                         return -EPROTO;
4620                 }
4621
4622                 if (rdma_req && type == LNET_MSG_GET) {
4623                         CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
4625                                 libcfs_nidstr(from_nid),
4626                                 libcfs_nidstr(&src_nid),
4627                                 libcfs_nidstr(&dest_nid));
4628                         return -EPROTO;
4629                 }
4630
4631                 if (!the_lnet.ln_routing) {
4632                         CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
4634                                 libcfs_nidstr(from_nid),
4635                                 libcfs_nidstr(&src_nid),
4636                                 libcfs_nidstr(&dest_nid));
4637                         goto drop;
4638                 }
4639         }
4640
4641         /* Message looks OK; we're not going to return an error, so we MUST
4642          * call back lnd_recv() come what may... */
4643
4644         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4645             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4646                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4647                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4648                        lnet_msgtyp2str(type));
4649                 goto drop;
4650         }
4651
4652         /* FIXME need to support large-addr nid */
4653         if (!list_empty(&the_lnet.ln_drop_rules) &&
4654             lnet_drop_rule_match(hdr, lnet_nid_to_nid4(&ni->ni_nid), NULL)) {
4655                 CDEBUG(D_NET,
4656                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4657                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4658                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4659                 goto drop;
4660         }
4661
4662         msg = lnet_msg_alloc();
4663         if (msg == NULL) {
4664                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4665                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4666                        lnet_msgtyp2str(type));
4667                 goto drop;
4668         }
4669
4670         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4671          * pointers NULL etc */
4672
4673         msg->msg_type = type;
4674         msg->msg_private = private;
4675         msg->msg_receiving = 1;
4676         msg->msg_rdma_get = rdma_req;
4677         msg->msg_len = msg->msg_wanted = payload_length;
4678         msg->msg_offset = 0;
4679         msg->msg_hdr = *hdr;
4680         /* for building message event */
4681         msg->msg_from = *from_nid;
4682         if (!for_me) {
4683                 msg->msg_target.pid = dest_pid;
4684                 msg->msg_target.nid = dest_nid;
4685                 msg->msg_routing = 1;
4686         }
4687
4688         lnet_net_lock(cpt);
4689         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4690         if (IS_ERR(lpni)) {
4691                 lnet_net_unlock(cpt);
4692                 rc = PTR_ERR(lpni);
4693                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4694                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4695                        lnet_msgtyp2str(type), rc);
4696                 lnet_msg_free(msg);
4697                 if (rc == -ESHUTDOWN)
4698                         /* We are shutting down.  Don't do anything more */
4699                         return 0;
4700                 goto drop;
4701         }
4702
4703         /* If this message was forwarded to us from a router then we may need
4704          * to update router aliveness or check for an asymmetrical route
4705          * (or both)
4706          */
4707         if (((lnet_drop_asym_route && for_me) ||
4708              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4709             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4710                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4711                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4712                 struct lnet_route *route;
4713                 bool found = false;
4714
4715                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4716                         if (route->lr_net == src_net_id) {
4717                                 found = true;
4718                                 /* If we're transitioning the gateway from
4719                                  * dead -> alive, and discovery is disabled
4720                                  * locally or on the gateway, then we need to
4721                                  * update the cached route aliveness for each
4722                                  * route to the src_nid's net.
4723                                  *
4724                                  * Otherwise, we're only checking for
4725                                  * symmetrical route, and we can break the
4726                                  * loop
4727                                  */
4728                                 if (!gw->lp_alive &&
4729                                     lnet_is_discovery_disabled(gw))
4730                                         lnet_set_route_aliveness(route, true);
4731                                 else
4732                                         break;
4733                         }
4734                 }
4735                 if (lnet_drop_asym_route && for_me && !found) {
4736                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4737                         lnet_peer_ni_decref_locked(lpni);
4738                         lnet_net_unlock(cpt);
4739                         /* we would not use from_nid to route a message to
4740                          * src_nid
4741                          * => asymmetric routing detected but forbidden
4742                          */
4743                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4744                                libcfs_nidstr(from_nid),
4745                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4746                         lnet_msg_free(msg);
4747                         goto drop;
4748                 }
4749                 if (!gw->lp_alive) {
4750                         struct lnet_peer_net *lpn;
4751                         struct lnet_peer_ni *lpni2;
4752
4753                         gw->lp_alive = true;
4754                         /* Mark all remote NIs on src_nid's net UP */
4755                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4756                         if (lpn)
4757                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4758                                                     lpni_peer_nis)
4759                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4760                 }
4761         }
4762
4763         lpni->lpni_last_alive = now;
4764
4765         msg->msg_rxpeer = lpni;
4766         msg->msg_rxni = ni;
4767         lnet_ni_addref_locked(ni, cpt);
4768         /* Multi-Rail: Primary NID of source. */
4769         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
4770
4771         /*
4772          * Mark the status of this lpni as UP since we received a message
4773          * from it. Ping responses report back the ns_status, which the
4774          * remote marks as up or down, and we cache it here.
4775          */
4776         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4777
4778         lnet_msg_commit(msg, cpt);
4779
4780         /* message delay simulation */
4781         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4782                      lnet_delay_rule_match_locked(hdr, msg))) {
4783                 lnet_net_unlock(cpt);
4784                 return 0;
4785         }
4786
4787         if (!for_me) {
4788                 rc = lnet_parse_forward_locked(ni, msg);
4789                 lnet_net_unlock(cpt);
4790
4791                 if (rc < 0)
4792                         goto free_drop;
4793
4794                 if (rc == LNET_CREDIT_OK) {
4795                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4796                                      0, payload_length, payload_length);
4797                 }
4798                 return 0;
4799         }
4800
4801         lnet_net_unlock(cpt);
4802
4803         rc = lnet_parse_local(ni, msg);
4804         if (rc != 0)
4805                 goto free_drop;
4806         return 0;
4807
4808  free_drop:
4809         LASSERT(msg->msg_md == NULL);
4810         lnet_finalize(msg, rc);
4811
4812  drop:
4813         lnet_drop_message(ni, cpt, private, payload_length, type);
4814         return 0;
4815 }
4816 EXPORT_SYMBOL(lnet_parse);
4817
4818 void
4819 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4820 {
4821         while (!list_empty(head)) {
4822                 struct lnet_processid id = {};
4823                 struct lnet_msg *msg;
4824
4825                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4826                 list_del(&msg->msg_list);
4827
4828                 id.nid = msg->msg_hdr.src_nid;
4829                 id.pid = msg->msg_hdr.src_pid;
4830
4831                 LASSERT(msg->msg_md == NULL);
4832                 LASSERT(msg->msg_rx_delayed);
4833                 LASSERT(msg->msg_rxpeer != NULL);
4834                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4835
4836                 CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
4838                       libcfs_idstr(&id),
4839                       msg->msg_hdr.msg.put.ptl_index,
4840                       msg->msg_hdr.msg.put.match_bits,
4841                       msg->msg_hdr.msg.put.offset,
4842                       msg->msg_hdr.payload_length, reason);
4843
4844                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4845                  * called lnet_drop_message(), so I just hang onto msg as well
4846                  * until that's done */
4847
4848                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4849                                   msg->msg_private, msg->msg_len,
4850                                   msg->msg_type);
4851
4852                 msg->msg_no_resend = true;
4853                 /*
4854                  * NB: the message will not generate an event because it has no
4855                  * attached MD, but we should still give an error code so that
4856                  * lnet_msg_decommit() can skip counter operations and other checks.
4857                  */
4858                 lnet_finalize(msg, -ENOENT);
4859         }
4860 }
4861
4862 void
4863 lnet_recv_delayed_msg_list(struct list_head *head)
4864 {
4865         while (!list_empty(head)) {
4866                 struct lnet_msg *msg;
4867                 struct lnet_processid id;
4868
4869                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4870                 list_del(&msg->msg_list);
4871
4872                 /* md won't disappear under me, since each msg
4873                  * holds a ref on it */
4874
4875                 id.nid = msg->msg_hdr.src_nid;
4876                 id.pid = msg->msg_hdr.src_pid;
4877
4878                 LASSERT(msg->msg_rx_delayed);
4879                 LASSERT(msg->msg_md != NULL);
4880                 LASSERT(msg->msg_rxpeer != NULL);
4881                 LASSERT(msg->msg_rxni != NULL);
4882                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4883
4884                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
4886                         libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index,
4887                         msg->msg_hdr.msg.put.match_bits,
4888                         msg->msg_hdr.msg.put.offset,
4889                         msg->msg_hdr.payload_length);
4890
4891                 lnet_recv_put(msg->msg_rxni, msg);
4892         }
4893 }
4894
4895 static void
4896 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4897                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4898 {
4899         s64 timeout_ns;
4900         struct lnet_rsp_tracker *local_rspt;
4901
4902         /*
4903          * The MD has a refcount taken by the message, so it is not going
4904          * away. The MD can still be looked up, however, so we must secure
4905          * access to md_rspt_ptr by taking the res_lock.
4906          * The rspt can be accessed without protection until it is added
4907          * to the list.
4908          */
4909
4910         lnet_res_lock(cpt);
4911         local_rspt = md->md_rspt_ptr;
4912         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4913         if (local_rspt != NULL) {
4914                 /*
4915                  * we already have an rspt attached to the md, so we'll
4916                  * update the deadline on that one.
4917                  */
4918                 lnet_rspt_free(rspt, cpt);
4919         } else {
4920                 /* new md */
4921                 rspt->rspt_mdh = mdh;
4922                 rspt->rspt_cpt = cpt;
4923                 /* store the rspt so we can access it when we get the REPLY */
4924                 md->md_rspt_ptr = rspt;
4925                 local_rspt = rspt;
4926         }
4927         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4928
4929         /*
4930          * Add to the list of tracked responses. Entries are added at the
4931          * tail of the list so that the oldest entries expire first.
4932          */
4933         lnet_net_lock(cpt);
4934         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4935         lnet_net_unlock(cpt);
4936         lnet_res_unlock(cpt);
4937 }
4938
4939 /**
4940  * Initiate an asynchronous PUT operation.
4941  *
4942  * There are several events associated with a PUT: completion of the send on
4943  * the initiator node (LNET_EVENT_SEND), and when the send completes
4944  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4945  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4946  * used at the target node to indicate the completion of incoming data
4947  * delivery.
4948  *
4949  * The local events will be logged in the EQ associated with the MD pointed to
4950  * by the \a mdh handle. Using an MD without an associated EQ results in these
4951  * events being discarded. In this case, the caller must have another
4952  * mechanism (e.g., a higher level protocol) for determining when it is safe
4953  * to modify the memory region associated with the MD.
4954  *
4955  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4956  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4957  *
4958  * \param self Indicates the NID of a local interface through which to send
4959  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4960  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4961  * must be "free floating" (See LNetMDBind()).
4962  * \param ack Controls whether an acknowledgment is requested.
4963  * Acknowledgments are only sent when they are requested by the initiating
4964  * process and the target MD enables them.
4965  * \param target A process identifier for the target process.
4966  * \param portal The index in the \a target's portal table.
4967  * \param match_bits The match bits to use for MD selection at the target
4968  * process.
4969  * \param offset The offset into the target MD (only used when the target
4970  * MD has the LNET_MD_MANAGE_REMOTE option set).
4971  * \param hdr_data 64 bits of user data that can be included in the message
4972  * header. This data is written to an event queue entry at the target if an
4973  * EQ is present on the matching MD.
4974  *
4975  * \retval  0      Success, and only in this case events will be generated
4976  * and logged to EQ (if it exists).
4977  * \retval -EIO    Simulated failure.
4978  * \retval -ENOMEM Memory allocation failure.
4979  * \retval -ENOENT Invalid MD object.
4980  *
4981  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
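 *
 * Example (an illustrative sketch only: \a mdh is assumed to have been
 * obtained from LNetMDBind(), and MY_PORTAL / MY_MATCH_BITS are
 * hypothetical caller-defined constants):
 *
 *	rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
 *		     MY_PORTAL, MY_MATCH_BITS, 0, 0);
 *	if (rc != 0)
 *		CERROR("PUT to %s failed: %d\n", libcfs_id2str(target), rc);
 *
 * On success the buffer must not be reused until the handler attached to
 * the MD sees LNET_EVENT_SEND (and LNET_EVENT_ACK, if one was requested).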
4982  */
4983 int
4984 LNetPut(lnet_nid_t self4, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4985         struct lnet_process_id target4, unsigned int portal,
4986         __u64 match_bits, unsigned int offset,
4987         __u64 hdr_data)
4988 {
4989         struct lnet_msg *msg;
4990         struct lnet_libmd *md;
4991         int cpt;
4992         int rc;
4993         struct lnet_processid target;
4994         struct lnet_rsp_tracker *rspt = NULL;
4995         struct lnet_nid self;
4996
4997         LASSERT(the_lnet.ln_refcount > 0);
4998
4999         lnet_nid4_to_nid(self4, &self);
5000         lnet_nid4_to_nid(target4.nid, &target.nid);
5001         target.pid = target4.pid;
5002
5003         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5004             fail_peer(&target.nid, 1)) {                /* shall we now? */
5005                 CERROR("Dropping PUT to %s: simulated failure\n",
5006                        libcfs_id2str(target4));
5007                 return -EIO;
5008         }
5009
5010         msg = lnet_msg_alloc();
5011         if (msg == NULL) {
5012                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5013                        libcfs_id2str(target4));
5014                 return -ENOMEM;
5015         }
5016         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5017
5018         cpt = lnet_cpt_of_cookie(mdh.cookie);
5019
5020         if (ack == LNET_ACK_REQ) {
5021                 rspt = lnet_rspt_alloc(cpt);
5022                 if (!rspt) {
5023                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5024                                 libcfs_id2str(target4));
5025                         lnet_msg_free(msg);
5026                         return -ENOMEM;
5026                 }
5027                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5028         }
5029
5030         lnet_res_lock(cpt);
5031
5032         md = lnet_handle2md(&mdh);
5033         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5034                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5035                        match_bits, portal, libcfs_id2str(target4),
5036                        md == NULL ? -1 : md->md_threshold);
5037                 if (md != NULL && md->md_me != NULL)
5038                         CERROR("Source MD also attached to portal %d\n",
5039                                md->md_me->me_portal);
5040                 lnet_res_unlock(cpt);
5041
5042                 if (rspt)
5043                         lnet_rspt_free(rspt, cpt);
5044
5045                 lnet_msg_free(msg);
5046                 return -ENOENT;
5047         }
5048
5049         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_id2str(target4));
5050
5051         lnet_msg_attach_md(msg, md, 0, 0);
5052
5053         lnet_prep_send(msg, LNET_MSG_PUT, &target, 0, md->md_length);
5054
5055         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5056         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5057         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5058         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5059
5060         /* NB handles only looked up by creator (no flips) */
5061         if (ack == LNET_ACK_REQ) {
5062                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5063                         the_lnet.ln_interface_cookie;
5064                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5065                         md->md_lh.lh_cookie;
5066         } else {
5067                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5068                         LNET_WIRE_HANDLE_COOKIE_NONE;
5069                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5070                         LNET_WIRE_HANDLE_COOKIE_NONE;
5071         }
5072
5073         lnet_res_unlock(cpt);
5074
5075         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5076
5077         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5078                                                    md->md_options))
5079                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5080         else if (rspt)
5081                 lnet_rspt_free(rspt, cpt);
5082
5083         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5084                                  CFS_FAIL_ONCE))
5085                 rc = -EIO;
5086         else
5087                 rc = lnet_send(&self, msg, NULL);
5088
5089         if (rc != 0) {
5090                 CNETERR("Error sending PUT to %s: %d\n",
5091                         libcfs_id2str(target4), rc);
5092                 msg->msg_no_resend = true;
5093                 lnet_finalize(msg, rc);
5094         }
5095
5096         /* completion will be signalled by an event */
5097         return 0;
5098 }
5099 EXPORT_SYMBOL(LNetPut);
5100
5101 /*
5102  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5103  * returns a msg for the LND to pass to lnet_finalize() when the sink
5104  * data has been received.
5105  *
5106  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5107  * lnet_finalize() is called on it, so the LND must call this first
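 *
 * A sketch of the LND-side sequence for an optimized GET (illustrative;
 * the DMA step and the "nob_received"/"status" names are LND-specific):
 *
 *	msg = lnet_create_reply_msg(ni, getmsg);
 *	// ... LND DMAs the sink data straight into the GET MD ...
 *	lnet_set_reply_msg_len(ni, msg, nob_received);
 *	lnet_finalize(msg, status);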
5108  */
5109 struct lnet_msg *
5110 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5111 {
5112         struct lnet_msg *msg = lnet_msg_alloc();
5113         struct lnet_libmd *getmd = getmsg->msg_md;
5114         struct lnet_processid *peer_id = &getmsg->msg_target;
5115         int cpt;
5116
5117         LASSERT(!getmsg->msg_target_is_router);
5118         LASSERT(!getmsg->msg_routing);
5119
5120         if (msg == NULL) {
5121                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5122                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5123                 goto drop;
5124         }
5125
5126         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5127         lnet_res_lock(cpt);
5128
5129         LASSERT(getmd->md_refcount > 0);
5130
5131         if (getmd->md_threshold == 0) {
5132                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5133                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5134                         getmd);
5135                 lnet_res_unlock(cpt);
5136                 goto drop;
5137         }
5138
5139         LASSERT(getmd->md_offset == 0);
5140
5141         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5142                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5143
5144         /* setup information for lnet_build_msg_event */
5145         msg->msg_initiator =
5146                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5147         msg->msg_from = peer_id->nid;
5148         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5149         msg->msg_hdr.src_nid = peer_id->nid;
5150         msg->msg_hdr.payload_length = getmd->md_length;
5151         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5152
5153         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5154         lnet_res_unlock(cpt);
5155
5156         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5157
5158         lnet_net_lock(cpt);
5159         lnet_msg_commit(msg, cpt);
5160         lnet_net_unlock(cpt);
5161
5162         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5163
5164         return msg;
5165
5166  drop:
5167         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5168
5169         lnet_net_lock(cpt);
5170         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5171         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5172         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5173                 getmd->md_length;
5174         lnet_net_unlock(cpt);
5175
5176         if (msg != NULL)
5177                 lnet_msg_free(msg);
5178
5179         return NULL;
5180 }
5181 EXPORT_SYMBOL(lnet_create_reply_msg);
5182
5183 void
5184 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5185                        unsigned int len)
5186 {
5187         /* Set the REPLY length, now that the RDMA that elides the REPLY
5188          * message has completed and the length is known. */
5189         LASSERT(reply != NULL);
5190         LASSERT(reply->msg_type == LNET_MSG_GET);
5191         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5192
5193         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5194          * the end of my buffer, I might as well be dead. */
5195         LASSERT(len <= reply->msg_ev.mlength);
5196
5197         reply->msg_ev.mlength = len;
5198 }
5199 EXPORT_SYMBOL(lnet_set_reply_msg_len);
5200
5201 /**
5202  * Initiate an asynchronous GET operation.
5203  *
5204  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5205  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5206  * the target node in the REPLY has been written to local MD.
5207  *
5208  * On the target node, an LNET_EVENT_GET is logged when the GET request
5209  * arrives and is accepted into a MD.
5210  *
5211  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5212  * \param mdh A handle for the MD that describes the memory into which the
5213  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5214  *
5215  * \retval  0      Success, and only in this case events will be generated
5216  * and logged to EQ (if it exists) of the MD.
5217  * \retval -EIO    Simulated failure.
5218  * \retval -ENOMEM Memory allocation failure.
5219  * \retval -ENOENT Invalid MD object.
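 *
 * Example (illustrative sketch: \a mdh describes a sink buffer bound via
 * LNetMDBind(), and MY_PORTAL / MY_MATCH_BITS are hypothetical
 * caller-defined constants):
 *
 *	rc = LNetGet(LNET_NID_ANY, mdh, target, MY_PORTAL,
 *		     MY_MATCH_BITS, 0, false);
 *	if (rc != 0)
 *		CERROR("GET from %s failed: %d\n", libcfs_id2str(target), rc);
 *
 * The sink buffer holds valid data only once LNET_EVENT_REPLY has been
 * delivered to the MD's handler.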
5220  */
5221 int
5222 LNetGet(lnet_nid_t self4, struct lnet_handle_md mdh,
5223         struct lnet_process_id target4, unsigned int portal,
5224         __u64 match_bits, unsigned int offset, bool recovery)
5225 {
5226         struct lnet_msg *msg;
5227         struct lnet_libmd *md;
5228         struct lnet_rsp_tracker *rspt;
5229         int cpt;
5230         int rc;
5231         struct lnet_nid self;
5232         struct lnet_processid target;
5233
5234         LASSERT(the_lnet.ln_refcount > 0);
5235
5236         lnet_nid4_to_nid(self4, &self);
5237         lnet_nid4_to_nid(target4.nid, &target.nid);
5238         target.pid = target4.pid;
5239
5240         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5241             fail_peer(&target.nid, 1)) {                /* shall we now? */
5243                 CERROR("Dropping GET to %s: simulated failure\n",
5244                        libcfs_id2str(target4));
5245                 return -EIO;
5246         }
5247
5248         msg = lnet_msg_alloc();
5249         if (!msg) {
5250                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5251                        libcfs_id2str(target4));
5252                 return -ENOMEM;
5253         }
5254
5255         cpt = lnet_cpt_of_cookie(mdh.cookie);
5256
5257         rspt = lnet_rspt_alloc(cpt);
5258         if (!rspt) {
5259                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5260                        libcfs_id2str(target4));
5261                 lnet_msg_free(msg);
5262                 return -ENOMEM;
5262         }
5263         INIT_LIST_HEAD(&rspt->rspt_on_list);
5264
5265         msg->msg_recovery = recovery;
5266
5267         lnet_res_lock(cpt);
5268
5269         md = lnet_handle2md(&mdh);
5270         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5271                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5272                        match_bits, portal, libcfs_id2str(target4),
5273                        md == NULL ? -1 : md->md_threshold);
5274                 if (md != NULL && md->md_me != NULL)
5275                         CERROR("REPLY MD also attached to portal %d\n",
5276                                md->md_me->me_portal);
5277
5278                 lnet_res_unlock(cpt);
5279
5280                 lnet_msg_free(msg);
5281                 lnet_rspt_free(rspt, cpt);
5282                 return -ENOENT;
5283         }
5284
5285         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_id2str(target4));
5286
5287         lnet_msg_attach_md(msg, md, 0, 0);
5288
5289         lnet_prep_send(msg, LNET_MSG_GET, &target, 0, 0);
5290
5291         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5292         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5293         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5294         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5295
5296         /* NB handles only looked up by creator (no flips) */
5297         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5298                 the_lnet.ln_interface_cookie;
5299         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5300                 md->md_lh.lh_cookie;
5301
5302         lnet_res_unlock(cpt);
5303
5304         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5305
5306         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5307                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5308         else
5309                 lnet_rspt_free(rspt, cpt);
5310
5311         rc = lnet_send(&self, msg, NULL);
5312         if (rc < 0) {
5313                 CNETERR("Error sending GET to %s: %d\n",
5314                         libcfs_id2str(target4), rc);
5315                 msg->msg_no_resend = true;
5316                 lnet_finalize(msg, rc);
5317         }
5318
5319         /* completion will be signalled by an event */
5320         return 0;
5321 }
5322 EXPORT_SYMBOL(LNetGet);
5323
5324 /**
5325  * Calculate distance to node at \a dstnid.
5326  *
5327  * \param dstnid Target NID.
5328  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5329  * is saved here.
5330  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5331  * here.
5332  *
5333  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5334  * local_nid_dist_zero is set, which is the default.
5335  * \retval >0 Distance to the target NID, i.e. the number of hops plus one.
5336  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
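 *
 * Example (sketch; "peer_nid" is a hypothetical lnet_nid_t):
 *
 *	lnet_nid_t src;
 *	__u32 order;
 *	int dist = LNetDist(peer_nid, &src, &order);
 *
 *	if (dist < 0)
 *		CERROR("peer unreachable: %d\n", dist);
 *	else
 *		CDEBUG(D_NET, "%d hop(s) via %s\n",
 *		       dist > 0 ? dist - 1 : 0, libcfs_nid2str(src));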
5337  */
5338 int
5339 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5340 {
5341         struct list_head *e;
5342         struct lnet_ni *ni = NULL;
5343         struct lnet_remotenet *rnet;
5344         __u32 dstnet = LNET_NIDNET(dstnid);
5345         int hops;
5346         int cpt;
5347         __u32 order = 2;
5348         struct list_head *rn_list;
5349         bool matched_dstnet = false;
5350
5351         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5352          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5353          * keep order 0 free for 0@lo and order 1 free for a local NID
5354          * match */
5355
5356         LASSERT(the_lnet.ln_refcount > 0);
5357
5358         cpt = lnet_net_lock_current();
5359
5360         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5361                 /* FIXME support large-addr nid */
5362                 if (lnet_nid_to_nid4(&ni->ni_nid) == dstnid) {
5363                         if (srcnidp != NULL)
5364                                 *srcnidp = dstnid;
5365                         if (orderp != NULL) {
5366                                 if (dstnid == LNET_NID_LO_0)
5367                                         *orderp = 0;
5368                                 else
5369                                         *orderp = 1;
5370                         }
5371                         lnet_net_unlock(cpt);
5372
5373                         return local_nid_dist_zero ? 0 : 1;
5374                 }
5375
5376                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5377                         matched_dstnet = true;
5378                         /* We matched the destination net, but we may have
5379                          * additional local NIs to inspect.
5380                          *
5381                          * We record the nid and order as appropriate, but
5382                          * they may be overwritten if we match local NI above.
5383                          */
5384                         if (srcnidp)
5385                                 /* FIXME support large-addr nids */
5386                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5387
5388                         if (orderp) {
5389                                 /* Check if ni was originally created in
5390                                  * current net namespace.
5391                                  * If not, assign order above 0xffff0000,
5392                                  * to make this ni not a priority.
5393                                  */
5394                                 if (current->nsproxy &&
5395                                     !net_eq(ni->ni_net_ns,
5396                                             current->nsproxy->net_ns))
5397                                         *orderp = order + 0xffff0000;
5398                                 else
5399                                         *orderp = order;
5400                         }
5401                 }
5402
5403                 order++;
5404         }
5405
5406         if (matched_dstnet) {
5407                 lnet_net_unlock(cpt);
5408                 return 1;
5409         }
5410
5411         rn_list = lnet_net2rnethash(dstnet);
5412         list_for_each(e, rn_list) {
5413                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5414
5415                 if (rnet->lrn_net == dstnet) {
5416                         struct lnet_route *route;
5417                         struct lnet_route *shortest = NULL;
5418                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5419                         __u32 route_hops;
5420
5421                         LASSERT(!list_empty(&rnet->lrn_routes));
5422
5423                         list_for_each_entry(route, &rnet->lrn_routes,
5424                                             lr_list) {
5425                                 route_hops = route->lr_hops;
5426                                 if (route_hops == LNET_UNDEFINED_HOPS)
5427                                         route_hops = 1;
5428                                 if (shortest == NULL ||
5429                                     route_hops < shortest_hops) {
5430                                         shortest = route;
5431                                         shortest_hops = route_hops;
5432                                 }
5433                         }
5434
5435                         LASSERT(shortest != NULL);
5436                         hops = shortest_hops;
5437                         if (srcnidp != NULL) {
5438                                 struct lnet_net *net;
5439                                 net = lnet_get_net_locked(shortest->lr_lnet);
5440                                 LASSERT(net);
5441                                 ni = lnet_get_next_ni_locked(net, NULL);
5442                                 /* FIXME support large-addr nids */
5443                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5444                         }
5445                         if (orderp != NULL)
5446                                 *orderp = order;
5447                         lnet_net_unlock(cpt);
5448                         return hops + 1;
5449                 }
5450                 order++;
5451         }
5452
5453         lnet_net_unlock(cpt);
5454         return -EHOSTUNREACH;
5455 }
5456 EXPORT_SYMBOL(LNetDist);