// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */

/* This file is part of Lustre, http://www.lustre.org/
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <lnet/lnet_rdma.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        struct lnet_nid sd_dst_nid;
        struct lnet_nid sd_src_nid;
        struct lnet_nid sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline bool
lnet_msg_is_response(struct lnet_msg *msg)
{
        return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
}

static inline bool
lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
{
        if (md_options & LNET_MD_NO_TRACK_RESPONSE)
                /* Explicitly disabled in MD options */
                return false;

        if (md_options & LNET_MD_TRACK_RESPONSE)
                /* Explicitly enabled in MD options */
                return true;

        if (lnet_response_tracking == 3)
                /* Enabled for all message types */
                return true;

        if (msg_type == LNET_MSG_PUT)
                return lnet_response_tracking == 2;

        if (msg_type == LNET_MSG_GET)
                return lnet_response_tracking == 1;

        return false;
}
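
/* Summary of the policy implemented above: unless an MD explicitly opts
 * in or out via LNET_MD_TRACK_RESPONSE / LNET_MD_NO_TRACK_RESPONSE, the
 * lnet_response_tracking module parameter selects tracking by message
 * type:
 *   0 - responses are never tracked
 *   1 - track responses to GET requests only
 *   2 - track responses to PUT requests only
 *   3 - track responses to all message types
 */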

static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);

        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);

        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}
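
/* Usage sketch (illustrative; 'lpni' and 'total' are hypothetical locals):
 * count a dropped PUT against a peer NI, then read back the total number
 * of messages dropped for it:
 *
 *      lnet_incr_stats(&lpni->lpni_stats, LNET_MSG_PUT,
 *                      LNET_STATS_TYPE_DROP);
 *      total = lnet_sum_stats(&lpni->lpni_stats, LNET_STATS_TYPE_DROP);
 */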

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

int
lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct lnet_nid nid;
        LIST_HEAD(cull);

        lnet_nid4_to_nid(nid4, &nid);
        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
                    nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while ((tp = list_first_entry_or_null(&cull,
                                              struct lnet_test_peer,
                                              tp_list)) != NULL) {
                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}

static int
fail_peer(struct lnet_nid *nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);
        int fail = 0;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
                    nid_same(nid, &tp->tp_nid)) {       /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while ((tp = list_first_entry_or_null(&cull,
                                              struct lnet_test_peer,
                                              tp_list)) != NULL) {
                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);
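
/* Usage sketch (illustrative; the fragment counts and offsets are made
 * up): gather 'nob' bytes scattered across three source fragments into
 * two destination fragments, starting 16 bytes into the destination:
 *
 *      struct kvec dst[2], src[3];
 *
 *      ... point iov_base/iov_len of each fragment at real buffers ...
 *      lnet_copy_iov2iov(2, dst, 16, 3, src, 0, nob);
 *
 * Both scatter lists must cover at least offset + nob bytes; the
 * LASSERTs above fire if they do not.
 */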

unsigned int
lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->bv_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
                    unsigned int doffset,
                    unsigned int nsiov, struct bio_vec *siov,
                    unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;
        char *daddr = NULL;
        char *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(ndiov > 0);
        while (doffset >= diov->bv_len) {
                doffset -= diov->bv_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->bv_len) {
                soffset -= siov->bv_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->bv_len - doffset,
                                siov->bv_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->bv_page)) +
                                diov->bv_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->bv_page)) +
                                siov->bv_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy(daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->bv_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->bv_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->bv_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->bv_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->bv_page);
        if (saddr != NULL)
                kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int this_nob;
        char *addr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->bv_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int this_nob;
        char *addr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
                  int src_niov, struct bio_vec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->bv_len) {      /* skip initial frags */
                offset -= src->bv_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->bv_len - offset;
                dst->bv_page = src->bv_page;
                dst->bv_offset = src->bv_offset + offset;

                if (len <= frag_len) {
                        dst->bv_len = len;
                        LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
                        return niov;
                }

                dst->bv_len = frag_len;
                LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);
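
/* Usage sketch (illustrative; 'src_kiov'/'src_niov' are hypothetical):
 * build a bio_vec view of the byte range [offset, offset + len) of a
 * source kiov without copying any data:
 *
 *      struct bio_vec frags[LNET_MAX_IOV];
 *      int nfrags = lnet_extract_kiov(LNET_MAX_IOV, frags,
 *                                     src_niov, src_kiov, offset, len);
 *
 * The resulting fragments share pages with 'src_kiov', so the source
 * pages must stay pinned while the view is in use.
 */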

void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        struct bio_vec *kiov = NULL;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT(niov > 0);
                        LASSERT((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        msg->msg_kiov = md->md_kiov;
}

void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = *target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
        msg->msg_hdr.type           = type;
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid = target->nid;
        msg->msg_hdr.dest_pid = target->pid;
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = the_lnet.ln_pid;
        msg->msg_hdr.payload_length = len;
}

void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(nid_is_lo0(&ni->ni_nid) ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                   &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
                       libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
                       libcfs_idstr(&msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

/* Returns:
 *  -ETIMEDOUT if the message deadline has been exceeded
 *  -EHOSTUNREACH if the peer is down
 *  0 if this message should not be dropped
 */
static int
lnet_check_message_drop(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                        struct lnet_msg *msg)
{
        /* Drop message if we've exceeded the message deadline */
        if (ktime_after(ktime_get(), msg->msg_deadline))
                return -ETIMEDOUT;

        if (msg->msg_target.pid & LNET_PID_USERFLAG)
                return 0;

        if (!lnet_peer_aliveness_enabled(lpni))
                return 0;

        /* If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 0;

        /* try and send recovery messages regardless */
        if (msg->msg_recovery)
                return 0;

        /* always send any responses */
        if (lnet_msg_is_response(msg))
                return 0;

        /* always send non-routed messages */
        if (!msg->msg_routing)
                return 0;

        /* assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (ktime_get_seconds() <
            (lpni->lpni_last_alive +
             lpni->lpni_net->net_tunables.lct_peer_timeout))
                return 0;

        if (!lnet_is_peer_ni_alive(lpni))
                return -EHOSTUNREACH;

        return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni *lp = msg->msg_txpeer;
        struct lnet_ni *ni = msg->msg_txni;
        int cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
        int rc;

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* can't get here if we're sending to the loopback interface */
        if (the_lnet.ln_loni)
                LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));

        /* NB 'lp' is always the next hop */
        rc = lnet_check_message_drop(ni, lp, msg);
        if (rc) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                lnet_incr_stats(&msg->msg_txni->ni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_DROP);

                if (rc == -EHOSTUNREACH) {
                        CNETERR("Dropping message for %s: peer not alive\n",
                                libcfs_idstr(&msg->msg_target));
                        msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                } else {
                        CNETERR("Dropping message for %s: exceeded message deadline\n",
                                libcfs_idstr(&msg->msg_target));
                        msg->msg_health_status =
                                LNET_MSG_STATUS_NETWORK_TIMEOUT;
                }

                if (do_send)
                        lnet_finalize(msg, rc);

                lnet_net_lock(cpt);
                return rc;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
                        libcfs_idstr(&msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_idstr(&msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
            lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
                msg->msg_tx_delayed = 1;
                return LNET_CREDIT_WAIT;
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}
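
/* Worked example of the credit accounting above (illustrative numbers):
 * with lp->lpni_txcredits == 1 and tq->tq_credits == 1, the first message
 * consumes both credits and is sent immediately; a second message drives
 * lpni_txcredits to -1, is queued on lpni_txq, and the function returns
 * LNET_CREDIT_WAIT. It is sent later, when lnet_return_tx_credits_locked()
 * gives the peer credit back and re-posts the queued message.
 */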

static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool *rbp;
        int cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        spin_unlock(&lpni->lpni_lock);
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        /* lp_lock protects the lp_rtrq */
                        spin_lock(&lp->lp_lock);
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *txpeer = msg->msg_txpeer;
        struct lnet_ni *txni = msg->msg_txni;
        struct lnet_msg *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_first_entry(&tq->tq_delayed,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len +
                                       sizeof(struct lnet_hdr_nid4);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_first_entry(&txpeer->lpni_txq,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_first_entry(&rbp->rbp_msgs,
                               struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits.  */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                spin_lock(&rxpeerni->lpni_lock);
                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;
                rxpeerni->lpni_rtrcredits++;
                spin_unlock(&rxpeerni->lpni_lock);

                lp = rxpeerni->lpni_peer_net->lpn_peer;
                spin_lock(&lp->lp_lock);

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);

                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_first_entry(&lp->lp_rtrq,
                                                struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, struct lnet_nid *dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_ni *best_lpni,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
                INT_MIN;
        int best_lpni_healthv = (best_lpni) ?
                atomic_read(&best_lpni->lpni_healthv) : 0;
        bool best_lpni_is_preferred = false;
        bool lpni_is_preferred;
        int lpni_healthv;
        __u32 lpni_sel_prio;
        __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        lpni_is_preferred = lnet_peer_is_pref_nid_locked(
                                lpni, &best_ni->ni_nid);
                        CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
                               libcfs_nidstr(&best_ni->ni_nid),
                               lpni_is_preferred);
                } else {
                        lpni_is_preferred = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);
                lpni_sel_prio = lpni->lpni_sel_priority;

                if (best_lpni)
                        CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
                               libcfs_nidstr(&lpni->lpni_nid),
                               libcfs_nidstr(&best_lpni->lpni_nid),
                               lpni_healthv, best_lpni_healthv,
                               lpni_sel_prio, best_sel_prio,
                               lpni->lpni_txcredits, best_lpni_credits,
                               lpni->lpni_seq, best_lpni->lpni_seq);
                else
                        goto select_lpni;

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv)
                        continue;
                else if (lpni_healthv > best_lpni_healthv)
                        goto select_lpni;

                if (lpni_sel_prio > best_sel_prio)
                        continue;
                else if (lpni_sel_prio < best_sel_prio)
                        goto select_lpni;

                /* If this is a preferred peer, use it. Otherwise, ignore it */
                if (!best_lpni_is_preferred && lpni_is_preferred)
                        goto select_lpni;
                else if (best_lpni_is_preferred && !lpni_is_preferred)
                        continue;

                if (lpni->lpni_txcredits < best_lpni_credits)
                        /* We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                else if (lpni->lpni_txcredits > best_lpni_credits)
                        goto select_lpni;

                /* The best peer found so far and the current peer have
                 * the same number of available credits, so select
                 * between them using round robin
                 */
                if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
                        continue;
select_lpni:
                best_lpni_is_preferred = lpni_is_preferred;
                best_lpni_healthv = lpni_healthv;
                best_sel_prio = lpni_sel_prio;
                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                             LNET_NID_NET(dst_nid);

                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                       libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nidstr(&best_lpni->lpni_nid));

        return best_lpni;
}
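
/* Selection order implemented above; the first difference wins:
 *   1. higher peer NI health value
 *   2. lower selection priority value
 *   3. peer NI preferred for the chosen best_ni
 *   4. more available transmit credits
 *   5. lower lpni_seq, i.e. round-robin among otherwise equal peer NIs
 */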

/*
 * Prerequisite: the best_ni should already be set in the sd.
 * Find the best lpni.
 * If the net id is provided then restrict lpni selection to
 * that particular net.
 * Otherwise find any reachable lpni. When dealing with an MR
 * gateway that has multiple lpnis we could use, we want to
 * select the best one from the list of reachable ones.
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni(struct lnet_ni *lni, struct lnet_nid *dst_nid,
                    struct lnet_peer *peer, u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /* find the best_lpni on any local network */
        if (net_id == LNET_NET_ANY) {
                struct lnet_peer_ni *best_lpni = NULL;
                struct lnet_peer_net *lpn;

                list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
                        /* no net specified, find any reachable peer ni */
                        if (!lnet_islocalnet_locked(lpn->lpn_net_id))
                                continue;
                        best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
                                                        best_lpni, lpn);
                }

                return best_lpni;
        }
        /* restrict on the specified net */
        peer_net = lnet_peer_get_net_locked(peer, net_id);
        if (peer_net)
                return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);

        return NULL;
}

static int
lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
{
        if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
                return 1;

        if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
                return -1;

        if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
                return 1;

        if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
                return -1;

        return 0;
}

/* Compare route priorities and hop counts */
static int
lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
{
        int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
        int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;

        if (r1->lr_priority < r2->lr_priority)
                return 1;

        if (r1->lr_priority > r2->lr_priority)
                return -1;

        if (r1_hops < r2_hops)
                return 1;

        if (r1_hops > r2_hops)
                return -1;

        return 0;
}
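
/* For example (illustrative): a priority-0, 3-hop route beats a
 * priority-1, 1-hop route, because route priority is compared before hop
 * count and a lower priority value wins; LNET_UNDEFINED_HOPS is treated
 * as a hop count of 1.
 */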
1470
1471 static struct lnet_route *
1472 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1473                        struct lnet_peer_ni *remote_lpni,
1474                        struct lnet_route **prev_route,
1475                        struct lnet_peer_ni **gwni)
1476 {
1477         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1478         struct lnet_route *best_route;
1479         struct lnet_route *last_route;
1480         struct lnet_route *route;
1481         int rc;
1482         bool best_rte_is_preferred = false;
1483         struct lnet_nid *gw_pnid;
1484
1485         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1486                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1487
1488         best_route = last_route = NULL;
1489         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1490                 if (!lnet_is_route_alive(route))
1491                         continue;
1492                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1493
1494                 /* no protection on the fields below, but it's harmless */
1495                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1496                         last_route = route;
1497
1498                 /* if the best route found is in the preferred list then
1499                  * tag it as preferred and use it later on. But if we
1500                  * didn't find any routes which are on the preferred list
1501                  * then just use the best route possible.
1502                  */
1503                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1504
1505                 if (!best_route || (rc && !best_rte_is_preferred)) {
1506                         /* Restrict the selection of the router NI to the
1507                          * src_net provided. If src_net is LNET_NET_ANY,
1508                          * then select the best interface available.
1509                          */
1510                         lpni = lnet_find_best_lpni(NULL, NULL,
1511                                                    route->lr_gateway,
1512                                                    src_net);
1513                         if (!lpni) {
1514                                 CDEBUG(D_NET,
1515                                        "Gateway %s does not have a peer NI on net %s\n",
1516                                        libcfs_nidstr(gw_pnid),
1517                                        libcfs_net2str(src_net));
1518                                 continue;
1519                         }
1520                 }
1521
1522                 if (rc && !best_rte_is_preferred) {
1523                         /* This is the first preferred route we found,
1524                          * so it beats any route found previously
1525                          */
1526                         best_route = route;
1527                         if (!last_route)
1528                                 last_route = route;
1529                         best_gw_ni = lpni;
1530                         best_rte_is_preferred = true;
1531                         CDEBUG(D_NET, "preferred gw = %s\n",
1532                                libcfs_nidstr(gw_pnid));
1533                         continue;
1534                 } else if ((!rc) && best_rte_is_preferred)
1535                         /* The best route we found so far is in the preferred
1536                          * list, so it beats any non-preferred route
1537                          */
1538                         continue;
1539
1540                 if (!best_route) {
1541                         best_route = last_route = route;
1542                         best_gw_ni = lpni;
1543                         continue;
1544                 }
1545
1546                 rc = lnet_compare_routes(route, best_route);
1547                 if (rc == -1)
1548                         continue;
1549
1550                 /* Restrict the selection of the router NI to the
1551                  * src_net provided. If src_net is LNET_NET_ANY,
1552                  * then select the best interface available.
1553                  */
1554                 lpni = lnet_find_best_lpni(NULL, NULL, route->lr_gateway,
1555                                            src_net);
1556                 if (!lpni) {
1557                         CDEBUG(D_NET,
1558                                "Gateway %s does not have a peer NI on net %s\n",
1559                                libcfs_nidstr(gw_pnid),
1560                                libcfs_net2str(src_net));
1561                         continue;
1562                 }
1563
1564                 if (rc == 1) {
1565                         best_route = route;
1566                         best_gw_ni = lpni;
1567                         continue;
1568                 }
1569
1570                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1571                 if (rc == -1)
1572                         continue;
1573
1574                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1575                         best_route = route;
1576                         best_gw_ni = lpni;
1577                         continue;
1578                 }
1579         }
1580
1581         *prev_route = last_route;
1582         *gwni = best_gw_ni;
1583
1584         return best_route;
1585 }
1586
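/* Query the LND for the device priority of ni with respect to the device
 * at dev_idx (e.g. the GPU holding the MD buffer). Returns UINT_MAX when
 * no device index is given or the LND does not provide lnd_get_dev_prio.
 */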
1587 static inline unsigned int
1588 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1589 {
1590         if (dev_idx == UINT_MAX)
1591                 return UINT_MAX;
1592
1593         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1594             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1595                 return UINT_MAX;
1596
1597         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1598 }
1599
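/* Select the best local NI on local_net for sending to the given peer on
 * peer_net. Starting from the current best_ni (which may be NULL), NIs
 * are compared in order on: fatal error state, health, selection
 * priority, device priority, CPT distance from the MD memory, available
 * tx credits and, finally, the NI sequence number for round robin.
 */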
1600 static struct lnet_ni *
1601 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1602                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1603                  struct lnet_msg *msg, int md_cpt)
1604 {
1605         struct lnet_libmd *md = msg->msg_md;
1606         unsigned int offset = msg->msg_offset;
1607         unsigned int shortest_distance;
1608         struct lnet_ni *ni = NULL;
1609         int best_credits;
1610         int best_healthv;
1611         __u32 best_sel_prio;
1612         unsigned int best_dev_prio;
1613         int best_ni_fatal;
1614         unsigned int dev_idx = UINT_MAX;
1615         bool gpu = lnet_md_is_gpu(md);
1616
1617         if (gpu) {
1618                 struct page *page = lnet_get_first_page(md, offset);
1619
1620                 dev_idx = lnet_get_dev_idx(page);
1621         }
1622
1623         /*
1624          * If there is no peer_ni that we can send to on this network,
1625          * then there is no point in looking for a new best_ni here.
1626         */
1627         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1628                 return best_ni;
1629
1630         if (best_ni == NULL) {
1631                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1632                 shortest_distance = UINT_MAX;
1633                 best_dev_prio = UINT_MAX;
1634                 best_credits = INT_MIN;
1635                 best_healthv = 0;
1636                 best_ni_fatal = true;
1637         } else {
1638                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1639                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1640                                                      best_ni->ni_dev_cpt);
1641                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1642                 best_healthv = atomic_read(&best_ni->ni_healthv);
1643                 best_sel_prio = best_ni->ni_sel_priority;
1644                 best_ni_fatal = atomic_read(&best_ni->ni_fatal_error_on);
1645         }
1646
1647         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1648                 unsigned int distance;
1649                 int ni_credits;
1650                 int ni_healthv;
1651                 int ni_fatal;
1652                 __u32 ni_sel_prio;
1653                 unsigned int ni_dev_prio;
1654
1655                 ni_credits = atomic_read(&ni->ni_tx_credits);
1656                 ni_healthv = atomic_read(&ni->ni_healthv);
1657                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1658                 ni_sel_prio = ni->ni_sel_priority;
1659
1660                 /*
1661                  * calculate the distance from the CPT on which
1662                  * the message memory is allocated to the CPT of
1663                  * the NI's physical device
1664                  */
1665                 distance = cfs_cpt_distance(lnet_cpt_table(),
1666                                             md_cpt,
1667                                             ni->ni_dev_cpt);
1668
1669                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1670
1671                 /*
1672                  * All distances smaller than the NUMA range
1673                  * are treated equally.
1674                  */
1675                 if (!gpu && distance < lnet_numa_range)
1676                         distance = lnet_numa_range;
1677
1678                 /*
1679                  * Select on health, selection policy, direct dma prio,
1680                  * shorter distance, available credits, then round-robin.
1681                  */
1682                 if (best_ni)
1683                         CDEBUG(D_NET, "compare ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [f:%s, c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n",
1684                                libcfs_nidstr(&ni->ni_nid),
1685                                ni_fatal ? "y" : "n", ni_credits, distance,
1686                                ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv,
1687                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1688                                : "not selected",
1689                                best_ni_fatal ? "y" : "n", best_credits,
1690                                shortest_distance,
1691                                (best_ni) ? best_ni->ni_seq : 0,
1692                                best_sel_prio, best_dev_prio, best_healthv);
1693                 else
1694                         goto select_ni;
1695
1696                 if (ni_fatal && !best_ni_fatal)
1697                         continue;
1698                 else if (!ni_fatal && best_ni_fatal)
1699                         goto select_ni;
1700
1701                 if (ni_healthv < best_healthv)
1702                         continue;
1703                 else if (ni_healthv > best_healthv)
1704                         goto select_ni;
1705
1706                 if (ni_sel_prio > best_sel_prio)
1707                         continue;
1708                 else if (ni_sel_prio < best_sel_prio)
1709                         goto select_ni;
1710
1711                 if (ni_dev_prio > best_dev_prio)
1712                         continue;
1713                 else if (ni_dev_prio < best_dev_prio)
1714                         goto select_ni;
1715
1716                 if (distance > shortest_distance)
1717                         continue;
1718                 else if (distance < shortest_distance)
1719                         goto select_ni;
1720
1721                 if (ni_credits < best_credits)
1722                         continue;
1723                 else if (ni_credits > best_credits)
1724                         goto select_ni;
1725
1726                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1727                         continue;
1728
1729 select_ni:
1730                 best_sel_prio = ni_sel_prio;
1731                 best_dev_prio = ni_dev_prio;
1732                 shortest_distance = distance;
1733                 best_healthv = ni_healthv;
1734                 best_ni = ni;
1735                 best_credits = ni_credits;
1736                 best_ni_fatal = ni_fatal;
1737         }
1738
1739         CDEBUG(D_NET, "selected best_ni %s\n",
1740                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1741
1742         return best_ni;
1743 }
1744
1745 static bool
1746 lnet_reserved_msg(struct lnet_msg *msg)
1747 {
1748         if (msg->msg_type == LNET_MSG_PUT) {
1749                 if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL)
1750                         return true;
1751         } else if (msg->msg_type == LNET_MSG_GET) {
1752                 if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1753                         return true;
1754         }
1755         return false;
1756 }
1757
1758 /* Can the specified message trigger peer discovery?
1759  *
1760  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1761  * because such traffic is required to perform discovery. We therefore
1762  * exclude all GET and PUT on that portal. We also exclude all ACK and
1763  * REPLY traffic, but that is because the portal is not tracked in the
1764  * message structure for these message types. We could restrict this
1765  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1766  */
1767 static bool
1768 lnet_msg_discovery(struct lnet_msg *msg)
1769 {
1770         return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg));
1771 }
1772
1773 /* Is the specified message an LNet ping?
1774  */
1775 static bool
1776 lnet_msg_is_ping(struct lnet_msg *msg)
1777 {
1778         if (msg->msg_type == LNET_MSG_GET &&
1779             msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL)
1780                 return true;
1781
1782         return false;
1783 }
1784
1785 #define SRC_SPEC        0x0001
1786 #define SRC_ANY         0x0002
1787 #define LOCAL_DST       0x0004
1788 #define REMOTE_DST      0x0008
1789 #define MR_DST          0x0010
1790 #define NMR_DST         0x0020
1791 #define SND_RESP        0x0040
1792
1793 /* The following two defines are used for return codes */
1794 #define REPEAT_SEND     0x1000
1795 #define PASS_THROUGH    0x2000
1796
1797 /* The different cases lnet_select pathway needs to handle */
1798 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1799 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1800 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1801 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1802 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1803 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1804 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1805 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
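/* For example, a message with an explicitly specified source NID that
 * must be routed to a multi-rail peer is handled under
 * SRC_SPEC_ROUTER_MR_DST, i.e. (SRC_SPEC | REMOTE_DST | MR_DST).
 */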
1806
1807 static int
1808 lnet_handle_lo_send(struct lnet_send_data *sd)
1809 {
1810         struct lnet_msg *msg = sd->sd_msg;
1811         int cpt = sd->sd_cpt;
1812
1813         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1814                 return -ESHUTDOWN;
1815
1816         /* No send credit hassles with LOLND */
1817         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1818         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1819         if (!msg->msg_routing)
1820                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1821         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1822         lnet_msg_commit(msg, cpt);
1823         msg->msg_txni = the_lnet.ln_loni;
1824
1825         return LNET_CREDIT_OK;
1826 }
1827
1828 static int
1829 lnet_handle_send(struct lnet_send_data *sd)
1830 {
1831         struct lnet_ni *best_ni = sd->sd_best_ni;
1832         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1833         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1834         struct lnet_msg *msg = sd->sd_msg;
1835         int cpt2;
1836         __u32 send_case = sd->sd_send_case;
1837         int rc;
1838         __u32 routing = send_case & REMOTE_DST;
1839         struct lnet_rsp_tracker *rspt;
1840
1841         /* Increment sequence number of the selected peer, peer net,
1842          * local ni and local net so that we pick the next ones
1843          * in Round Robin.
1844          */
1845         best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1846         best_lpni->lpni_peer_net->lpn_seq =
1847                 best_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1848         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1849         the_lnet.ln_net_seq++;
1850         best_ni->ni_net->net_seq = the_lnet.ln_net_seq;
1851         best_ni->ni_seq = best_ni->ni_net->net_seq;
1852
1853         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1854                libcfs_nidstr(&best_ni->ni_nid),
1855                best_ni->ni_seq, best_ni->ni_net->net_seq,
1856                atomic_read(&best_ni->ni_tx_credits),
1857                best_ni->ni_sel_priority,
1858                libcfs_nidstr(&best_lpni->lpni_nid),
1859                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1860                best_lpni->lpni_txcredits,
1861                best_lpni->lpni_sel_priority);
1862
1863         /*
1864          * grab a reference on the peer_ni so it sticks around even if
1865          * we need to drop and relock the lnet_net_lock below.
1866          */
1867         lnet_peer_ni_addref_locked(best_lpni);
1868
1869         /*
1870          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1871          * message. This ensures that we get a CPT that is correct for
1872          * the NI when the NI has been restricted to a subset of all CPTs.
1873          * If the selected CPT differs from the one currently locked, we
1874          * must unlock and relock the lnet_net_lock(), and then check whether
1875          * the configuration has changed. We don't have a hold on the best_ni
1876          * yet, and it may have vanished.
1877          */
1878         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1879         if (sd->sd_cpt != cpt2) {
1880                 __u32 seq = lnet_get_dlc_seq_locked();
1881                 lnet_net_unlock(sd->sd_cpt);
1882                 sd->sd_cpt = cpt2;
1883                 lnet_net_lock(sd->sd_cpt);
1884                 if (seq != lnet_get_dlc_seq_locked()) {
1885                         lnet_peer_ni_decref_locked(best_lpni);
1886                         return REPEAT_SEND;
1887                 }
1888         }
1889
1890         /*
1891          * store the best_lpni in the message right away to avoid having
1892          * to do the same operation under different conditions
1893          */
1894         msg->msg_txpeer = best_lpni;
1895         msg->msg_txni = best_ni;
1896
1897         /*
1898          * grab a reference for the best_ni since now it's in use in this
1899          * send. The reference will be dropped in lnet_finalize()
1900          */
1901         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1902
1903         /*
1904          * Always set the target.nid to the best peer picked. The NID
1905          * will either be one of the selected peer NIDs, the same NID
1906          * that was originally set in the target, or the NID of a
1907          * router if this message is to be routed.
1908          */
1909         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1910
1911         /*
1912          * lnet_msg_commit assigns the correct cpt to the message, which
1913          * is used to decrement the correct refcount on the ni when it's
1914          * time to return the credits
1915          */
1916         lnet_msg_commit(msg, sd->sd_cpt);
1917
1918         /*
1919          * If we are routing the message then we keep the src_nid that was
1920          * set by the originator. If we are not routing then we are the
1921          * originator and set it here.
1922          */
1923         if (!msg->msg_routing)
1924                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1925
1926         if (routing) {
1927                 msg->msg_target_is_router = 1;
1928                 msg->msg_target.pid = LNET_PID_LUSTRE;
1929                 /*
1930                  * since we're routing we want to ensure that the
1931                  * msg_hdr.dest_nid is set to the final destination. When
1932                  * the router receives this message it knows how to route
1933                  * it.
1934                  *
1935                  * final_dst_lpni is set at the beginning of the
1936                  * lnet_select_pathway() function and is never changed.
1937                  * It's safe to use it here.
1938                  */
1939                 final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++;
1940                 final_dst_lpni->lpni_peer_net->lpn_seq =
1941                         final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq;
1942                 final_dst_lpni->lpni_seq =
1943                         final_dst_lpni->lpni_peer_net->lpn_seq;
1944                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1945         } else {
1946                 /*
1947                  * if we're not routing set the dest_nid to the best peer
1948                  * ni NID that we picked earlier in the algorithm.
1949                  */
1950                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1951         }
1952
1953         /*
1954          * if we have a response tracker block, update it with the next
1955          * hop nid
1956          */
1957         if (msg->msg_md) {
1958                 rspt = msg->msg_md->md_rspt_ptr;
1959                 if (rspt) {
1960                         rspt->rspt_next_hop_nid =
1961                                 msg->msg_txpeer->lpni_nid;
1962                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1963                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1964                 }
1965         }
1966
1967         rc = lnet_post_send_locked(msg, 0);
1968
1969         if (!rc)
1970                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1971                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1972                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1973                        libcfs_nidstr(&sd->sd_src_nid),
1974                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1975                        libcfs_nidstr(&sd->sd_dst_nid),
1976                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1977                        libcfs_nidstr(&sd->sd_rtr_nid),
1978                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1979
1980         return rc;
1981 }
1982
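/* For a non-MR peer with no preferred NID recorded yet, remember lni as
 * the source NID to use when talking to lpni, so that subsequent messages
 * to this peer keep a consistent source address.
 */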
1983 static inline void
1984 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1985                          struct lnet_msg *msg)
1986 {
1987         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1988             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1989                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1990                        libcfs_nidstr(&lni->ni_nid),
1991                        libcfs_nidstr(&lpni->lpni_nid));
1992                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
1993         }
1994 }
1995
1996 /*
1997  * Source Specified
1998  * Local Destination
1999  * non-mr peer
2000  *
2001  * use the source and destination NIDs as the pathway
2002  */
2003 static int
2004 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2005 {
2006         /* the destination lpni is set before we get here. */
2007
2008         /* find local NI */
2009         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2010         if (!sd->sd_best_ni) {
2011                 CERROR("Can't send to %s: src %s is not a local nid\n",
2012                        libcfs_nidstr(&sd->sd_dst_nid),
2013                        libcfs_nidstr(&sd->sd_src_nid));
2014                 return -EINVAL;
2015         }
2016
2017         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2018
2019         return lnet_handle_send(sd);
2020 }
2021
2022 /*
2023  * Source Specified
2024  * Local Destination
2025  * MR Peer
2026  *
2027  * Don't run the selection algorithm on the peer NIs. By specifying the
2028  * local NID, we're also saying that we should always use the destination NID
2029  * provided. This handles the case where we should be using the same
2030  * destination NID for all the messages which belong to the same RPC
2031  * request.
2032  */
2033 static int
2034 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2035 {
2036         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2037         if (!sd->sd_best_ni) {
2038                 CERROR("Can't send to %s: src %s is not a local nid\n",
2039                        libcfs_nidstr(&sd->sd_dst_nid),
2040                        libcfs_nidstr(&sd->sd_src_nid));
2041                 return -EINVAL;
2042         }
2043
2044         if (sd->sd_best_lpni &&
2045             nid_same(&sd->sd_best_lpni->lpni_nid,
2046                       &the_lnet.ln_loni->ni_nid))
2047                 return lnet_handle_lo_send(sd);
2048         else if (sd->sd_best_lpni)
2049                 return lnet_handle_send(sd);
2050
2051         CERROR("can't send to %s. no NI on %s\n",
2052                libcfs_nidstr(&sd->sd_dst_nid),
2053                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2054
2055         return -EHOSTUNREACH;
2056 }
2057
2058 static struct lnet_ni *
2059 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2060                               struct lnet_peer *peer,
2061                               struct lnet_peer_net *peer_net,
2062                               struct lnet_msg *msg,
2063                               int cpt)
2064 {
2065         struct lnet_net *local_net;
2066         struct lnet_ni *best_ni;
2067
2068         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2069         if (!local_net)
2070                 return NULL;
2071
2072         /*
2073          * Iterate through the NIs in this local Net and select
2074          * the NI to send from. The selection is determined by
2075          * these 3 criteria in the following priority:
2076          *      1. NUMA
2077          *      2. NI available credits
2078          *      3. Round Robin
2079          */
2080         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2081                                    peer, peer_net, msg, cpt);
2082
2083         return best_ni;
2084 }
2085
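/* Kick off discovery of lpni's peer if the message is allowed to trigger
 * it and the peer is not already up to date. Returns 0 when nothing needs
 * to be done, LNET_DC_WAIT when the message has been queued on the peer
 * pending discovery, or a negative errno on failure.
 */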
2086 static int
2087 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2088                              int cpt)
2089 {
2090         struct lnet_peer *peer;
2091         struct lnet_peer_ni *new_lpni;
2092         int rc;
2093
2094         lnet_peer_ni_addref_locked(lpni);
2095
2096         peer = lpni->lpni_peer_net->lpn_peer;
2097
2098         if (lnet_peer_gw_discovery(peer)) {
2099                 lnet_peer_ni_decref_locked(lpni);
2100                 return 0;
2101         }
2102
2103         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2104                 lnet_peer_ni_decref_locked(lpni);
2105                 return 0;
2106         }
2107
2108         rc = lnet_discover_peer_locked(lpni, cpt, false);
2109         if (rc) {
2110                 lnet_peer_ni_decref_locked(lpni);
2111                 return rc;
2112         }
2113
2114         new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid);
2115         if (!new_lpni) {
2116                 lnet_peer_ni_decref_locked(lpni);
2117                 return -ENOENT;
2118         }
2119
2120         peer = new_lpni->lpni_peer_net->lpn_peer;
2121         spin_lock(&peer->lp_lock);
2122         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2123                 /* The peer NI did not change and the peer is up to date.
2124                  * Nothing more to do.
2125                  */
2126                 spin_unlock(&peer->lp_lock);
2127                 lnet_peer_ni_decref_locked(lpni);
2128                 lnet_peer_ni_decref_locked(new_lpni);
2129                 return 0;
2130         }
2131         spin_unlock(&peer->lp_lock);
2132
2133         /* Either the peer NI changed during discovery, or the peer isn't up
2134          * to date. In both cases we want to queue the message on the
2135          * (possibly new) peer's pending queue and queue the peer for discovery
2136          */
2137         msg->msg_sending = 0;
2138         msg->msg_txpeer = NULL;
2139         lnet_net_unlock(cpt);
2140         lnet_peer_queue_message(peer, msg);
2141         lnet_net_lock(cpt);
2142
2143         lnet_peer_ni_decref_locked(lpni);
2144         lnet_peer_ni_decref_locked(new_lpni);
2145
2146         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2147                msg, libcfs_nidstr(&peer->lp_primary_nid));
2148
2149         return LNET_DC_WAIT;
2150 }
2151
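/* Find a routed path to dst_nid: pick the remote peer net, the best route
 * and gateway to it, and, if a local NI has not been chosen yet, the
 * local NI used to reach that gateway. On success *gw_lpni and *gw_peer
 * are set and 0 is returned. A negative errno means no usable route was
 * found; a positive value (from peer discovery) means the message has
 * been queued and will be sent once discovery completes.
 */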
2152 static int
2153 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2154                              struct lnet_nid *dst_nid,
2155                              struct lnet_peer_ni **gw_lpni,
2156                              struct lnet_peer **gw_peer)
2157 {
2158         int rc = 0;
2159         struct lnet_peer *gw;
2160         struct lnet_peer *lp;
2161         struct lnet_peer_net *lpn;
2162         struct lnet_peer_net *best_lpn = NULL;
2163         struct lnet_remotenet *rnet, *best_rnet = NULL;
2164         struct lnet_route *best_route = NULL;
2165         struct lnet_route *last_route = NULL;
2166         struct lnet_peer_ni *lpni = NULL;
2167         struct lnet_peer_ni *gwni = NULL;
2168         bool route_found = false;
2169         bool gwni_decref = false;
2170         struct lnet_nid *src_nid =
2171                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2172                 ? &sd->sd_src_nid
2173                 : &sd->sd_best_ni->ni_nid;
2174         int best_lpn_healthv = 0;
2175         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2176
2177         CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n",
2178                LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified",
2179                libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid),
2180                libcfs_nidstr(&sd->sd_dst_nid));
2181
2182         /* If a router nid was specified then we are replying to a GET or
2183          * sending an ACK. In this case we use the gateway associated with the
2184          * specified router nid.
2185          */
2186         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2187                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2188                 if (gwni) {
2189                         gw = gwni->lpni_peer_net->lpn_peer;
2190                         if (gw->lp_rtr_refcount) {
2191                                 gwni_decref = true;
2192                                 route_found = true;
2193                         } else {
2194                                 lnet_peer_ni_decref_locked(gwni);
2195                                 gwni = NULL;
2196                                 gw = NULL;
2197                         }
2198                 }
2199                 if (!gwni)
2200                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2201                               libcfs_nidstr(&sd->sd_rtr_nid));
2202         }
2203
2204         if (!route_found) {
2205                 if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) {
2206                         /* If I'm routing this message then I need to find the
2207                          * next hop based on the destination NID
2208                          *
2209                          * We also find next hop based on the destination NID
2210                          * if the source NI was specified
2211                          */
2212                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2213                         if (!best_rnet) {
2214                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2215                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2216                                                 "any local NI" :
2217                                                 libcfs_nidstr(src_nid),
2218                                        libcfs_nidstr(&sd->sd_dst_nid));
2219                                 rc = -EHOSTUNREACH;
2220                                 goto out;
2221                         }
2222                         CDEBUG(D_NET, "best_rnet %s\n",
2223                                libcfs_net2str(best_rnet->lrn_net));
2224                 } else {
2225                         /* we've already looked up the initial lpni using
2226                          * dst_nid
2227                          */
2228                         lpni = sd->sd_best_lpni;
2229                         /* the peer tree must already exist */
2230                         LASSERT(lpni && lpni->lpni_peer_net &&
2231                                 lpni->lpni_peer_net->lpn_peer);
2232                         lp = lpni->lpni_peer_net->lpn_peer;
2233
2234                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2235                                 /* is this remote network reachable?  */
2236                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2237                                 if (!rnet)
2238                                         continue;
2239
2240                                 if (!best_lpn)
2241                                         goto use_lpn;
2242                                 else
2243                                         CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n",
2244                                                libcfs_net2str(lpn->lpn_net_id),
2245                                                libcfs_net2str(best_lpn->lpn_net_id),
2246                                                lpn->lpn_healthv,
2247                                                best_lpn->lpn_healthv,
2248                                                lpn->lpn_sel_priority,
2249                                                best_lpn->lpn_sel_priority,
2250                                                lpn->lpn_seq,
2251                                                best_lpn->lpn_seq);
2252
2253                                 /* select the preferred peer net */
2254                                 if (best_lpn_healthv > lpn->lpn_healthv)
2255                                         continue;
2256                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2257                                         goto use_lpn;
2258
2259                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2260                                         continue;
2261                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2262                                         goto use_lpn;
2263
2264                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2265                                         continue;
2266 use_lpn:
2267                                 best_lpn_healthv = lpn->lpn_healthv;
2268                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2269                                 best_lpn = lpn;
2270                                 best_rnet = rnet;
2271                         }
2272
2273                         if (!best_lpn) {
2274                                 CERROR("peer %s has no available nets\n",
2275                                        libcfs_nidstr(&sd->sd_dst_nid));
2276                                 rc = -EHOSTUNREACH;
2277                                 goto out;
2278                         }
2279
2280                         CDEBUG(D_NET, "selected best_lpn %s\n",
2281                                libcfs_net2str(best_lpn->lpn_net_id));
2282
2283                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2284                                                                &sd->sd_dst_nid,
2285                                                                lp,
2286                                                                best_lpn->lpn_net_id);
2287                         if (!sd->sd_best_lpni) {
2288                                 CERROR("peer %s is unreachable\n",
2289                                        libcfs_nidstr(&sd->sd_dst_nid));
2290                                 rc = -EHOSTUNREACH;
2291                                 goto out;
2292                         }
2293
2294                         /* We're attempting to round robin over the remote peer
2295                          * NIs, so update the final destination we selected
2296                          */
2297                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2298                 }
2299
2300                 /*
2301                  * find the best route. Restrict the selection to the net of the
2302                  * local NI if we've already picked the local NI to send from.
2303                  * Otherwise, let's pick any route we can find and then find
2304                  * a local NI we can reach the route's gateway on. Any route we
2305                  * select will be reachable by virtue of the restriction we have
2306                  * when adding a route.
2307                  */
2308                 best_route = lnet_find_route_locked(best_rnet,
2309                                                     LNET_NID_NET(src_nid),
2310                                                     sd->sd_best_lpni,
2311                                                     &last_route, &gwni);
2312
2313                 if (!best_route) {
2314                         CERROR("no route to %s from %s\n",
2315                                libcfs_nidstr(dst_nid),
2316                                libcfs_nidstr(src_nid));
2317                         rc = -EHOSTUNREACH;
2318                         goto out;
2319                 }
2320
2321                 if (!gwni) {
2322                         CERROR("Internal Error. Route expected to %s from %s\n",
2323                                libcfs_nidstr(dst_nid),
2324                                libcfs_nidstr(src_nid));
2325                         rc = -EFAULT;
2326                         goto out;
2327                 }
2328
2329                 gw = best_route->lr_gateway;
2330                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2331         }
2332
2333         /*
2334          * If the router checker is not active then discover the gateway here.
2335          * This ensures we are able to take advantage of multi-rail routing, but
2336          * if the router checker is active then we do not unnecessarily delay
2337          * messages while the gateway is being checked by the dedicated monitor
2338          * thread.
2339          *
2340          * NB: We're only checking the alive_router_check_interval here, rather
2341          * than calling lnet_router_checker_active(), because the other
2342          * conditions that are checked by that function are either
2343          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2344          * is not empty)
2345          */
2346         if (alive_router_check_interval <= 0) {
2347                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2348                 if (rc)
2349                         goto out;
2350         }
2351
2352         if (!sd->sd_best_ni) {
2353                 lpn = gwni->lpni_peer_net;
2354                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2355                                                                sd->sd_msg,
2356                                                                sd->sd_md_cpt);
2357                 if (!sd->sd_best_ni) {
2358                         CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2359                                libcfs_net2str(lpn->lpn_net_id),
2360                                libcfs_nidstr(&sd->sd_src_nid));
2361                         rc = -EFAULT;
2362                         goto out;
2363                 }
2364         }
2365
2366         *gw_lpni = gwni;
2367         *gw_peer = gw;
2368
2369         /*
2370          * increment the sequence number since now we're sure we're
2371          * going to use this route
2372          */
2373         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2374                 LASSERT(best_route && last_route);
2375                 best_route->lr_seq = last_route->lr_seq + 1;
2376         }
2377
2378 out:
2379         if (gwni_decref && gwni)
2380                 lnet_peer_ni_decref_locked(gwni);
2381
2382         return rc;
2383 }
2384
2385 /*
2386  * Handle two cases:
2387  *
2388  * Case 1:
2389  *  Source specified
2390  *  Remote destination
2391  *  Non-MR destination
2392  *
2393  * Case 2:
2394  *  Source specified
2395  *  Remote destination
2396  *  MR destination
2397  *
2398  * The handling of these two cases is similar. Even though the destination
2399  * can be MR or non-MR, we'll deal directly with the router.
2400  */
2401 static int
2402 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2403 {
2404         int rc;
2405         struct lnet_peer_ni *gw_lpni = NULL;
2406         struct lnet_peer *gw_peer = NULL;
2407
2408         /* find local NI */
2409         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2410         if (!sd->sd_best_ni) {
2411                 CERROR("Can't send to %s: src %s is not a local nid\n",
2412                        libcfs_nidstr(&sd->sd_dst_nid),
2413                        libcfs_nidstr(&sd->sd_src_nid));
2414                 return -EINVAL;
2415         }
2416
2417         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2418                                           &gw_lpni, &gw_peer);
2419         if (rc)
2420                 return rc;
2421
2422         if (sd->sd_send_case & NMR_DST)
2423                 /*
2424                  * since the final destination is non-MR let's set its preferred
2425                  * NID before we send
2426                  */
2427                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2428                                          sd->sd_msg);
2429
2430         /*
2431          * We're going to send to the gw found so let's set its
2432          * info
2433          */
2434         sd->sd_peer = gw_peer;
2435         sd->sd_best_lpni = gw_lpni;
2436
2437         return lnet_handle_send(sd);
2438 }
2439
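/* Choose the best local NI for sending to a peer when no source NI was
 * specified. Peer nets that are also local are compared on health,
 * selection priority and sequence numbers (for round robin), then the
 * best NI on the winning net is returned, or NULL if the peer cannot be
 * reached on any local net.
 */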
2440 static struct lnet_ni *
2441 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2442                                struct lnet_msg *msg, bool discovery)
2443 {
2444         struct lnet_peer_net *lpn = NULL;
2445         struct lnet_peer_net *best_lpn = NULL;
2446         struct lnet_net *net = NULL;
2447         struct lnet_net *best_net = NULL;
2448         struct lnet_ni *best_ni = NULL;
2449         int best_lpn_healthv = 0;
2450         int best_net_healthv = 0;
2451         int net_healthv;
2452         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2453         __u32 lpn_sel_prio;
2454         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2455         __u32 net_sel_prio;
2456
2457         /* If lp_disc_net_id is set, this peer is a router undergoing
2458          * discovery, and this message is an LNet ping, then this may be a
2459          * discovery message and we need to select an NI on the peer net
2460          * specified by lp_disc_net_id
2461          */
2462         if (peer->lp_disc_net_id &&
2463             (peer->lp_state & LNET_PEER_RTR_DISCOVERY) &&
2464             lnet_msg_is_ping(msg)) {
2465                 best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id);
2466                 if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id))
2467                         goto select_best_ni;
2468         }
2469
2470         /*
2471          * The peer can have multiple interfaces, some of them on the
2472          * local network and others on a routed network. We should
2473          * prefer the local network. However, if the local network is
2474          * not available then we need to try the routed network.
2475          */
2476
2477         /* go through all the peer nets and find the best_ni */
2478         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2479                 /*
2480                  * The peer's list of nets can contain non-local nets. We
2481                  * want to only examine the local ones.
2482                  */
2483                 net = lnet_get_net_locked(lpn->lpn_net_id);
2484                 if (!net)
2485                         continue;
2486
2487                 lpn_sel_prio = lpn->lpn_sel_priority;
2488                 net_healthv = lnet_get_net_healthv_locked(net);
2489                 net_sel_prio = net->net_sel_priority;
2490
2491                 if (!best_lpn || !best_net)
2492                         goto select_lpn;
2493                 else
2494                         CDEBUG(D_NET,
2495                                "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n",
2496                                libcfs_net2str(lpn->lpn_net_id),
2497                                libcfs_net2str(best_lpn->lpn_net_id),
2498                                lpn->lpn_healthv,
2499                                best_lpn_healthv,
2500                                lpn_sel_prio,
2501                                best_lpn_sel_prio,
2502                                net_healthv,
2503                                best_net_healthv,
2504                                net_sel_prio,
2505                                best_net_sel_prio,
2506                                lpn->lpn_seq,
2507                                best_lpn->lpn_seq,
2508                                net->net_seq,
2509                                best_net->net_seq);
2510
2511                 /* always select the lpn with the best health */
2512                 if (best_lpn_healthv > lpn->lpn_healthv)
2513                         continue;
2514                 else if (best_lpn_healthv < lpn->lpn_healthv)
2515                         goto select_lpn;
2516
2517                 /* select the preferred peer and local nets */
2518                 if (best_lpn_sel_prio < lpn_sel_prio)
2519                         continue;
2520                 else if (best_lpn_sel_prio > lpn_sel_prio)
2521                         goto select_lpn;
2522
2523                 if (best_net_healthv > net_healthv)
2524                         continue;
2525                 else if (best_net_healthv < net_healthv)
2526                         goto select_lpn;
2527
2528                 if (best_net_sel_prio < net_sel_prio)
2529                         continue;
2530                 else if (best_net_sel_prio > net_sel_prio)
2531                         goto select_lpn;
2532
2533                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2534                         continue;
2535                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2536                         goto select_lpn;
2537
2538                 /* round robin over the local networks */
2539                 if (best_net->net_seq <= net->net_seq)
2540                         continue;
2541
2542 select_lpn:
2543                 best_net_healthv = net_healthv;
2544                 best_net_sel_prio = net_sel_prio;
2545                 best_lpn_healthv = lpn->lpn_healthv;
2546                 best_lpn_sel_prio = lpn_sel_prio;
2547                 best_lpn = lpn;
2548                 best_net = net;
2549         }
2550
2551         if (best_lpn) {
2552                 /* Select the best NI on the same net as best_lpn chosen
2553                  * above
2554                  */
2555 select_best_ni:
2556                 CDEBUG(D_NET, "selected best_lpn %s\n",
2557                        libcfs_net2str(best_lpn->lpn_net_id));
2558                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2559                                                         msg, md_cpt);
2560         }
2561
2562         return best_ni;
2563 }
2564
2565 static struct lnet_ni *
2566 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2567 {
2568         struct lnet_ni *best_ni = NULL;
2569         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2570         struct lnet_peer_ni *lpni_entry;
2571
2572         /*
2573          * We must use a consistent source address when sending to a
2574          * non-MR peer. However, a non-MR peer can have multiple NIDs
2575          * on multiple networks, and we may even need to talk to this
2576          * peer on multiple networks -- certain types of
2577          * load-balancing configuration do this.
2578          *
2579          * So we need to pick the NI the peer prefers for this
2580          * particular network.
2581          */
2582         LASSERT(peer_net);
2583         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2584                             lpni_peer_nis) {
2585                 if (lpni_entry->lpni_pref_nnids == 0)
2586                         continue;
2587                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2588                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2589                                                 cpt);
2590                 break;
2591         }
2592
2593         return best_ni;
2594 }
2595
2596 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2597 static int
2598 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2599 {
2600         struct lnet_ni *best_ni = NULL;
2601
2602         /*
2603          * We must use a consistent source address when sending to a
2604          * non-MR peer. However, a non-MR peer can have multiple NIDs
2605          * on multiple networks, and we may even need to talk to this
2606          * peer on multiple networks -- certain types of
2607          * load-balancing configuration do this.
2608          *
2609          * So we need to pick the NI the peer prefers for this
2610          * particular network.
2611          *
2612          * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet
2613          * traffic doesn't care which source NI is used, and we don't actually
2614          * want to restrict local recovery pings to a single source NI.
2615          */
2616         if (!lnet_reserved_msg(sd->sd_msg))
2617                 best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2618                                                                sd->sd_cpt);
2619
2620         if (!best_ni)
2621                 best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2622                                                 sd->sd_best_lpni->lpni_peer_net,
2623                                                 sd->sd_msg,
2624                                                 sd->sd_md_cpt);
2625
2626         /* If there is no best_ni we don't have a route */
2627         if (!best_ni) {
2628                 CERROR("no path to %s from net %s\n",
2629                         libcfs_nidstr(&sd->sd_best_lpni->lpni_nid),
2630                         libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id));
2631                 return -EHOSTUNREACH;
2632         }
2633
2634         sd->sd_best_ni = best_ni;
2635
2636         /* Set preferred NI if necessary. */
2637         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2638
2639         return 0;
2640 }
2641
2642
2643 /*
2644  * Source not specified
2645  * Local destination
2646  * Non-MR Peer
2647  *
2648  * always use the same source NID for NMR peers.
2649  * If we've talked to that peer before then we already have a preferred
2650  * source NI associated with it. Otherwise, we select a preferred local NI
2651  * and store it in the peer
2652  */
2653 static int
2654 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2655 {
2656         int rc = 0;
2657
2658         /* sd->sd_best_lpni is already set to the final destination */
2659
2660         /*
2661          * At this point we should've created the peer ni and peer. If we
2662          * can't find it, then something went wrong. Instead of asserting,
2663          * output a relevant message and fail the send.
2664          */
2665         if (!sd->sd_best_lpni) {
2666                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2667                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2668                        libcfs_nidstr(&sd->sd_dst_nid));
2669                 return -EFAULT;
2670         }
2671
2672         if (sd->sd_msg->msg_routing) {
2673                 /* If I'm forwarding this message then I can choose any NI
2674                  * on the destination peer net
2675                  */
2676                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2677                                                                sd->sd_peer,
2678                                                                sd->sd_best_lpni->lpni_peer_net,
2679                                                                sd->sd_msg,
2680                                                                sd->sd_md_cpt);
2681                 if (!sd->sd_best_ni) {
2682                         CERROR("Unable to forward message to %s. No local NI available\n",
2683                                libcfs_nidstr(&sd->sd_dst_nid));
2684                         rc = -EHOSTUNREACH;
2685                 }
2686         } else
2687                 rc = lnet_select_preferred_best_ni(sd);
2688
2689         if (!rc)
2690                 rc = lnet_handle_send(sd);
2691
2692         return rc;
2693 }
2694
2695 static int
2696 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2697 {
2698         /*
2699          * NOTE we've already handled the remote peer case. So we only
2700          * need to worry about the local case here.
2701          *
2702          * if we're sending a response, ACK or reply, we need to send it
2703          * to the destination NID given to us. At this point we already
2704          * have the peer_ni we're supposed to send to, so just find the
2705          * best_ni on the peer net and use that. Since we're sending to an
2706          * MR peer then we can just run the selection algorithm on our
2707          * local NIs and pick the best one.
2708          */
2709         if (sd->sd_send_case & SND_RESP) {
2710                 sd->sd_best_ni =
2711                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2712                                                 sd->sd_best_lpni->lpni_peer_net,
2713                                                 sd->sd_msg,
2714                                                 sd->sd_md_cpt);
2715
2716                 if (!sd->sd_best_ni) {
2717                         /*
2718                          * We're not going to deal with being unable to send
2719                          * a response to the provided final destination.
2720                          */
2721                         CERROR("Can't send response to %s. No local NI available\n",
2722                                 libcfs_nidstr(&sd->sd_dst_nid));
2723                         return -EHOSTUNREACH;
2724                 }
2725
2726                 return lnet_handle_send(sd);
2727         }
2728
2729         /*
2730          * If we get here that means we're sending a fresh request, PUT or
2731          * GET, so we need to run our standard selection algorithm.
2732          * First find the best local interface that's on any of the peer's
2733          * networks.
2734          */
2735         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2736                                         sd->sd_md_cpt,
2737                                         sd->sd_msg,
2738                                         lnet_msg_discovery(sd->sd_msg));
2739         if (sd->sd_best_ni) {
2740                 sd->sd_best_lpni =
2741                   lnet_find_best_lpni(sd->sd_best_ni, &sd->sd_dst_nid,
2742                                       sd->sd_peer,
2743                                       sd->sd_best_ni->ni_net->net_id);
2744
2745                 /*
2746                  * if we're successful in selecting a peer_ni on the local
2747                  * network, then send to it. Otherwise fall through and
2748                  * try and see if we can reach it over another routed
2749                  * network
2750                  */
2751                 if (sd->sd_best_lpni &&
2752                     nid_same(&sd->sd_best_lpni->lpni_nid,
2753                              &the_lnet.ln_loni->ni_nid)) {
2754                         /*
2755                          * in case we initially started with a routed
2756                          * destination, let's reset to local
2757                          */
2758                         sd->sd_send_case &= ~REMOTE_DST;
2759                         sd->sd_send_case |= LOCAL_DST;
2760                         return lnet_handle_lo_send(sd);
2761                 } else if (sd->sd_best_lpni) {
2762                         /*
2763                          * in case we initially started with a routed
2764                          * destination, let's reset to local
2765                          */
2766                         sd->sd_send_case &= ~REMOTE_DST;
2767                         sd->sd_send_case |= LOCAL_DST;
2768                         return lnet_handle_send(sd);
2769                 }
2770
2771                 CERROR("Internal Error. Expected to have a best_lpni: %s -> %s\n",
2773                        libcfs_nidstr(&sd->sd_src_nid),
2774                        libcfs_nidstr(&sd->sd_dst_nid));
2775
2776                 return -EFAULT;
2777         }
2778
2779         /*
2780          * Peer doesn't have a local network. Let's see if there is
2781          * a remote network we can reach it on.
2782          */
2783         return PASS_THROUGH;
2784 }
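
/* Return contract of lnet_handle_any_mr_dsta(), as read from the code above
 * and from its caller below: it returns the result of lnet_handle_send() or
 * lnet_handle_lo_send() when the MR peer can be reached over a local
 * network, a negative errno on hard failure, and PASS_THROUGH when the peer
 * has no local network, in which case the caller falls back to a routed
 * path:
 *
 *	rc = lnet_handle_any_mr_dsta(sd);
 *	if (rc != PASS_THROUGH)
 *		return rc;
 */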
2785
2786 /*
2787  * Case 1:
2788  *      Source NID not specified
2789  *      Local destination
2790  *      MR peer
2791  *
2792  * Case 2:
2793  *      Source NID not specified
2794  *      Remote destination
2795  *      MR peer
2796  *
2797  * In both of these cases if we're sending a response, ACK or REPLY, then
2798  * we need to send to the destination NID provided.
2799  *
2800  * In the remote case let's deal with MR routers.
2801  *
2802  */
2803
2804 static int
2805 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2806 {
2807         int rc = 0;
2808         struct lnet_peer *gw_peer = NULL;
2809         struct lnet_peer_ni *gw_lpni = NULL;
2810
2811         /*
2812          * handle sending a response to a remote peer here so we don't
2813          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2814          */
2815         if (sd->sd_send_case & REMOTE_DST &&
2816             sd->sd_send_case & SND_RESP) {
2817                 struct lnet_peer_ni *gw;
2818                 struct lnet_peer *gw_peer;
2819
2820                 rc = lnet_handle_find_routed_path(
2821                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2822                 if (rc < 0) {
2823                         CERROR("Can't send response to %s. No route available\n",
2824                                libcfs_nidstr(&sd->sd_dst_nid));
2825                         return -EHOSTUNREACH;
2826                 } else if (rc > 0) {
2827                         return rc;
2828                 }
2829
2830                 sd->sd_best_lpni = gw;
2831                 sd->sd_peer = gw_peer;
2832
2833                 return lnet_handle_send(sd);
2834         }
2835
2836         /*
2837          * Even though the NID for the peer might not be on a local network,
2838          * since the peer is MR there could be other interfaces on the
2839          * local network. In that case we'd still like to prefer the local
2840          * network over the routed network. If we're unable to do that
2841          * then we select the best router among the different routed networks,
2842          * and if the router is MR then we can deal with it as such.
2843          */
2844         rc = lnet_handle_any_mr_dsta(sd);
2845         if (rc != PASS_THROUGH)
2846                 return rc;
2847
2848         /*
2849          * Now that we must route to the destination, we must consider the
2850          * MR case, where the destination has multiple interfaces, some of
2851  * which we can route to and others we cannot. For this reason we
2852          * need to select the destination which we can route to and if
2853          * there are multiple, we need to round robin.
2854          */
2855         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2856                                           &gw_lpni, &gw_peer);
2857         if (rc)
2858                 return rc;
2859
2860         sd->sd_send_case &= ~LOCAL_DST;
2861         sd->sd_send_case |= REMOTE_DST;
2862
2863         sd->sd_peer = gw_peer;
2864         sd->sd_best_lpni = gw_lpni;
2865
2866         return lnet_handle_send(sd);
2867 }
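
/* Summary of lnet_handle_any_mr_dst(), as read from the code above:
 *   1. A response (ACK/REPLY) to a remote destination is routed right away
 *      via lnet_handle_find_routed_path().
 *   2. A fresh request first goes through lnet_handle_any_mr_dsta(), which
 *      prefers a local network if the MR peer has one.
 *   3. On PASS_THROUGH the send case is switched to REMOTE_DST and the
 *      message is sent through the best gateway found by
 *      lnet_handle_find_routed_path().
 */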
2868
2869 /*
2870  * Source not specified
2871  * Remote destination
2872  * Non-MR peer
2873  *
2874  * Must send to the specified peer NID using the same source NID that
2875  * we've used before. If it's the first time talking to that peer then
2876  * find the source NI and assign it as preferred to that peer
2877  */
2878 static int
2879 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2880 {
2881         int rc;
2882         struct lnet_peer_ni *gw_lpni = NULL;
2883         struct lnet_peer *gw_peer = NULL;
2884
2885         /*
2886          * Let's see if we have a preferred NI to talk to this NMR peer
2887          */
2888         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2889                                                               sd->sd_cpt);
2890
2891         /*
2892          * find the router and that'll find the best NI if we didn't find
2893          * it already.
2894          */
2895         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2896                                           &gw_peer);
2897         if (rc)
2898                 return rc;
2899
2900         /*
2901          * set the best_ni we've chosen as the preferred one for
2902          * this peer
2903          */
2904         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2905
2906         /* we'll be sending to the gw */
2907         sd->sd_best_lpni = gw_lpni;
2908         sd->sd_peer = gw_peer;
2909
2910         return lnet_handle_send(sd);
2911 }
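
/* Note: for the NMR destination above the source NI is pinned. Whatever
 * best_ni ends up selected (either the existing preferred NI or the one
 * chosen while finding the routed path) is recorded with
 * lnet_set_non_mr_pref_nid(), so later messages to this peer keep using the
 * same source NID, which non-MR peers expect.
 */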
2912
2913 static int
2914 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2915 {
2916         /*
2917          * turn off the SND_RESP bit.
2918          * It will be checked in the case handling
2919          */
2920         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2921
2922         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2923                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2924                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2925                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2926                 libcfs_nidstr(&sd->sd_dst_nid),
2927                 (send_case & LOCAL_DST) ? "local" : "routed");
2928
2929         switch (send_case) {
2930         /*
2931          * For all cases where the source is specified, we should always
2932          * use the destination NID, whether it's an MR destination or not,
2933          * since we're continuing a series of related messages for the
2934          * same RPC
2935          */
2936         case SRC_SPEC_LOCAL_NMR_DST:
2937                 return lnet_handle_spec_local_nmr_dst(sd);
2938         case SRC_SPEC_LOCAL_MR_DST:
2939                 return lnet_handle_spec_local_mr_dst(sd);
2940         case SRC_SPEC_ROUTER_NMR_DST:
2941         case SRC_SPEC_ROUTER_MR_DST:
2942                 return lnet_handle_spec_router_dst(sd);
2943         case SRC_ANY_LOCAL_NMR_DST:
2944                 return lnet_handle_any_local_nmr_dst(sd);
2945         case SRC_ANY_LOCAL_MR_DST:
2946         case SRC_ANY_ROUTER_MR_DST:
2947                 return lnet_handle_any_mr_dst(sd);
2948         case SRC_ANY_ROUTER_NMR_DST:
2949                 return lnet_handle_any_router_nmr_dst(sd);
2950         default:
2951                 CERROR("Unknown send case\n");
2952                 return -1;
2953         }
2954 }
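
/* Illustrative sketch of how the send cases are composed. The assumption
 * here is that the SRC_*_*_DST case labels are defined as the OR of the
 * individual flags, which is what the switch above together with the flag
 * assembly in lnet_select_pathway() below implies:
 *
 *	send_case = SRC_ANY | LOCAL_DST | MR_DST;
 *	send_case == SRC_ANY_LOCAL_MR_DST  ->  lnet_handle_any_mr_dst()
 *
 * SND_RESP is masked off before the switch because the individual handlers
 * re-check it through sd->sd_send_case.
 */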
2955
2956 static int
2957 lnet_select_pathway(struct lnet_nid *src_nid,
2958                     struct lnet_nid *dst_nid,
2959                     struct lnet_msg *msg,
2960                     struct lnet_nid *rtr_nid)
2961 {
2962         struct lnet_peer_ni *lpni;
2963         struct lnet_peer *peer;
2964         struct lnet_send_data send_data;
2965         int cpt, rc;
2966         int md_cpt;
2967         __u32 send_case = 0;
2968         bool final_hop;
2969         bool mr_forwarding_allowed;
2970
2971         memset(&send_data, 0, sizeof(send_data));
2972
2973         /*
2974          * get an initial CPT to use for locking. The idea here is not to
2975          * serialize the calls to select_pathway, so that as many
2976          * operations can run concurrently as possible. To do that we use
2977          * the CPT where this call is being executed. Later on when we
2978          * determine the CPT to use in lnet_message_commit, we switch the
2979          * lock and check if there was any configuration change.  If none,
2980          * then we proceed, if there is, then we restart the operation.
2981          */
2982         cpt = lnet_net_lock_current();
2983
2984         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2985         if (md_cpt == CFS_CPT_ANY)
2986                 md_cpt = cpt;
2987
2988 again:
2989
2990         /*
2991          * If we're being asked to send to the loopback interface, there
2992          * is no need to go through any selection. We can just shortcut
2993          * the entire process and send over lolnd
2994          */
2995         send_data.sd_msg = msg;
2996         send_data.sd_cpt = cpt;
2997         if (nid_is_lo0(dst_nid)) {
2998                 rc = lnet_handle_lo_send(&send_data);
2999                 lnet_net_unlock(cpt);
3000                 return rc;
3001         }
3002
3003         /*
3004          * find an existing peer_ni, or create one and mark it as having been
3005          * created due to network traffic. This call will create the
3006          * peer->peer_net->peer_ni tree.
3007          */
3008         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
3009         if (IS_ERR(lpni)) {
3010                 lnet_net_unlock(cpt);
3011                 return PTR_ERR(lpni);
3012         }
3013
3014         /*
3015          * Cache the original src_nid and rtr_nid. If we need to resend the
3016          * message then we'll need to know whether the src_nid was originally
3017          * specified for this message. If it was originally specified,
3018          * then we need to keep using the same src_nid since it's
3019          * continuing the same sequence of messages. Similarly, rtr_nid will
3020          * affect our choice of next hop.
3021          */
3022         if (src_nid)
3023                 msg->msg_src_nid_param = *src_nid;
3024         else
3025                 msg->msg_src_nid_param = LNET_ANY_NID;
3026         if (rtr_nid)
3027                 msg->msg_rtr_nid_param = *rtr_nid;
3028         else
3029                 msg->msg_rtr_nid_param = LNET_ANY_NID;
3030
3031         /*
3032          * If necessary, perform discovery on the peer that owns this peer_ni.
3033          * Note, this can result in the ownership of this peer_ni changing
3034          * to another peer object.
3035          */
3036         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
3037         if (rc) {
3038                 lnet_peer_ni_decref_locked(lpni);
3039                 lnet_net_unlock(cpt);
3040                 return rc;
3041         }
3042
3043         peer = lpni->lpni_peer_net->lpn_peer;
3044
3045         /*
3046          * Identify the different send cases
3047          */
3048         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
3049                 send_case |= SRC_ANY;
3050                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3051                         send_case |= LOCAL_DST;
3052                 else
3053                         send_case |= REMOTE_DST;
3054         } else {
3055                 send_case |= SRC_SPEC;
3056                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3057                         send_case |= LOCAL_DST;
3058                 else
3059                         send_case |= REMOTE_DST;
3060         }
3061
3062         final_hop = false;
3063         if (msg->msg_routing && (send_case & LOCAL_DST))
3064                 final_hop = true;
3065
3066         /* Determine whether to allow MR forwarding for this message.
3067          * NB: MR forwarding is allowed if the message originator and the
3068          * destination are both MR capable, and the destination lpni that was
3069          * originally chosen by the originator is unhealthy or down.
3070          * We check the MR capability of the destination further below
3071          */
3072         mr_forwarding_allowed = false;
3073         if (final_hop) {
3074                 struct lnet_peer *src_lp;
3075                 struct lnet_peer_ni *src_lpni;
3076
3077                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3078                                                    NULL, cpt);
3079                 /* We don't fail the send if we hit any errors here. We'll just
3080                  * try to send it via non-multi-rail criteria
3081                  */
3082                 if (!IS_ERR(src_lpni)) {
3083                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3084                         lnet_peer_ni_decref_locked(src_lpni);
3085                         src_lp = lpni->lpni_peer_net->lpn_peer;
3086                         if (lnet_peer_is_multi_rail(src_lp) &&
3087                             !lnet_is_peer_ni_alive(lpni))
3088                                 mr_forwarding_allowed = true;
3089
3090                 }
3091                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3092                        mr_forwarding_allowed ? "allowed" : "not allowed");
3093         }
3094
3095         /*
3096          * Deal with the peer as NMR in the following cases:
3097          * 1. the peer is NMR
3098          * 2. We're trying to recover a specific peer NI
3099          * 3. I'm a router sending to the final destination and MR forwarding is
3100          *    not allowed for this message (as determined above).
3101          *    In this case the source of the message would've
3102          *    already selected the final destination so my job
3103          *    is to honor the selection.
3104          */
3105         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3106             (final_hop && !mr_forwarding_allowed))
3107                 send_case |= NMR_DST;
3108         else
3109                 send_case |= MR_DST;
3110
3111         if (lnet_msg_is_response(msg))
3112                 send_case |= SND_RESP;
3113
3114         /* assign parameters to the send_data */
3115         if (rtr_nid)
3116                 send_data.sd_rtr_nid = *rtr_nid;
3117         else
3118                 send_data.sd_rtr_nid = LNET_ANY_NID;
3119         if (src_nid)
3120                 send_data.sd_src_nid = *src_nid;
3121         else
3122                 send_data.sd_src_nid = LNET_ANY_NID;
3123         send_data.sd_dst_nid = *dst_nid;
3124         send_data.sd_best_lpni = lpni;
3125         /*
3126          * keep a pointer to the final destination in case we're going to
3127          * route, so we'll need to access it later
3128          */
3129         send_data.sd_final_dst_lpni = lpni;
3130         send_data.sd_peer = peer;
3131         send_data.sd_md_cpt = md_cpt;
3132         send_data.sd_send_case = send_case;
3133
3134         rc = lnet_handle_send_case_locked(&send_data);
3135
3136         /*
3137          * Update the local cpt since send_data.sd_cpt might've been
3138          * updated as a result of calling lnet_handle_send_case_locked().
3139          */
3140         cpt = send_data.sd_cpt;
3141         lnet_peer_ni_decref_locked(lpni);
3142
3143         if (rc == REPEAT_SEND)
3144                 goto again;
3145
3146         lnet_net_unlock(cpt);
3147
3148         return rc;
3149 }
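
/* Note on the "again:" loop above: lnet_handle_send_case_locked() may come
 * back with REPEAT_SEND, e.g. when the CPT lock had to be switched and the
 * configuration changed underneath us (see the comment near the top of the
 * function), in which case the peer lookup and send-case identification are
 * redone from scratch with the current state.
 */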
3150
3151 int
3152 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3153           struct lnet_nid *rtr_nid)
3154 {
3155         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3156         int rc;
3157
3158         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3159         LASSERT(msg->msg_txpeer == NULL);
3160         LASSERT(msg->msg_txni == NULL);
3161         LASSERT(!msg->msg_sending);
3162         LASSERT(!msg->msg_target_is_router);
3163         LASSERT(!msg->msg_receiving);
3164
3165         msg->msg_sending = 1;
3166
3167         LASSERT(!msg->msg_tx_committed);
3168
3169         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3170         if (rc < 0) {
3171                 if (rc == -EHOSTUNREACH)
3172                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3173                 else
3174                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3175                 return rc;
3176         }
3177
3178         if (rc == LNET_CREDIT_OK)
3179                 lnet_ni_send(msg->msg_txni, msg);
3180
3181         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3182         return 0;
3183 }
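
/* Return semantics of lnet_send(), from the code above: a negative errno is
 * returned only when path selection fails, with msg_health_status set
 * accordingly; otherwise 0 is returned and the message has either been
 * handed to the NI (LNET_CREDIT_OK) or left queued waiting for credits or
 * discovery (LNET_CREDIT_WAIT / LNET_DC_WAIT).
 */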
3184
3185 enum lnet_mt_event_type {
3186         MT_TYPE_LOCAL_NI = 0,
3187         MT_TYPE_PEER_NI
3188 };
3189
3190 struct lnet_mt_event_info {
3191         enum lnet_mt_event_type mt_type;
3192         struct lnet_nid mt_nid;
3193 };
3194
3195 /* called with res_lock held */
3196 void
3197 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3198 {
3199         struct lnet_rsp_tracker *rspt;
3200
3201         /*
3202          * msg has a refcount on the MD so the MD is not going away.
3203          * The rspt queue for the cpt is protected by
3204          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3205          */
3206         if (!md->md_rspt_ptr)
3207                 return;
3208
3209         rspt = md->md_rspt_ptr;
3210
3211         /* debug code */
3212         LASSERT(rspt->rspt_cpt == cpt);
3213
3214         md->md_rspt_ptr = NULL;
3215
3216         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3217                 /*
3218                  * The monitor thread has invalidated this handle because the
3219                  * response timed out, but it failed to lookup the MD. That
3220                  * means this response tracker is on the zombie list. We can
3221                  * safely remove it under the resource lock (held by caller) and
3222                  * free the response tracker block.
3223                  */
3224                 list_del(&rspt->rspt_on_list);
3225                 lnet_rspt_free(rspt, cpt);
3226         } else {
3227                 /*
3228                  * invalidate the handle to indicate that a response has been
3229                  * received, which will then lead the monitor thread to clean up
3230                  * the rspt block.
3231                  */
3232                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3233         }
3234 }
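
/* The response-tracker lifetime is a small handshake between this function
 * (normal completion path) and lnet_finalize_expired_responses() (timeout
 * path): whichever side runs second finds an invalidated mdh and is the one
 * that removes and frees the rspt block. Both sides rely on the per-cpt
 * resource lock to serialize access to rspt_on_list.
 */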
3235
3236 void
3237 lnet_clean_zombie_rstqs(void)
3238 {
3239         struct lnet_rsp_tracker *rspt, *tmp;
3240         int i;
3241
3242         cfs_cpt_for_each(i, lnet_cpt_table()) {
3243                 list_for_each_entry_safe(rspt, tmp,
3244                                          the_lnet.ln_mt_zombie_rstqs[i],
3245                                          rspt_on_list) {
3246                         list_del(&rspt->rspt_on_list);
3247                         lnet_rspt_free(rspt, i);
3248                 }
3249         }
3250
3251         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3252 }
3253
3254 static void
3255 lnet_finalize_expired_responses(void)
3256 {
3257         struct lnet_libmd *md;
3258         struct lnet_rsp_tracker *rspt, *tmp;
3259         ktime_t now;
3260         int i;
3261
3262         if (the_lnet.ln_mt_rstq == NULL)
3263                 return;
3264
3265         cfs_cpt_for_each(i, lnet_cpt_table()) {
3266                 LIST_HEAD(local_queue);
3267
3268                 lnet_net_lock(i);
3269                 if (!the_lnet.ln_mt_rstq[i]) {
3270                         lnet_net_unlock(i);
3271                         continue;
3272                 }
3273                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3274                 lnet_net_unlock(i);
3275
3276                 now = ktime_get();
3277
3278                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3279                         /*
3280                          * The rspt mdh will be invalidated when a response
3281                          * is received or whenever we want to discard the
3282                          * block. The monitor thread will walk the queue
3283                          * and clean up any rspts with an invalid mdh.
3284                          * The monitor thread will walk the queue until
3285                          * the first unexpired rspt block. This means that
3286                          * some rspt blocks which received their
3287                          * corresponding responses will linger in the
3288                          * queue until they are cleaned up eventually.
3289                          */
3290                         lnet_res_lock(i);
3291                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3292                                 lnet_res_unlock(i);
3293                                 list_del(&rspt->rspt_on_list);
3294                                 lnet_rspt_free(rspt, i);
3295                                 continue;
3296                         }
3297
3298                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3299                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3300                                 struct lnet_peer_ni *lpni;
3301                                 struct lnet_nid nid;
3302
3303                                 md = lnet_handle2md(&rspt->rspt_mdh);
3304                                 if (!md) {
3305                                         /* MD has been queued for unlink, but
3306                                          * rspt hasn't been detached (Note we've
3307                                          * checked above that the rspt_mdh is
3308                                          * valid). Since we cannot lookup the MD
3309                                          * we're unable to detach the rspt
3310                                          * ourselves. Thus, move the rspt to the
3311                                          * zombie list where we'll wait for
3312                                          * either:
3313                                          *   1. The remaining operations on the
3314                                          *   MD to complete. In this case the
3315                                          *   final operation will result in
3316                                          *   lnet_msg_detach_md()->
3317                                          *   lnet_detach_rsp_tracker() where
3318                                          *   we will clean up this response
3319                                          *   tracker.
3320                                          *   2. LNet to shutdown. In this case
3321                                          *   we'll wait until after all LND Nets
3322                                          *   have shutdown and then we can
3323                                          *   safely free any remaining response
3324                                          *   tracker blocks on the zombie list.
3325                                          * Note: We need to hold the resource
3326                                          * lock when adding to the zombie list
3327                                          * because we may have concurrent access
3328                                          * with lnet_detach_rsp_tracker().
3329                                          */
3330                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3331                                         list_move(&rspt->rspt_on_list,
3332                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3333                                         lnet_res_unlock(i);
3334                                         continue;
3335                                 }
3336                                 LASSERT(md->md_rspt_ptr == rspt);
3337                                 md->md_rspt_ptr = NULL;
3338                                 lnet_res_unlock(i);
3339
3340                                 LNetMDUnlink(rspt->rspt_mdh);
3341
3342                                 nid = rspt->rspt_next_hop_nid;
3343
3344                                 list_del(&rspt->rspt_on_list);
3345                                 lnet_rspt_free(rspt, i);
3346
3347                                 /* If we're shutting down we just want to clean
3348                                  * up the rspt blocks
3349                                  */
3350                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3351                                         continue;
3352
3353                                 lnet_net_lock(i);
3354                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3355                                 lnet_net_unlock(i);
3356
3357                                 CDEBUG(D_NET,
3358                                        "Response timeout: md = %p: nid = %s\n",
3359                                        md, libcfs_nidstr(&nid));
3360
3361                                 /*
3362                                  * If there is a timeout on the response
3363                                  * from the next hop decrement its health
3364                                  * value so that we don't use it
3365                                  */
3366                                 lnet_net_lock(0);
3367                                 lpni = lnet_peer_ni_find_locked(&nid);
3368                                 if (lpni) {
3369                                         lnet_handle_remote_failure_locked(lpni);
3370                                         lnet_peer_ni_decref_locked(lpni);
3371                                 }
3372                                 lnet_net_unlock(0);
3373                         } else {
3374                                 lnet_res_unlock(i);
3375                                 break;
3376                         }
3377                 }
3378
3379                 if (!list_empty(&local_queue)) {
3380                         lnet_net_lock(i);
3381                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3382                         lnet_net_unlock(i);
3383                 }
3384         }
3385 }
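
/* Note: the per-cpt rstq is walked in order and the loop above breaks at
 * the first unexpired tracker, so entries whose responses already arrived
 * (invalid mdh) are only reclaimed once the walk reaches them, exactly as
 * the comment at the top of the loop describes.
 */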
3386
3387 static void
3388 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3389 {
3390         struct lnet_msg *msg;
3391
3392         while (!list_empty(resendq)) {
3393                 struct lnet_peer_ni *lpni;
3394
3395                 msg = list_first_entry(resendq, struct lnet_msg,
3396                                        msg_list);
3397
3398                 list_del_init(&msg->msg_list);
3399
3400                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3401                 if (!lpni) {
3402                         lnet_net_unlock(cpt);
3403                         CERROR("Expected that a peer is already created for %s\n",
3404                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3405                         msg->msg_no_resend = true;
3406                         lnet_finalize(msg, -EFAULT);
3407                         lnet_net_lock(cpt);
3408                 } else {
3409                         int rc;
3410
3411                         lnet_peer_ni_decref_locked(lpni);
3412
3413                         lnet_net_unlock(cpt);
3414                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3415                                libcfs_nidstr(&msg->msg_src_nid_param),
3416                                libcfs_idstr(&msg->msg_target),
3417                                lnet_msgtyp2str(msg->msg_type),
3418                                msg->msg_recovery,
3419                                msg->msg_retry_count);
3420                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3421                                        &msg->msg_rtr_nid_param);
3422                         if (rc) {
3423                                 CERROR("Error sending %s to %s: %d\n",
3424                                        lnet_msgtyp2str(msg->msg_type),
3425                                        libcfs_idstr(&msg->msg_target), rc);
3426                                 msg->msg_no_resend = true;
3427                                 lnet_finalize(msg, rc);
3428                         }
3429                         lnet_net_lock(cpt);
3430                         if (!rc)
3431                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3432                 }
3433         }
3434 }
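
/* Locking pattern used above: the cpt lock is dropped around lnet_send()
 * and lnet_finalize(), both of which may block or take other locks, and is
 * re-taken before the resend queue is touched again; list_del_init() before
 * the unlock keeps the message off the shared queue while it is unlocked.
 */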
3435
3436 static void
3437 lnet_resend_pending_msgs(void)
3438 {
3439         int i;
3440
3441         cfs_cpt_for_each(i, lnet_cpt_table()) {
3442                 lnet_net_lock(i);
3443                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3444                 lnet_net_unlock(i);
3445         }
3446 }
3447
3448 /* called with cpt and ni_lock held */
3449 static void
3450 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3451 {
3452         struct lnet_handle_md recovery_mdh;
3453
3454         LNetInvalidateMDHandle(&recovery_mdh);
3455
3456         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3457             force) {
3458                 recovery_mdh = ni->ni_ping_mdh;
3459                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3460         }
3461         lnet_ni_unlock(ni);
3462         lnet_net_unlock(cpt);
3463         if (!LNetMDHandleIsInvalid(recovery_mdh))
3464                 LNetMDUnlink(recovery_mdh);
3465         lnet_net_lock(cpt);
3466         lnet_ni_lock(ni);
3467 }
3468
3469 /* Returns the total number of local NIs in recovery.
3470  * Records up to @arrsz of the associated NIDs in the @nidarr array
3471  */
3472 static int
3473 lnet_recover_local_nis(struct lnet_nid *nidarr, unsigned int arrsz)
3474 {
3475         struct lnet_mt_event_info *ev_info;
3476         LIST_HEAD(processed_list);
3477         LIST_HEAD(local_queue);
3478         struct lnet_handle_md mdh;
3479         struct lnet_ni *tmp;
3480         struct lnet_ni *ni;
3481         struct lnet_nid nid;
3482         int healthv;
3483         int rc;
3484         time64_t now;
3485         unsigned int nnis = 0;
3486
3487         /*
3488          * splice the recovery queue on a local queue. We will iterate
3489          * through the local queue and update it as needed. Once we're
3490          * done with the traversal, we'll splice the local queue back on
3491          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3492          * will be traversed in the next iteration.
3493          */
3494         lnet_net_lock(0);
3495         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3496                          &local_queue);
3497         lnet_net_unlock(0);
3498
3499         now = ktime_get_seconds();
3500
3501         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3502                 /*
3503                  * if an NI is being deleted or it is now healthy, there
3504                  * is no need to keep it around in the recovery queue.
3505                  * The monitor thread is the only thread responsible for
3506                  * removing the NI from the recovery queue.
3507                  * Multiple threads can be adding NIs to the recovery
3508                  * queue.
3509                  */
3510                 healthv = atomic_read(&ni->ni_healthv);
3511
3512                 lnet_net_lock(0);
3513                 lnet_ni_lock(ni);
3514                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3515                     healthv == LNET_MAX_HEALTH_VALUE) {
3516                         list_del_init(&ni->ni_recovery);
3517                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3518                         lnet_ni_unlock(ni);
3519                         lnet_ni_decref_locked(ni, 0);
3520                         lnet_net_unlock(0);
3521                         continue;
3522                 }
3523
3524                 if (nnis < arrsz)
3525                         nidarr[nnis] = ni->ni_nid;
3526                 nnis++;
3527
3528                 /*
3529                  * if the local NI failed recovery we must unlink the md.
3530                  * But we want to keep the local_ni on the recovery queue
3531                  * so we can continue the attempts to recover it.
3532                  */
3533                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3534                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3535                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3536                 }
3537
3538
3539                 lnet_ni_unlock(ni);
3540
3541                 if (now < ni->ni_next_ping) {
3542                         lnet_net_unlock(0);
3543                         continue;
3544                 }
3545
3546                 lnet_net_unlock(0);
3547
3548                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3549                        libcfs_nidstr(&ni->ni_nid));
3550
3551                 lnet_ni_lock(ni);
3552                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3553                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3554                         lnet_ni_unlock(ni);
3555
3556                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3557                         if (!ev_info) {
3558                                 CERROR("out of memory. Can't recover %s\n",
3559                                        libcfs_nidstr(&ni->ni_nid));
3560                                 lnet_ni_lock(ni);
3561                                 ni->ni_recovery_state &=
3562                                   ~LNET_NI_RECOVERY_PENDING;
3563                                 lnet_ni_unlock(ni);
3564                                 continue;
3565                         }
3566
3567                         mdh = ni->ni_ping_mdh;
3568                         /*
3569                          * Invalidate the ni mdh in case it's deleted.
3570                          * We'll unlink the mdh in this case below.
3571                          */
3572                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3573                         nid = ni->ni_nid;
3574
3575                         /*
3576                          * remove the NI from the local queue and drop the
3577                          * reference count to it while we're recovering
3578                          * it. The reason for that is that the NI could
3579                          * be deleted, and the way the code is structured
3580                          * is if we don't drop the NI, then the deletion
3581                          * code will enter a loop waiting for the
3582                          * reference count to be removed while holding the
3583                          * ln_mutex_lock(). When we look up the peer to
3584                          * send to in lnet_select_pathway() we will try to
3585                          * lock the ln_mutex_lock() as well, leading to
3586                          * a deadlock. By dropping the refcount and
3587                          * removing it from the list, we allow for the NI
3588                          * to be removed, then we use the cached NID to
3589                          * look it up again. If it's gone, then we just
3590                          * continue examining the rest of the queue.
3591                          */
3592                         lnet_net_lock(0);
3593                         list_del_init(&ni->ni_recovery);
3594                         lnet_ni_decref_locked(ni, 0);
3595                         lnet_net_unlock(0);
3596
3597                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3598                         ev_info->mt_nid = nid;
3599                         rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
3600                                             ev_info, the_lnet.ln_mt_handler,
3601                                             true);
3602                         /* lookup the nid again */
3603                         lnet_net_lock(0);
3604                         ni = lnet_nid_to_ni_locked(&nid, 0);
3605                         if (!ni) {
3606                                 /*
3607                                  * the NI has been deleted when we dropped
3608                                  * the ref count
3609                                  */
3610                                 lnet_net_unlock(0);
3611                                 LNetMDUnlink(mdh);
3612                                 continue;
3613                         }
3614                         ni->ni_ping_count++;
3615
3616                         ni->ni_ping_mdh = mdh;
3617                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3618                                                         now);
3619
3620                         if (rc) {
3621                                 lnet_ni_lock(ni);
3622                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3623                                 lnet_ni_unlock(ni);
3624                         }
3625                         lnet_net_unlock(0);
3626                 } else
3627                         lnet_ni_unlock(ni);
3628         }
3629
3630         /*
3631          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3632          * reexamined in the next iteration.
3633          */
3634         list_splice_init(&processed_list, &local_queue);
3635         lnet_net_lock(0);
3636         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3637         lnet_net_unlock(0);
3638
3639         return nnis;
3640 }
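
/* Recovery loop pattern used above (and mirrored for peer NIs in
 * lnet_recover_peer_nis() below):
 *   1. splice ln_mt_localNIRecovq onto a private list under lnet_net_lock(0)
 *   2. drop NIs that are healthy or being deleted, unlinking their ping MD
 *   3. for NIs that are due for a ping, drop the queue reference, send the
 *      ping via lnet_send_ping() and re-look the NI up by NID afterwards
 *   4. splice the survivors back so newly queued NIs are picked up on the
 *      next pass
 */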
3641
3642 static int
3643 lnet_resendqs_create(void)
3644 {
3645         struct list_head **resendqs;
3646         resendqs = lnet_create_array_of_queues();
3647
3648         if (!resendqs)
3649                 return -ENOMEM;
3650
3651         lnet_net_lock(LNET_LOCK_EX);
3652         the_lnet.ln_mt_resendqs = resendqs;
3653         lnet_net_unlock(LNET_LOCK_EX);
3654
3655         return 0;
3656 }
3657
3658 static void
3659 lnet_clean_local_ni_recoveryq(void)
3660 {
3661         struct lnet_ni *ni;
3662
3663         /* This is only called when the monitor thread has stopped */
3664         lnet_net_lock(0);
3665
3666         while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
3667                                               struct lnet_ni,
3668                                               ni_recovery)) != NULL) {
3669                 list_del_init(&ni->ni_recovery);
3670                 lnet_ni_lock(ni);
3671                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3672                 lnet_ni_unlock(ni);
3673                 lnet_ni_decref_locked(ni, 0);
3674         }
3675
3676         lnet_net_unlock(0);
3677 }
3678
3679 static void
3680 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3681                                      bool force)
3682 {
3683         struct lnet_handle_md recovery_mdh;
3684
3685         LNetInvalidateMDHandle(&recovery_mdh);
3686
3687         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3688                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3689                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3690         }
3691         spin_unlock(&lpni->lpni_lock);
3692         lnet_net_unlock(cpt);
3693         if (!LNetMDHandleIsInvalid(recovery_mdh))
3694                 LNetMDUnlink(recovery_mdh);
3695         lnet_net_lock(cpt);
3696         spin_lock(&lpni->lpni_lock);
3697 }
3698
3699 static void
3700 lnet_clean_peer_ni_recoveryq(void)
3701 {
3702         struct lnet_peer_ni *lpni, *tmp;
3703
3704         lnet_net_lock(LNET_LOCK_EX);
3705
3706         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3707                                  lpni_recovery) {
3708                 list_del_init(&lpni->lpni_recovery);
3709                 spin_lock(&lpni->lpni_lock);
3710                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3711                 spin_unlock(&lpni->lpni_lock);
3712                 lnet_peer_ni_decref_locked(lpni);
3713         }
3714
3715         lnet_net_unlock(LNET_LOCK_EX);
3716 }
3717
3718 static void
3719 lnet_clean_resendqs(void)
3720 {
3721         struct lnet_msg *msg, *tmp;
3722         LIST_HEAD(msgs);
3723         int i;
3724
3725         cfs_cpt_for_each(i, lnet_cpt_table()) {
3726                 lnet_net_lock(i);
3727                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3728                 lnet_net_unlock(i);
3729                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3730                         list_del_init(&msg->msg_list);
3731                         msg->msg_no_resend = true;
3732                         lnet_finalize(msg, -ESHUTDOWN);
3733                 }
3734         }
3735
3736         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3737 }
3738
3739 /* Returns the total number of peer NIs in recovery.
3740  * Records up to @arrsz of the associated NIDs in the @nidarr array
3741  */
3742 static unsigned int
3743 lnet_recover_peer_nis(struct lnet_nid *nidarr, unsigned int arrsz)
3744 {
3745         struct lnet_mt_event_info *ev_info;
3746         LIST_HEAD(processed_list);
3747         LIST_HEAD(local_queue);
3748         struct lnet_handle_md mdh;
3749         struct lnet_peer_ni *lpni;
3750         struct lnet_peer_ni *tmp;
3751         struct lnet_nid nid;
3752         int healthv;
3753         int rc;
3754         time64_t now;
3755         unsigned int nlpnis = 0;
3756
3757         /*
3758          * Always use cpt 0 for locking across all interactions with
3759          * ln_mt_peerNIRecovq
3760          */
3761         lnet_net_lock(0);
3762         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3763                          &local_queue);
3764         lnet_net_unlock(0);
3765
3766         now = ktime_get_seconds();
3767
3768         list_for_each_entry_safe(lpni, tmp, &local_queue,
3769                                  lpni_recovery) {
3770                 /*
3771                  * The same protection strategy is used here as is in the
3772                  * local recovery case.
3773                  */
3774                 lnet_net_lock(0);
3775                 healthv = atomic_read(&lpni->lpni_healthv);
3776                 spin_lock(&lpni->lpni_lock);
3777                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3778                     healthv == LNET_MAX_HEALTH_VALUE) {
3779                         list_del_init(&lpni->lpni_recovery);
3780                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3781                         spin_unlock(&lpni->lpni_lock);
3782                         lnet_peer_ni_decref_locked(lpni);
3783                         lnet_net_unlock(0);
3784                         continue;
3785                 }
3786
3787                 if (nlpnis < arrsz)
3788                         nidarr[nlpnis] = lpni->lpni_nid;
3789                 nlpnis++;
3790
3791                 /*
3792                  * If the peer NI has failed recovery we must unlink the
3793                  * md. But we want to keep the peer ni on the recovery
3794                  * queue so we can try to continue recovering it
3795                  */
3796                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3797                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3798                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3799                 }
3800
3801                 spin_unlock(&lpni->lpni_lock);
3802
3803                 if (now < lpni->lpni_next_ping) {
3804                         lnet_net_unlock(0);
3805                         continue;
3806                 }
3807
3808                 lnet_net_unlock(0);
3809
3810                 /*
3811                  * NOTE: we're racing with peer deletion from user space.
3812                  * It's possible that a peer is deleted after we check its
3813                  * state. In this case the recovery can create a new peer
3814                  */
3815                 spin_lock(&lpni->lpni_lock);
3816                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3817                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3818                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3819                         spin_unlock(&lpni->lpni_lock);
3820
3821                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3822                         if (!ev_info) {
3823                                 CERROR("out of memory. Can't recover %s\n",
3824                                        libcfs_nidstr(&lpni->lpni_nid));
3825                                 spin_lock(&lpni->lpni_lock);
3826                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3827                                 spin_unlock(&lpni->lpni_lock);
3828                                 continue;
3829                         }
3830
3831                         /* look at the comments in lnet_recover_local_nis() */
3832                         mdh = lpni->lpni_recovery_ping_mdh;
3833                         nid = lpni->lpni_nid;
3834                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3835                         lnet_net_lock(0);
3836                         list_del_init(&lpni->lpni_recovery);
3837                         lnet_peer_ni_decref_locked(lpni);
3838                         lnet_net_unlock(0);
3839
3840                         ev_info->mt_type = MT_TYPE_PEER_NI;
3841                         ev_info->mt_nid = nid;
3842                         rc = lnet_send_ping(&nid, &mdh, LNET_PING_INFO_MIN_SIZE,
3843                                             ev_info, the_lnet.ln_mt_handler,
3844                                             true);
3845                         lnet_net_lock(0);
3846                         /*
3847                          * lnet_peer_ni_find_locked() grabs a refcount for
3848                          * us. No need to take it explicitly.
3849                          */
3850                         lpni = lnet_peer_ni_find_locked(&nid);
3851                         if (!lpni) {
3852                                 lnet_net_unlock(0);
3853                                 LNetMDUnlink(mdh);
3854                                 continue;
3855                         }
3856
3857                         lpni->lpni_ping_count++;
3858
3859                         lpni->lpni_recovery_ping_mdh = mdh;
3860
3861                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3862                                                              &processed_list,
3863                                                              now);
3864                         if (rc) {
3865                                 spin_lock(&lpni->lpni_lock);
3866                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3867                                 spin_unlock(&lpni->lpni_lock);
3868                         }
3869
3870                         /* Drop the ref taken by lnet_peer_ni_find_locked() */
3871                         lnet_peer_ni_decref_locked(lpni);
3872                         lnet_net_unlock(0);
3873                 } else {
3874                         spin_unlock(&lpni->lpni_lock);
3875                 }
3876         }
3877
3878         list_splice_init(&processed_list, &local_queue);
3879         lnet_net_lock(0);
3880         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3881         lnet_net_unlock(0);
3882
3883         return nlpnis;
3884 }
3885
3886 #define LNET_MAX_NNIDS 20
3887 /* @nids is array of nids that are in recovery. It has max size of
3888  * LNET_MAX_NNIDS.
3889  * @nnids is the total number of nids that are in recovery. It can be
3890  * larger than LNET_MAX_NNIDS.
3891  * @local tells us whether these are local or peer NIs in recovery.
3892  */
3893 static void
3894 lnet_print_recovery_list(struct lnet_nid *nids, unsigned int nnids,
3895                          bool local)
3896 {
3897         static bool printed;
3898         char *buf = NULL;
3899         char *tmp;
3900         int i;
3901         unsigned int arrsz;
3902         unsigned int bufsz;
3903
3904         if (!nnids)
3905                 return;
3906
3907         arrsz = nnids < LNET_MAX_NNIDS ? nnids : LNET_MAX_NNIDS;
3908
3909         /* Printing arrsz NIDs, each has max size LNET_NIDSTR_SIZE, a comma
3910          * and space for each nid after the first (2 * (arrsz - 1)),
3911          * + 1 for terminating null byte
3912          */
3913         bufsz = (arrsz * LNET_NIDSTR_SIZE) + (2 * (arrsz - 1)) + 1;
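        /* For example, with arrsz = 3 and LNET_NIDSTR_SIZE = 96 (the exact
         * value is an assumption here) this reserves 3 * 96 + 2 * 2 + 1 =
         * 293 bytes: three NID strings, two ", " separators and the
         * terminating NUL.
         */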
3914         LIBCFS_ALLOC(buf, bufsz);
3915         if (!buf) {
3916                 LCONSOLE(D_INFO, "%u %s NIs in recovery\n",
3917                          nnids, local ? "local" : "peer");
3918                 return;
3919         }
3920
3921         tmp = buf;
3922         tmp += sprintf(tmp, "%s", libcfs_nidstr(&nids[0]));
3923         for (i = 1; i < arrsz; i++)
3924                 tmp += sprintf(tmp, ", %s", libcfs_nidstr(&nids[i]));
3925
3926         /* LCONSOLE() used to avoid rate limiting when we have both local
3927          * and peer NIs in recovery
3928          */
3929         LCONSOLE(D_INFO, "%u %s NIs in recovery (showing %u): %s\n",
3930                  nnids, local ? "local" : "peer", arrsz, buf);
3931
3932         LIBCFS_FREE(buf, bufsz);
3933
3934         if (!printed && nnids > LNET_MAX_NNIDS) {
3935                 LCONSOLE(D_INFO, "See full list with 'lnetctl debug recovery -(p|l)'\n");
3936                 printed = true;
3937         }
3938 }
3939
3940 static void
3941 lnet_health_update_console(struct lnet_nid *lnids, unsigned int nnis,
3942                            struct lnet_nid *rnids, unsigned int nlpnis,
3943                            time64_t now)
3944 {
3945         static time64_t next_ni_update;
3946         static time64_t next_lpni_update;
3947         static time64_t next_msg_update;
3948         static unsigned int num_ni_updates;
3949         static unsigned int num_lpni_updates;
3950         static unsigned int num_msg_updates = 1;
3951         int late_count;
3952
3953         if (now >= next_ni_update) {
3954                 if (nnis) {
3955                         lnet_print_recovery_list(lnids, nnis, true);
3956                         if (num_ni_updates < 5)
3957                                 num_ni_updates++;
3958                         next_ni_update = now + (60 * num_ni_updates);
3959                 } else {
3960                         next_ni_update = 0;
3961                         num_ni_updates = 0;
3962                 }
3963         }
3964
3965
3966         if (now >= next_lpni_update) {
3967                 if (nlpnis) {
3968                         lnet_print_recovery_list(rnids, nlpnis, false);
3969                         if (num_lpni_updates < 5)
3970                                 num_lpni_updates++;
3971                         next_lpni_update = now + (60 * num_lpni_updates);
3972                 } else {
3973                         next_lpni_update = 0;
3974                         num_lpni_updates = 0;
3975                 }
3976         }
3977
3978         /* Let late_count accumulate for 60 seconds */
3979         if (unlikely(!next_msg_update))
3980                 next_msg_update = now + 60;
3981
3982         if (now >= next_msg_update) {
3983                 late_count = atomic_read(&the_lnet.ln_late_msg_count);
3984
3985                 if (late_count) {
3986                         s64 avg = atomic64_xchg(&the_lnet.ln_late_msg_nsecs, 0) /
3987                                   atomic_xchg(&the_lnet.ln_late_msg_count, 0);
3988
3989                         if (avg > NSEC_PER_SEC) {
3990                                 unsigned int avg_msec;
3991
3992                                 avg_msec = do_div(avg, NSEC_PER_SEC) /
3993                                                 NSEC_PER_MSEC;
3994                                 LCONSOLE_INFO("%u messages in past %us over their deadline by avg %lld.%03us\n",
3995                                               late_count,
3996                                               (60 * num_msg_updates), avg,
3997                                               avg_msec);
3998
3999                                 if (num_msg_updates < 5)
4000                                         num_msg_updates++;
4001                                 next_msg_update = now + (60 * num_msg_updates);
4002                         }
4003                 } else {
4004                         next_msg_update = now + 60;
4005                         num_msg_updates = 1;
4006                 }
4007         }
4008 }
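
/* Console throttling above: while local NIs, peer NIs, or late messages
 * keep being reported, the interval between reports grows by roughly 60s
 * per report up to a cap of five minutes; once the condition clears, the
 * counters reset so the next occurrence is reported promptly again.
 */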
4009
4010 static int
4011 lnet_monitor_thread(void *arg)
4012 {
4013         time64_t rsp_timeout = 0;
4014         time64_t now;
4015         unsigned int nnis;
4016         unsigned int nlpnis;
4017         struct lnet_nid local_nids[LNET_MAX_NNIDS];
4018         struct lnet_nid peer_nids[LNET_MAX_NNIDS];
4019
4020         wait_for_completion(&the_lnet.ln_started);
4021
4022         /*
4023          * The monitor thread takes care of the following:
4024          *  1. Checks the aliveness of routers
4025          *  2. Checks if there are messages on the resend queue to resend
4026          *     them.
4027          *  3. Checks if there are any NIs on the local recovery queue and
4028          *     pings them
4029          *  4. Checks if there are any NIs on the remote recovery queue
4030          *     and pings them.
4031          *  5. Updates the ping buffer if requested by LNDs upon interface
4032          *     state change
4033          */
4034         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
4035                 now = ktime_get_real_seconds();
4036
4037                 if (lnet_router_checker_active())
4038                         lnet_check_routers();
4039
4040                 lnet_resend_pending_msgs();
4041
4042                 if (now >= rsp_timeout) {
4043                         lnet_finalize_expired_responses();
4044                         rsp_timeout = now + (lnet_transaction_timeout / 2);
4045                 }
4046
4047                 nnis = lnet_recover_local_nis(local_nids, LNET_MAX_NNIDS);
4048                 nlpnis = lnet_recover_peer_nis(peer_nids, LNET_MAX_NNIDS);
4049                 lnet_health_update_console(local_nids, nnis, peer_nids, nlpnis,
4050                                            now);
4051
4052                 lnet_queue_ping_buffer_update();
4053
4054                 /*
4055                  * TODO do we need to check if we should sleep without
4056                  * timeout?  Technically, an active system will always
4057                  * have messages in flight so this check will always
4058                  * evaluate to false. And on an idle system do we care
4059                  * if we wake up every 1 second? Although, we've seen
4060                  * cases where we get a complaint that an idle thread
4061                  * is waking up unnecessarily.
4062                  */
4063                 wait_for_completion_interruptible_timeout(
4064                         &the_lnet.ln_mt_wait_complete,
4065                         cfs_time_seconds(1));
4066                 /* Must re-init the completion before testing anything,
4067                  * including ln_mt_state.
4068                  */
4069                 reinit_completion(&the_lnet.ln_mt_wait_complete);
4070         }
4071
4072         /* Shutting down */
4073         lnet_net_lock(LNET_LOCK_EX);
4074         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4075         lnet_net_unlock(LNET_LOCK_EX);
4076
4077         /* signal that the monitor thread is exiting */
4078         up(&the_lnet.ln_mt_signal);
4079
4080         return 0;
4081 }
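
/* Shutdown handshake: whoever stops the monitor moves ln_mt_state away from
 * LNET_MT_STATE_RUNNING (and presumably completes ln_mt_wait_complete to
 * cut the one second sleep short); the thread then marks the state
 * LNET_MT_STATE_SHUTDOWN and signals ln_mt_signal so the stopping thread
 * can continue.
 */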
4082
4083 /*
4084  * lnet_send_ping
4085  * Sends a ping.
4086  * Returns 0 on success
4087  * Returns > 0 (a positive errno) if LNetMDBind or an earlier step fails
4088  * Returns < 0 if LNetGet fails
4089  */
4090 int
4091 lnet_send_ping(struct lnet_nid *dest_nid,
4092                struct lnet_handle_md *mdh, int bytes,
4093                void *user_data, lnet_handler_t handler, bool recovery)
4094 {
4095         struct lnet_md md = { NULL };
4096         struct lnet_processid id;
4097         struct lnet_ping_buffer *pbuf;
4098         int rc;
4099
4100         if (LNET_NID_IS_ANY(dest_nid)) {
4101                 rc = -EHOSTUNREACH;
4102                 goto fail_error;
4103         }
4104
4105         pbuf = lnet_ping_buffer_alloc(bytes, GFP_NOFS);
4106         if (!pbuf) {
4107                 rc = ENOMEM; /* positive: failed before LNetGet(), see contract above */
4108                 goto fail_error;
4109         }
4110
4111         /* initialize md content */
4112         md.start     = &pbuf->pb_info;
4113         md.length    = bytes;
4114         md.threshold = 2; /* GET/REPLY */
4115         md.max_size  = 0;
4116         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
4117         md.user_ptr  = user_data;
4118         md.handler   = handler;
4119
4120         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
4121         if (rc) {
4122                 lnet_ping_buffer_decref(pbuf);
4123                 CERROR("Can't bind MD: %d\n", rc);
4124                 rc = -rc; /* change the rc to positive */
4125                 goto fail_error;
4126         }
4127         id.pid = LNET_PID_LUSTRE;
4128         id.nid = *dest_nid;
4129
4130         rc = LNetGet(NULL, *mdh, &id,
4131                      LNET_RESERVED_PORTAL,
4132                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
4133
4134         if (rc)
4135                 goto fail_unlink_md;
4136
4137         return 0;
4138
4139 fail_unlink_md:
4140         LNetMDUnlink(*mdh);
4141         LNetInvalidateMDHandle(mdh);
4142 fail_error:
4143         return rc;
4144 }
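/*
 * A minimal caller sketch showing how lnet_send_ping()'s mixed-sign
 * return convention is consumed (illustrative only, excluded from the
 * build).  A positive return means the failure happened before the MD
 * was bound, so there is nothing to unlink; a negative return means
 * LNetGet() failed and lnet_send_ping() has already unlinked and
 * invalidated the handle.  The handler and byte count are hypothetical.
 */
#if 0
static void example_ping(struct lnet_nid *nid, lnet_handler_t handler)
{
        struct lnet_handle_md mdh;
        int rc;

        LNetInvalidateMDHandle(&mdh);
        rc = lnet_send_ping(nid, &mdh, 256 /* hypothetical size */, NULL,
                            handler, false);
        if (rc > 0)
                CDEBUG(D_NET, "ping setup failed: %d\n", rc);
        else if (rc < 0)
                CDEBUG(D_NET, "ping send failed: %d\n", rc);
        /* on success, completion is signalled through the handler */
}
#endif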
4145
4146 static void
4147 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
4148                            int status, bool send, bool unlink_event)
4149 {
4150         struct lnet_nid *nid = &ev_info->mt_nid;
4151
4152         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
4153                 struct lnet_ni *ni;
4154
4155                 lnet_net_lock(0);
4156                 ni = lnet_nid_to_ni_locked(nid, 0);
4157                 if (!ni) {
4158                         lnet_net_unlock(0);
4159                         return;
4160                 }
4161                 lnet_ni_lock(ni);
4162                 if (!send || status != 0)
4163                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
4164                 if (status)
4165                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
4166                 lnet_ni_unlock(ni);
4167                 lnet_net_unlock(0);
4168
4169                 if (status != 0) {
4170                         CDEBUG(D_NET, "local NI (%s) recovery failed with %d\n",
4171                                libcfs_nidstr(nid), status);
4172                         return;
4173                 }
4174                 /*
4175                  * The healthv must be incremented for the NI here
4176                  * because the lnet_finalize() path has no access to
4177                  * this NI, and gaining access would mean carrying
4178                  * forward too much information.
4179                  * In the peer case it is incremented naturally.
4180                  */
4181                 if (!unlink_event)
4182                         lnet_inc_healthv(&ni->ni_healthv,
4183                                          lnet_health_sensitivity);
4184         } else {
4185                 struct lnet_peer_ni *lpni;
4186                 int cpt;
4187
4188                 cpt = lnet_net_lock_current();
4189                 lpni = lnet_peer_ni_find_locked(nid);
4190                 if (!lpni) {
4191                         lnet_net_unlock(cpt);
4192                         return;
4193                 }
4194                 spin_lock(&lpni->lpni_lock);
4195                 if (!send || status != 0)
4196                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
4197                 if (status)
4198                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
4199                 spin_unlock(&lpni->lpni_lock);
4200                 lnet_peer_ni_decref_locked(lpni);
4201                 lnet_net_unlock(cpt);
4202
4203                 if (status != 0)
4204                         CDEBUG(D_NET, "peer NI (%s) recovery failed with %d\n",
4205                                libcfs_nidstr(nid), status);
4206         }
4207 }
4208
4209 void
4210 lnet_mt_event_handler(struct lnet_event *event)
4211 {
4212         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4213         struct lnet_ping_buffer *pbuf;
4214
4215         /* TODO: remove assert */
4216         LASSERT(event->type == LNET_EVENT_REPLY ||
4217                 event->type == LNET_EVENT_SEND ||
4218                 event->type == LNET_EVENT_UNLINK);
4219
4220         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4221                event->status);
4222
4223         switch (event->type) {
4224         case LNET_EVENT_UNLINK:
4225                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4226                        libcfs_nidstr(&ev_info->mt_nid));
4227                 fallthrough;
4228         case LNET_EVENT_REPLY:
4229                 lnet_handle_recovery_reply(ev_info, event->status, false,
4230                                            event->type == LNET_EVENT_UNLINK);
4231                 break;
4232         case LNET_EVENT_SEND:
4233                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4234                                libcfs_nidstr(&ev_info->mt_nid),
4235                                (event->status) ? "unsuccessfully" :
4236                                "successfully", event->status);
4237                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4238                 break;
4239         default:
4240                 CERROR("Unexpected event: %d\n", event->type);
4241                 break;
4242         }
4243         if (event->unlinked) {
4244                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4245                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4246                 lnet_ping_buffer_decref(pbuf);
4247         }
4248 }
4249
4250 static int
4251 lnet_rsp_tracker_create(void)
4252 {
4253         struct list_head **rstqs;
4254         rstqs = lnet_create_array_of_queues();
4255
4256         if (!rstqs)
4257                 return -ENOMEM;
4258
4259         the_lnet.ln_mt_rstq = rstqs;
4260
4261         return 0;
4262 }
4263
4264 static void
4265 lnet_rsp_tracker_clean(void)
4266 {
4267         lnet_finalize_expired_responses();
4268
4269         cfs_percpt_free(the_lnet.ln_mt_rstq);
4270         the_lnet.ln_mt_rstq = NULL;
4271 }
4272
4273 int lnet_monitor_thr_start(void)
4274 {
4275         int rc = 0;
4276         struct task_struct *task;
4277
4278         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4279                 return -EALREADY;
4280
4281         rc = lnet_resendqs_create();
4282         if (rc)
4283                 return rc;
4284
4285         rc = lnet_rsp_tracker_create();
4286         if (rc)
4287                 goto clean_queues;
4288
4289         the_lnet.ln_pb_update_wq = alloc_workqueue("lnetpb_wq",
4290                                                    WQ_UNBOUND,
4291                                                    1);
4292         if (!the_lnet.ln_pb_update_wq) {
4293                 rc = -ENOMEM;
4294                 CERROR("Failed to allocate LNet ping buffer workqueue\n");
4295                 goto clean_queues;
4296         }
4297         atomic_set(&the_lnet.ln_pb_update_ready, 1);
4298
4299         sema_init(&the_lnet.ln_mt_signal, 0);
4300
4301         lnet_net_lock(LNET_LOCK_EX);
4302         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4303         lnet_net_unlock(LNET_LOCK_EX);
4304         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4305         if (IS_ERR(task)) {
4306                 rc = PTR_ERR(task);
4307                 CERROR("Can't start monitor thread: %d\n", rc);
4308                 goto clean_thread;
4309         }
4310
4311         return 0;
4312
4313 clean_thread:
4314         lnet_net_lock(LNET_LOCK_EX);
4315         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4316         lnet_net_unlock(LNET_LOCK_EX);
4317         /* block until event callback signals exit */
4318         down(&the_lnet.ln_mt_signal);
4319         /* clean up */
4320         lnet_net_lock(LNET_LOCK_EX);
4321         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4322         lnet_net_unlock(LNET_LOCK_EX);
4323         lnet_rsp_tracker_clean();
4324         lnet_clean_local_ni_recoveryq();
4325         lnet_clean_peer_ni_recoveryq();
4326         lnet_clean_resendqs();
4327         the_lnet.ln_mt_handler = NULL;
4328         return rc;
4329 clean_queues:
4330         lnet_rsp_tracker_clean();
4331         lnet_clean_local_ni_recoveryq();
4332         lnet_clean_peer_ni_recoveryq();
4333         lnet_clean_resendqs();
4334         return rc;
4335 }
4336
4337 void lnet_monitor_thr_stop(void)
4338 {
4339         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4340                 return;
4341
4342         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4343
4344         /* clean up the ping buffer update workqueue before telling
4345          * the monitor thread to shut down to avoid getting stuck
4346          * on pending messages
4347          */
4348         mutex_unlock(&the_lnet.ln_api_mutex);
4349         flush_workqueue(the_lnet.ln_pb_update_wq);
4350         destroy_workqueue(the_lnet.ln_pb_update_wq);
4351         atomic_set(&the_lnet.ln_pb_update_ready, 0);
4352         the_lnet.ln_pb_update_wq = NULL;
4353         mutex_lock(&the_lnet.ln_api_mutex);
4354
4355         lnet_net_lock(LNET_LOCK_EX);
4356         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4357         lnet_net_unlock(LNET_LOCK_EX);
4358
4359         /* tell the monitor thread that we're shutting down */
4360         complete(&the_lnet.ln_mt_wait_complete);
4361
4362         /* block until monitor thread signals that it's done */
4363         mutex_unlock(&the_lnet.ln_api_mutex);
4364         down(&the_lnet.ln_mt_signal);
4365         mutex_lock(&the_lnet.ln_api_mutex);
4366         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4367
4368         /* perform cleanup tasks */
4369         lnet_rsp_tracker_clean();
4370         lnet_clean_local_ni_recoveryq();
4371         lnet_clean_peer_ni_recoveryq();
4372         lnet_clean_resendqs();
4373 }
4374
4375 void
4376 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4377                   __u32 msg_type)
4378 {
4379         lnet_net_lock(cpt);
4380         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4381         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4382         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4383         lnet_net_unlock(cpt);
4384
4385         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4386 }
4387
4388 static void
4389 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4390 {
4391         struct lnet_hdr *hdr = &msg->msg_hdr;
4392
4393         if (msg->msg_wanted != 0)
4394                 lnet_setpayloadbuffer(msg);
4395
4396         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4397
4398         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4399          * it back into the ACK during lnet_finalize() */
4400         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4401                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4402
4403         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4404                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4405 }
4406
4407 static int
4408 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4409 {
4410         struct lnet_hdr         *hdr = &msg->msg_hdr;
4411         struct lnet_match_info  info;
4412         int                     rc;
4413         bool                    ready_delay;
4414
4415         /* Convert put fields to host byte order */
4416         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4417         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4418         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4419
4420         /* Primary peer NID. */
4421         info.mi_id.nid = msg->msg_initiator;
4422         info.mi_id.pid  = hdr->src_pid;
4423         info.mi_opc     = LNET_MD_OP_PUT;
4424         info.mi_portal  = hdr->msg.put.ptl_index;
4425         info.mi_rlength = hdr->payload_length;
4426         info.mi_roffset = hdr->msg.put.offset;
4427         info.mi_mbits   = hdr->msg.put.match_bits;
4428         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4429
4430         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4431         ready_delay = msg->msg_rx_ready_delay;
4432
4433  again:
4434         rc = lnet_ptl_match_md(&info, msg);
4435         switch (rc) {
4436         default:
4437                 LBUG();
4438
4439         case LNET_MATCHMD_OK:
4440                 lnet_recv_put(ni, msg);
4441                 return 0;
4442
4443         case LNET_MATCHMD_NONE:
4444                 if (ready_delay)
4445                         /* no eager_recv, or it has already been called;
4446                          * the message should be on the delayed list */
4447                         return 0;
4448
4449                 rc = lnet_ni_eager_recv(ni, msg);
4450                 if (rc == 0) {
4451                         ready_delay = true;
4452                         goto again;
4453                 }
4454                 fallthrough;
4455
4456         case LNET_MATCHMD_DROP:
4457                 CNETERR("Dropping PUT from %s portal %d match %llu"
4458                         " offset %d length %d: %d\n",
4459                         libcfs_idstr(&info.mi_id), info.mi_portal,
4460                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4461
4462                 return -ENOENT; /* -ve: OK but no match */
4463         }
4464 }
4465
4466 static int
4467 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4468 {
4469         struct lnet_match_info info;
4470         struct lnet_hdr *hdr = &msg->msg_hdr;
4471         struct lnet_processid source_id;
4472         struct lnet_handle_wire reply_wmd;
4473         int rc;
4474
4475         /* Convert get fields to host byte order */
4476         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4477         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4478         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4479         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4480
4481         source_id.nid = hdr->src_nid;
4482         source_id.pid = hdr->src_pid;
4483         /* Primary peer NID */
4484         info.mi_id.nid  = msg->msg_initiator;
4485         info.mi_id.pid  = hdr->src_pid;
4486         info.mi_opc     = LNET_MD_OP_GET;
4487         info.mi_portal  = hdr->msg.get.ptl_index;
4488         info.mi_rlength = hdr->msg.get.sink_length;
4489         info.mi_roffset = hdr->msg.get.src_offset;
4490         info.mi_mbits   = hdr->msg.get.match_bits;
4491         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4492
4493         rc = lnet_ptl_match_md(&info, msg);
4494         if (rc == LNET_MATCHMD_DROP) {
4495                 CNETERR("Dropping GET from %s portal %d match %llu"
4496                         " offset %d length %d\n",
4497                         libcfs_idstr(&info.mi_id), info.mi_portal,
4498                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4499                 return -ENOENT; /* -ve: OK but no match */
4500         }
4501
4502         LASSERT(rc == LNET_MATCHMD_OK);
4503
4504         lnet_build_msg_event(msg, LNET_EVENT_GET);
4505
4506         reply_wmd = hdr->msg.get.return_wmd;
4507
4508         lnet_prep_send(msg, LNET_MSG_REPLY, &source_id,
4509                        msg->msg_offset, msg->msg_wanted);
4510
4511         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4512
4513         if (rdma_get) {
4514                 /* The LND completes the REPLY from its recv procedure */
4515                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4516                              msg->msg_offset, msg->msg_len, msg->msg_len);
4517                 return 0;
4518         }
4519
4520         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4521         msg->msg_receiving = 0;
4522
4523         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4524         if (rc < 0) {
4525                 /* didn't get as far as lnet_ni_send() */
4526                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4527                        libcfs_nidstr(&ni->ni_nid),
4528                        libcfs_idstr(&info.mi_id), rc);
4529
4530                 lnet_finalize(msg, rc);
4531         }
4532
4533         return 0;
4534 }
4535
4536 static int
4537 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4538 {
4539         void *private = msg->msg_private;
4540         struct lnet_hdr *hdr = &msg->msg_hdr;
4541         struct lnet_processid src = {};
4542         struct lnet_libmd *md;
4543         unsigned int rlength;
4544         unsigned int mlength;
4545         int cpt;
4546
4547         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4548         lnet_res_lock(cpt);
4549
4550         src.nid = hdr->src_nid;
4551         src.pid = hdr->src_pid;
4552
4553         /* NB handles only looked up by creator (no flips) */
4554         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4555         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4556                 CNETERR("%s: Dropping REPLY from %s for %s "
4557                         "MD %#llx.%#llx\n",
4558                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4559                         (md == NULL) ? "invalid" : "inactive",
4560                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4561                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4562                 if (md != NULL && md->md_me != NULL)
4563                         CERROR("REPLY MD also attached to portal %d\n",
4564                                md->md_me->me_portal);
4565
4566                 lnet_res_unlock(cpt);
4567                 return -ENOENT; /* -ve: OK but no match */
4568         }
4569
4570         LASSERT(md->md_offset == 0);
4571
4572         rlength = hdr->payload_length;
4573         mlength = min(rlength, md->md_length);
4574
4575         if (mlength < rlength &&
4576             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4577                 CNETERR("%s: Dropping REPLY from %s length %d "
4578                         "for MD %#llx would overflow (%d)\n",
4579                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4580                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4581                         mlength);
4582                 lnet_res_unlock(cpt);
4583                 return -ENOENT; /* -ve: OK but no match */
4584         }
4585
4586         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4587                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4588                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4589
4590         lnet_msg_attach_md(msg, md, 0, mlength);
4591
4592         if (mlength != 0)
4593                 lnet_setpayloadbuffer(msg);
4594
4595         lnet_res_unlock(cpt);
4596
4597         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4598
4599         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4600         return 0;
4601 }
4602
4603 static int
4604 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4605 {
4606         struct lnet_hdr *hdr = &msg->msg_hdr;
4607         struct lnet_processid src = {};
4608         struct lnet_libmd *md;
4609         int cpt;
4610
4611         src.nid = hdr->src_nid;
4612         src.pid = hdr->src_pid;
4613
4614         /* Convert ack fields to host byte order */
4615         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4616         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4617
4618         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4619         lnet_res_lock(cpt);
4620
4621         /* NB handles only looked up by creator (no flips) */
4622         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4623         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4624                 /* Don't moan; this is expected */
4625                 CDEBUG(D_NET,
4626                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4627                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4628                        (md == NULL) ? "invalid" : "inactive",
4629                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4630                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4631                 if (md != NULL && md->md_me != NULL)
4632                         CERROR("Source MD also attached to portal %d\n",
4633                                md->md_me->me_portal);
4634
4635                 lnet_res_unlock(cpt);
4636                 return -ENOENT;                  /* -ve! */
4637         }
4638
4639         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4640                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src),
4641                hdr->msg.ack.dst_wmd.wh_object_cookie);
4642
4643         lnet_msg_attach_md(msg, md, 0, 0);
4644
4645         lnet_res_unlock(cpt);
4646
4647         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4648
4649         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4650         return 0;
4651 }
4652
4653 /**
4654  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4655  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4656  * \retval -ve                  error code
4657  */
4658 int
4659 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4660 {
4661         int     rc = 0;
4662
4663         if (!the_lnet.ln_routing)
4664                 return -ECANCELED;
4665
4666         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4667             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4668                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4669                         msg->msg_rx_ready_delay = 1;
4670                 } else {
4671                         lnet_net_unlock(msg->msg_rx_cpt);
4672                         rc = lnet_ni_eager_recv(ni, msg);
4673                         lnet_net_lock(msg->msg_rx_cpt);
4674                 }
4675         }
4676
4677         if (rc == 0)
4678                 rc = lnet_post_routed_recv_locked(msg, 0);
4679         return rc;
4680 }
4681
4682 int
4683 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4684 {
4685         int     rc;
4686
4687         switch (msg->msg_type) {
4688         case LNET_MSG_ACK:
4689                 rc = lnet_parse_ack(ni, msg);
4690                 break;
4691         case LNET_MSG_PUT:
4692                 rc = lnet_parse_put(ni, msg);
4693                 break;
4694         case LNET_MSG_GET:
4695                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4696                 break;
4697         case LNET_MSG_REPLY:
4698                 rc = lnet_parse_reply(ni, msg);
4699                 break;
4700         default: /* prevent an unused label if !kernel */
4701                 LASSERT(0);
4702                 return -EPROTO;
4703         }
4704
4705         LASSERT(rc == 0 || rc == -ENOENT);
4706         return rc;
4707 }
4708
4709 char *
4710 lnet_msgtyp2str(int type)
4711 {
4712         switch (type) {
4713         case LNET_MSG_ACK:
4714                 return "ACK";
4715         case LNET_MSG_PUT:
4716                 return "PUT";
4717         case LNET_MSG_GET:
4718                 return "GET";
4719         case LNET_MSG_REPLY:
4720                 return "REPLY";
4721         case LNET_MSG_HELLO:
4722                 return "HELLO";
4723         default:
4724                 return "<UNKNOWN>";
4725         }
4726 }
4727 EXPORT_SYMBOL(lnet_msgtyp2str);
4728
4729 int
4730 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
4731            struct lnet_nid *from_nid, void *private, int rdma_req)
4732 {
4733         struct lnet_peer_ni *lpni;
4734         struct lnet_msg *msg;
4735         __u32 payload_length;
4736         lnet_pid_t dest_pid;
4737         struct lnet_nid dest_nid;
4738         struct lnet_nid src_nid;
4739         bool push = false;
4740         int for_me;
4741         __u32 type;
4742         int rc = 0;
4743         int cpt;
4744         time64_t now = ktime_get_seconds();
4745
4746         LASSERT(!in_interrupt());
4747
4748         type = hdr->type;
4749         src_nid = hdr->src_nid;
4750         dest_nid = hdr->dest_nid;
4751         dest_pid = hdr->dest_pid;
4752         payload_length = hdr->payload_length;
4753
4754         for_me = nid_same(&ni->ni_nid, &dest_nid);
4755         cpt = lnet_nid2cpt(from_nid, ni);
4756
4757         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4758                 libcfs_nidstr(&dest_nid),
4759                 libcfs_nidstr(&ni->ni_nid),
4760                 libcfs_nidstr(&src_nid),
4761                 lnet_msgtyp2str(type),
4762                 (for_me) ? "for me" : "routed");
4763
4764         switch (type) {
4765         case LNET_MSG_ACK:
4766         case LNET_MSG_GET:
4767                 if (payload_length > 0) {
4768                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4769                                libcfs_nidstr(from_nid),
4770                                libcfs_nidstr(&src_nid),
4771                                lnet_msgtyp2str(type), payload_length);
4772                         return -EPROTO;
4773                 }
4774                 break;
4775
4776         case LNET_MSG_PUT:
4777         case LNET_MSG_REPLY:
4778                 if (payload_length >
4779                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4780                         CERROR("%s, src %s: bad %s payload %d "
4781                                "(%d max expected)\n",
4782                                libcfs_nidstr(from_nid),
4783                                libcfs_nidstr(&src_nid),
4784                                lnet_msgtyp2str(type),
4785                                payload_length,
4786                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4787                         return -EPROTO;
4788                 }
4789                 break;
4790
4791         default:
4792                 CERROR("%s, src %s: Bad message type 0x%x\n",
4793                        libcfs_nidstr(from_nid),
4794                        libcfs_nidstr(&src_nid), type);
4795                 return -EPROTO;
4796         }
4797
4798         /* Only update net_last_alive for incoming GETs on the reserved portal
4799          * (i.e. incoming lnet/discovery pings).
4800          * This avoids situations where the router's own traffic results in NI
4801          * status changes
4802          */
4803         if (the_lnet.ln_routing && type == LNET_MSG_GET &&
4804             hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL &&
4805             !lnet_islocalnid(&src_nid) &&
4806             ni->ni_net->net_last_alive != now) {
4807                 lnet_ni_lock(ni);
4808                 spin_lock(&ni->ni_net->net_lock);
4809                 ni->ni_net->net_last_alive = now;
4810                 spin_unlock(&ni->ni_net->net_lock);
4811                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4812                 lnet_ni_unlock(ni);
4813         }
4814
4815         if (push)
4816                 lnet_push_update_to_peers(1);
4817
4818         /* Regard a bad destination NID as a protocol error.  Senders should
4819          * know what they're doing; if they don't they're misconfigured, buggy
4820          * or malicious so we chop them off at the knees :) */
4821
4822         if (!for_me) {
4823                 if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4824                         /* should have gone direct */
4825                         CERROR("%s, src %s: Bad dest nid %s "
4826                                "(should have been sent direct)\n",
4827                                 libcfs_nidstr(from_nid),
4828                                 libcfs_nidstr(&src_nid),
4829                                 libcfs_nidstr(&dest_nid));
4830                         return -EPROTO;
4831                 }
4832
4833                 if (lnet_islocalnid(&dest_nid)) {
4834                         /* dest is another local NI; sender should have used
4835                          * this node's NID on its own network */
4836                         CERROR("%s, src %s: Bad dest nid %s "
4837                                "(it's my nid but on a different network)\n",
4838                                 libcfs_nidstr(from_nid),
4839                                 libcfs_nidstr(&src_nid),
4840                                 libcfs_nidstr(&dest_nid));
4841                         return -EPROTO;
4842                 }
4843
4844                 if (rdma_req && type == LNET_MSG_GET) {
4845                         CERROR("%s, src %s: Bad optimized GET for %s "
4846                                "(final destination must be me)\n",
4847                                 libcfs_nidstr(from_nid),
4848                                 libcfs_nidstr(&src_nid),
4849                                 libcfs_nidstr(&dest_nid));
4850                         return -EPROTO;
4851                 }
4852
4853                 if (!the_lnet.ln_routing) {
4854                         CERROR("%s, src %s: Dropping message for %s "
4855                                "(routing not enabled)\n",
4856                                 libcfs_nidstr(from_nid),
4857                                 libcfs_nidstr(&src_nid),
4858                                 libcfs_nidstr(&dest_nid));
4859                         goto drop;
4860                 }
4861         }
4862
4863         /* Message looks OK; we're not going to return an error, so we MUST
4864          * call back lnd_recv() come what may... */
4865
4866         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4867             fail_peer(&src_nid, 0)) {                   /* shall we now? */
4868                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4869                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4870                        lnet_msgtyp2str(type));
4871                 goto drop;
4872         }
4873
4874         if (!list_empty(&the_lnet.ln_drop_rules) &&
4875             lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) {
4876                 CDEBUG(D_NET,
4877                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4878                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4879                        libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type));
4880                 goto drop;
4881         }
4882
4883         msg = lnet_msg_alloc();
4884         if (msg == NULL) {
4885                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4886                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4887                        lnet_msgtyp2str(type));
4888                 goto drop;
4889         }
4890
4891         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4892          * pointers NULL etc */
4893
4894         msg->msg_type = type;
4895         msg->msg_private = private;
4896         msg->msg_receiving = 1;
4897         msg->msg_rdma_get = rdma_req;
4898         msg->msg_len = msg->msg_wanted = payload_length;
4899         msg->msg_offset = 0;
4900         msg->msg_hdr = *hdr;
4901         /* for building message event */
4902         msg->msg_from = *from_nid;
4903         if (!for_me) {
4904                 msg->msg_target.pid = dest_pid;
4905                 msg->msg_target.nid = dest_nid;
4906                 msg->msg_routing = 1;
4907         }
4908
4909         lnet_net_lock(cpt);
4910         lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt);
4911         if (IS_ERR(lpni)) {
4912                 lnet_net_unlock(cpt);
4913                 rc = PTR_ERR(lpni);
4914                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4915                        libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid),
4916                        lnet_msgtyp2str(type), rc);
4917                 lnet_msg_free(msg);
4918                 if (rc == -ESHUTDOWN)
4919                         /* We are shutting down.  Don't do anything more */
4920                         return rc;
4921                 goto drop;
4922         }
4923
4924         /* If this message was forwarded to us from a router then we may need
4925          * to update router aliveness or check for an asymmetrical route
4926          * (or both)
4927          */
4928         if (((lnet_drop_asym_route && for_me) ||
4929              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4930             LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) {
4931                 __u32 src_net_id = LNET_NID_NET(&src_nid);
4932                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4933                 struct lnet_route *route;
4934                 bool found = false;
4935
4936                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4937                         if (route->lr_net == src_net_id) {
4938                                 found = true;
4939                                 /* If we're transitioning the gateway from
4940                                  * dead -> alive, and discovery is disabled
4941                                  * locally or on the gateway, then we need to
4942                                  * update the cached route aliveness for each
4943                                  * route to the src_nid's net.
4944                                  *
4945                                  * Otherwise, we're only checking for
4946                                  * symmetrical route, and we can break the
4947                                  * loop
4948                                  */
4949                                 if (!gw->lp_alive &&
4950                                     lnet_is_discovery_disabled(gw))
4951                                         lnet_set_route_aliveness(route, true);
4952                                 else
4953                                         break;
4954                         }
4955                 }
4956                 if (lnet_drop_asym_route && for_me && !found) {
4957                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4958                         lnet_peer_ni_decref_locked(lpni);
4959                         lnet_net_unlock(cpt);
4960                         /* we would not use from_nid to route a message to
4961                          * src_nid
4962                          * => asymmetric routing detected but forbidden
4963                          */
4964                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4965                                libcfs_nidstr(from_nid),
4966                                libcfs_nidstr(&src_nid), lnet_msgtyp2str(type));
4967                         lnet_msg_free(msg);
4968                         goto drop;
4969                 }
4970                 if (!gw->lp_alive) {
4971                         struct lnet_peer_net *lpn;
4972                         struct lnet_peer_ni *lpni2;
4973
4974                         gw->lp_alive = true;
4975                         /* Mark all remote NIs on src_nid's net UP */
4976                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4977                         if (lpn)
4978                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4979                                                     lpni_peer_nis)
4980                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4981                 }
4982         }
4983
4984         lpni->lpni_last_alive = now;
4985
4986         msg->msg_rxpeer = lpni;
4987         msg->msg_rxni = ni;
4988         lnet_ni_addref_locked(ni, cpt);
4989         /* Multi-Rail: Primary NID of source. */
4990         lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator);
4991
4992         /*
4993          * Mark the status of this lpni as UP since we received a message
4994          * from it.  The ping response reports back ns_status, which the
4995          * remote marks as up or down, and we cache it here.
4996          */
4997         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4998
4999         lnet_msg_commit(msg, cpt);
5000
5001         /* message delay simulation */
5002         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
5003                      lnet_delay_rule_match_locked(hdr, msg))) {
5004                 lnet_net_unlock(cpt);
5005                 return 0;
5006         }
5007
5008         if (!for_me) {
5009                 rc = lnet_parse_forward_locked(ni, msg);
5010                 lnet_net_unlock(cpt);
5011
5012                 if (rc < 0)
5013                         goto free_drop;
5014
5015                 if (rc == LNET_CREDIT_OK) {
5016                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
5017                                      0, payload_length, payload_length);
5018                 }
5019                 return 0;
5020         }
5021
5022         lnet_net_unlock(cpt);
5023
5024         rc = lnet_parse_local(ni, msg);
5025         if (rc != 0)
5026                 goto free_drop;
5027         return 0;
5028
5029  free_drop:
5030         LASSERT(msg->msg_md == NULL);
5031         lnet_finalize(msg, rc);
5032
5033  drop:
5034         lnet_drop_message(ni, cpt, private, payload_length, type);
5035         return 0;
5036 }
5037 EXPORT_SYMBOL(lnet_parse);
5038
5039 void
5040 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
5041 {
5042         struct lnet_msg *msg;
5043
5044         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
5045                                                msg_list)) != NULL) {
5046                 struct lnet_processid id = {};
5047
5048                 list_del(&msg->msg_list);
5049
5050                 id.nid = msg->msg_hdr.src_nid;
5051                 id.pid = msg->msg_hdr.src_pid;
5052
5053                 LASSERT(msg->msg_md == NULL);
5054                 LASSERT(msg->msg_rx_delayed);
5055                 LASSERT(msg->msg_rxpeer != NULL);
5056                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
5057
5058                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
5059                       " offset %d length %d: %s\n",
5060                       libcfs_idstr(&id),
5061                       msg->msg_hdr.msg.put.ptl_index,
5062                       msg->msg_hdr.msg.put.match_bits,
5063                       msg->msg_hdr.msg.put.offset,
5064                       msg->msg_hdr.payload_length, reason);
5065
5066                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
5067                  * called lnet_drop_message(), so I just hang onto msg as well
5068                  * until that's done */
5069
5070                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
5071                                   msg->msg_private, msg->msg_len,
5072                                   msg->msg_type);
5073
5074                 msg->msg_no_resend = true;
5075                 /*
5076                  * NB: the message generates no event because it has no
5077                  * attached MD, but we still give an error code so that
5078                  * lnet_msg_decommit() can skip counter operations and checks.
5079                  */
5080                 lnet_finalize(msg, -ENOENT);
5081         }
5082 }
5083
5084 void
5085 lnet_recv_delayed_msg_list(struct list_head *head)
5086 {
5087         struct lnet_msg *msg;
5088
5089         while ((msg = list_first_entry_or_null(head, struct lnet_msg,
5090                                                msg_list)) != NULL) {
5091                 struct lnet_processid id;
5092
5093                 list_del(&msg->msg_list);
5094
5095                 /* md won't disappear under me, since each msg
5096                  * holds a ref on it */
5097
5098                 id.nid = msg->msg_hdr.src_nid;
5099                 id.pid = msg->msg_hdr.src_pid;
5100
5101                 LASSERT(msg->msg_rx_delayed);
5102                 LASSERT(msg->msg_md != NULL);
5103                 LASSERT(msg->msg_rxpeer != NULL);
5104                 LASSERT(msg->msg_rxni != NULL);
5105                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
5106
5107                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
5108                        "match %llu offset %d length %d.\n",
5109                         libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index,
5110                         msg->msg_hdr.msg.put.match_bits,
5111                         msg->msg_hdr.msg.put.offset,
5112                         msg->msg_hdr.payload_length);
5113
5114                 lnet_recv_put(msg->msg_rxni, msg);
5115         }
5116 }
5117
5118 static void
5119 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
5120                         struct lnet_libmd *md, struct lnet_handle_md mdh)
5121 {
5122         s64 timeout_ns;
5123         struct lnet_rsp_tracker *local_rspt;
5124
5125         /*
5126          * The MD has a refcount taken by the message, so it's not going
5127          * away.  It can still be looked up, however, so access to
5128          * md_rspt_ptr must be secured by taking the res_lock.
5129          * The rspt can be accessed without protection up until it is
5130          * added to the list.
5131          */
5132
5133         lnet_res_lock(cpt);
5134         local_rspt = md->md_rspt_ptr;
5135         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
5136         if (local_rspt != NULL) {
5137                 /*
5138                  * we already have an rspt attached to the md, so we'll
5139                  * update the deadline on that one.
5140                  */
5141                 lnet_rspt_free(rspt, cpt);
5142         } else {
5143                 /* new md */
5144                 rspt->rspt_mdh = mdh;
5145                 rspt->rspt_cpt = cpt;
5146                 /* store the rspt so we can access it when we get the REPLY */
5147                 md->md_rspt_ptr = rspt;
5148                 local_rspt = rspt;
5149         }
5150         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
5151
5152         /*
5153          * Add to the tail of the list of tracked responses so that
5154          * the oldest entries expire first.
5155          */
5156         lnet_net_lock(cpt);
5157         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
5158         lnet_net_unlock(cpt);
5159         lnet_res_unlock(cpt);
5160 }
5161
5162 /**
5163  * Initiate an asynchronous PUT operation.
5164  *
5165  * There are several events associated with a PUT: completion of the send on
5166  * the initiator node (LNET_EVENT_SEND), and when the send completes
5167  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
5168  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
5169  * used at the target node to indicate the completion of incoming data
5170  * delivery.
5171  *
5172  * The local events will be logged in the EQ associated with the MD pointed to
5173  * by the \a mdh handle. Using an MD without an associated EQ results in these
5174  * events being discarded. In this case, the caller must have another
5175  * mechanism (e.g., a higher level protocol) for determining when it is safe
5176  * to modify the memory region associated with the MD.
5177  *
5178  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
5179  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
5180  *
5181  * \param self Indicates the NID of a local interface through which to send
5182  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
5183  * \param mdh A handle for the MD that describes the memory to be sent. The MD
5184  * must be "free floating" (See LNetMDBind()).
5185  * \param ack Controls whether an acknowledgment is requested.
5186  * Acknowledgments are only sent when they are requested by the initiating
5187  * process and the target MD enables them.
5188  * \param target A process identifier for the target process.
5189  * \param portal The index in the \a target's portal table.
5190  * \param match_bits The match bits to use for MD selection at the target
5191  * process.
5192  * \param offset The offset into the target MD (only used when the target
5193  * MD has the LNET_MD_MANAGE_REMOTE option set).
5194  * \param hdr_data 64 bits of user data that can be included in the message
5195  * header. This data is written to an event queue entry at the target if an
5196  * EQ is present on the matching MD.
5197  *
5198  * \retval  0      Success, and only in this case events will be generated
5199  * and logged to EQ (if it exists).
5200  * \retval -EIO    Simulated failure.
5201  * \retval -ENOMEM Memory allocation failure.
5202  * \retval -ENOENT Invalid MD object.
5203  *
5204  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
5205  */
5206 int
5207 LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
5208         struct lnet_processid *target, unsigned int portal,
5209         __u64 match_bits, unsigned int offset,
5210         __u64 hdr_data)
5211 {
5212         struct lnet_msg *msg;
5213         struct lnet_libmd *md;
5214         int cpt;
5215         int rc;
5216         struct lnet_rsp_tracker *rspt = NULL;
5217
5218         LASSERT(the_lnet.ln_refcount > 0);
5219
5220         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5221             fail_peer(&target->nid, 1)) {               /* shall we now? */
5222                 CERROR("Dropping PUT to %s: simulated failure\n",
5223                        libcfs_idstr(target));
5224                 return -EIO;
5225         }
5226
5227         msg = lnet_msg_alloc();
5228         if (msg == NULL) {
5229                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
5230                        libcfs_idstr(target));
5231                 return -ENOMEM;
5232         }
5233         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5234
5235         cpt = lnet_cpt_of_cookie(mdh.cookie);
5236
5237         if (ack == LNET_ACK_REQ) {
5238                 rspt = lnet_rspt_alloc(cpt);
5239                 if (!rspt) {
5240                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5241                                 libcfs_idstr(target));
5242                         lnet_msg_free(msg);
5243                         return -ENOMEM;
5244                 }
5245                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5246         }
5247
5248         lnet_res_lock(cpt);
5249
5250         md = lnet_handle2md(&mdh);
5251         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5252                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5253                        match_bits, portal, libcfs_idstr(target),
5254                        md == NULL ? -1 : md->md_threshold);
5255                 if (md != NULL && md->md_me != NULL)
5256                         CERROR("Source MD also attached to portal %d\n",
5257                                md->md_me->me_portal);
5258                 lnet_res_unlock(cpt);
5259
5260                 if (rspt)
5261                         lnet_rspt_free(rspt, cpt);
5262
5263                 lnet_msg_free(msg);
5264                 return -ENOENT;
5265         }
5266
5267         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5268
5269         lnet_msg_attach_md(msg, md, 0, 0);
5270
5271         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5272
5273         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5274         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5275         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5276         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5277
5278         /* NB handles only looked up by creator (no flips) */
5279         if (ack == LNET_ACK_REQ) {
5280                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5281                         the_lnet.ln_interface_cookie;
5282                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5283                         md->md_lh.lh_cookie;
5284         } else {
5285                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5286                         LNET_WIRE_HANDLE_COOKIE_NONE;
5287                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5288                         LNET_WIRE_HANDLE_COOKIE_NONE;
5289         }
5290
5291         lnet_res_unlock(cpt);
5292
5293         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5294
5295         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5296                                                    md->md_options))
5297                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5298         else if (rspt)
5299                 lnet_rspt_free(rspt, cpt);
5300
5301         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5302                                  CFS_FAIL_ONCE))
5303                 rc = -EIO;
5304         else
5305                 rc = lnet_send(self, msg, NULL);
5306
5307         if (rc != 0) {
5308                 CNETERR("Error sending PUT to %s: %d\n",
5309                         libcfs_idstr(target), rc);
5310                 msg->msg_no_resend = true;
5311                 lnet_finalize(msg, rc);
5312         }
5313
5314         /* completion will be signalled by an event */
5315         return 0;
5316 }
5317 EXPORT_SYMBOL(LNetPut);
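/*
 * A minimal LNetPut() usage sketch (illustrative only, excluded from the
 * build).  The portal index and match bits are hypothetical; a real
 * caller uses the values its protocol defines, and mdh must already have
 * been bound with LNetMDBind().  Passing NULL for self lets LNet choose
 * the local interface, as lnet_send_ping() above does for LNetGet().
 */
#if 0
static int example_put(struct lnet_handle_md mdh,
                       struct lnet_processid *target)
{
        int rc;

        /* request an ACK so a response tracker can arm a timeout */
        rc = LNetPut(NULL, mdh, LNET_ACK_REQ, target,
                     12,                /* hypothetical portal index */
                     0x1234ULL,         /* hypothetical match bits */
                     0,                 /* offset into the target MD */
                     0);                /* hdr_data */
        if (rc != 0)
                CDEBUG(D_NET, "PUT failed: %d\n", rc);
        /* on 0, SEND and (if accepted) ACK events arrive via the MD handler */
        return rc;
}
#endif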
5318
5319 /*
5320  * The LND can DMA directly to the GET md (i.e. no REPLY msg).  This
5321  * returns a msg for the LND to pass to lnet_finalize() when the sink
5322  * data has been received.
5323  *
5324  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5325  * lnet_finalize() is called on it, so the LND must call this first
5326  */
5327 struct lnet_msg *
5328 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5329 {
5330         struct lnet_msg *msg = lnet_msg_alloc();
5331         struct lnet_libmd *getmd = getmsg->msg_md;
5332         struct lnet_processid *peer_id = &getmsg->msg_target;
5333         int cpt;
5334
5335         LASSERT(!getmsg->msg_target_is_router);
5336         LASSERT(!getmsg->msg_routing);
5337
5338         if (msg == NULL) {
5339                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5340                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5341                 goto drop;
5342         }
5343
5344         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5345         lnet_res_lock(cpt);
5346
5347         LASSERT(getmd->md_refcount > 0);
5348
5349         if (getmd->md_threshold == 0) {
5350                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5351                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5352                         getmd);
5353                 lnet_res_unlock(cpt);
5354                 goto drop;
5355         }
5356
5357         LASSERT(getmd->md_offset == 0);
5358
5359         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5360                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5361
5362         /* setup information for lnet_build_msg_event */
5363         msg->msg_initiator =
5364                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5365         msg->msg_from = peer_id->nid;
5366         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5367         msg->msg_hdr.src_nid = peer_id->nid;
5368         msg->msg_hdr.payload_length = getmd->md_length;
5369         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5370
5371         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5372         lnet_res_unlock(cpt);
5373
5374         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5375
5376         lnet_net_lock(cpt);
5377         lnet_msg_commit(msg, cpt);
5378         lnet_net_unlock(cpt);
5379
5380         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5381
5382         return msg;
5383
5384  drop:
5385         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5386
5387         lnet_net_lock(cpt);
5388         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5389         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5390         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5391                 getmd->md_length;
5392         lnet_net_unlock(cpt);
5393
5394         if (msg != NULL)
5395                 lnet_msg_free(msg);
5396
5397         return NULL;
5398 }
5399 EXPORT_SYMBOL(lnet_create_reply_msg);
5400
5401 void
5402 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5403                        unsigned int len)
5404 {
5405         /* Set the REPLY length, now that the RDMA that elides the REPLY
5406          * message has completed and the length is known. */
5407         LASSERT(reply != NULL);
5408         LASSERT(reply->msg_type == LNET_MSG_GET);
5409         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5410
5411         /* NB I trusted my peer to RDMA.  If it claims to have written beyond
5412          * the end of my buffer, I might as well be dead. */
5413         LASSERT(len <= reply->msg_ev.mlength);
5414
5415         reply->msg_ev.mlength = len;
5416 }
5417 EXPORT_SYMBOL(lnet_set_reply_msg_len);
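/*
 * Sketch of the optimized-GET sequence the two functions above support,
 * as an LND would drive it (illustrative only, excluded from the build).
 * Only lnet_create_reply_msg(), lnet_set_reply_msg_len() and
 * lnet_finalize() are real; example_do_rdma() is hypothetical.
 */
#if 0
static void example_lnd_optimized_get(struct lnet_ni *ni,
                                      struct lnet_msg *getmsg)
{
        struct lnet_msg *reply;
        unsigned int nob;

        /* must be called before lnet_finalize() frees getmsg */
        reply = lnet_create_reply_msg(ni, getmsg);
        if (!reply)
                return;

        nob = example_do_rdma(getmsg);          /* hypothetical RDMA */
        lnet_set_reply_msg_len(ni, reply, nob); /* record actual length */
        lnet_finalize(reply, 0);                /* deliver the REPLY event */
}
#endif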
5418
5419 /**
5420  * Initiate an asynchronous GET operation.
5421  *
5422  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5423  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5424  * the target node in the REPLY has been written to local MD.
5425  *
5426  * On the target node, an LNET_EVENT_GET is logged when the GET request
5427  * arrives and is accepted into a MD.
5428  *
5429  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5430  * \param mdh A handle for the MD that describes the memory into which the
5431  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5432  *
5433  * \retval  0      Success, and only in this case events will be generated
5434  * and logged to EQ (if it exists) of the MD.
5435  * \retval -EIO    Simulated failure.
5436  * \retval -ENOMEM Memory allocation failure.
5437  * \retval -ENOENT Invalid MD object.
5438  */
5439 int
5440 LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh,
5441         struct lnet_processid *target, unsigned int portal,
5442         __u64 match_bits, unsigned int offset, bool recovery)
5443 {
5444         struct lnet_msg *msg;
5445         struct lnet_libmd *md;
5446         struct lnet_rsp_tracker *rspt;
5447         int cpt;
5448         int rc;
5449
5450         LASSERT(the_lnet.ln_refcount > 0);
5451
5452         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5453             fail_peer(&target->nid, 1))         /* shall we now? */
5454         {
5455                 CERROR("Dropping GET to %s: simulated failure\n",
5456                        libcfs_idstr(target));
5457                 return -EIO;
5458         }
5459
5460         msg = lnet_msg_alloc();
5461         if (!msg) {
5462                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5463                        libcfs_idstr(target));
5464                 return -ENOMEM;
5465         }
5466
5467         cpt = lnet_cpt_of_cookie(mdh.cookie);
5468
5469         rspt = lnet_rspt_alloc(cpt);
5470         if (!rspt) {
5471                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5472                        libcfs_idstr(target));
5473                 lnet_msg_free(msg);
5474                 return -ENOMEM;
5475         }
5476         INIT_LIST_HEAD(&rspt->rspt_on_list);
5477
5478         msg->msg_recovery = recovery;
5479
5480         lnet_res_lock(cpt);
5481
5482         md = lnet_handle2md(&mdh);
5483         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5484                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5485                        match_bits, portal, libcfs_idstr(target),
5486                        md == NULL ? -1 : md->md_threshold);
5487                 if (md != NULL && md->md_me != NULL)
5488                         CERROR("REPLY MD also attached to portal %d\n",
5489                                md->md_me->me_portal);
5490
5491                 lnet_res_unlock(cpt);
5492
5493                 lnet_msg_free(msg);
5494                 lnet_rspt_free(rspt, cpt);
5495                 return -ENOENT;
5496         }
5497
5498         CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target));
5499
5500         lnet_msg_attach_md(msg, md, 0, 0);
5501
5502         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5503
5504         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5505         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5506         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5507         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5508
5509         /* NB handles are only looked up by their creator (no byte-order flips) */
5510         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5511                 the_lnet.ln_interface_cookie;
5512         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5513                 md->md_lh.lh_cookie;
5514
5515         lnet_res_unlock(cpt);
5516
5517         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5518
5519         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5520                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5521         else
5522                 lnet_rspt_free(rspt, cpt);
5523
5524         rc = lnet_send(self, msg, NULL);
5525         if (rc < 0) {
5526                 CNETERR("Error sending GET to %s: %d\n",
5527                         libcfs_idstr(target), rc);
5528                 msg->msg_no_resend = true;
5529                 lnet_finalize(msg, rc);
5530         }
5531
5532         /* completion will be signalled by an event */
5533         return 0;
5534 }
5535 EXPORT_SYMBOL(LNetGet);
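
/* Illustrative sketch, not part of the original file: a minimal LNetGet()
 * caller.  It assumes "mdh" already describes a free-floating MD created
 * with LNetMDBind(), that passing self == NULL lets LNet choose the source
 * NI, and that the portal index and match bits are meaningful to the peer;
 * example_fetch() itself is hypothetical.  Completion is reported through
 * the MD's event handler (LNET_EVENT_SEND, then LNET_EVENT_REPLY), not
 * through the return value.  Kept under "#if 0" because it is an example.
 */
#if 0
static int
example_fetch(struct lnet_handle_md mdh, struct lnet_nid *dst,
              unsigned int portal, __u64 match_bits)
{
        struct lnet_processid target;
        int rc;

        target.pid = LNET_PID_LUSTRE;
        target.nid = *dst;

        /* offset 0, not a recovery ping */
        rc = LNetGet(NULL, mdh, &target, portal, match_bits, 0, false);
        if (rc < 0)
                CERROR("GET to %s failed: %d\n", libcfs_idstr(&target), rc);

        return rc;
}
#endif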
5536
5537 /**
5538  * Calculate distance to node at \a dstnid.
5539  *
5540  * \param dstnid Target NID.
5541  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5542  * is saved here.
5543  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5544  * here.
5545  *
5546  * \retval 0 If \a dstnid belongs to a local interface and the reserved option
5547  * local_nid_dist_zero is set (the default).
5548  * \retval >0 Distance to the target NID, i.e. the number of hops plus one.
5549  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5550  */
5551 int
5552 LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp)
5553 {
5554         struct lnet_ni *ni = NULL;
5555         struct lnet_remotenet *rnet;
5556         __u32 dstnet = LNET_NID_NET(dstnid);
5557         int hops;
5558         int cpt;
5559         __u32 order = 2;
5560         struct list_head *rn_list;
5561         struct lnet_ni *matched_dstnet = NULL;
5562
5563         /* if !local_nid_dist_zero, never return a distance of 0
5564          * (when Lustre sees a distance of 0, it substitutes 0@lo), so
5565          * keep order 0 free for 0@lo and order 1 free for a local NID
5566          * match.
5567          * WARNING: dstnid and srcnid might point to the same place;
5568          * don't set *srcnid until the end.
5569          */
5570
5571         LASSERT(the_lnet.ln_refcount > 0);
5572
5573         cpt = lnet_net_lock_current();
5574
5575         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5576                 if (nid_same(&ni->ni_nid, dstnid)) {
5577                         if (orderp != NULL) {
5578                                 if (nid_is_lo0(dstnid))
5579                                         *orderp = 0;
5580                                 else
5581                                         *orderp = 1;
5582                         }
5583                         if (srcnid)
5584                                 *srcnid = *dstnid;
5585                         lnet_net_unlock(cpt);
5586
5587                         return local_nid_dist_zero ? 0 : 1;
5588                 }
5589
5590                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5591                         matched_dstnet = ni;
5592                         /* We matched the destination net, but there may be
5593                          * additional local NIs to inspect.
5594                          *
5595                          * Record the order as appropriate; it may be
5596                          * overwritten if the local NID check above matches
5597                          * on a later iteration. */
5598
5599                         if (orderp) {
5600                                 /* Check whether this NI was originally
5601                                  * created in the current net namespace.
5602                                  * If not, assign an order above 0xffff0000
5603                                  * so that this NI is deprioritized.
5604                                  */
5605                                 if (current->nsproxy &&
5606                                     !net_eq(ni->ni_net_ns,
5607                                             current->nsproxy->net_ns))
5608                                         *orderp = order + 0xffff0000;
5609                                 else
5610                                         *orderp = order;
5611                         }
5612                 }
5613
5614                 order++;
5615         }
5616
5617         if (matched_dstnet) {
5618                 if (srcnid)
5619                         *srcnid = matched_dstnet->ni_nid;
5620                 lnet_net_unlock(cpt);
5621                 return 1;
5622         }
5623
5624         rn_list = lnet_net2rnethash(dstnet);
5625         list_for_each_entry(rnet, rn_list, lrn_list) {
5626                 if (rnet->lrn_net == dstnet) {
5627                         struct lnet_route *route;
5628                         struct lnet_route *shortest = NULL;
5629                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5630                         __u32 route_hops;
5631
5632                         LASSERT(!list_empty(&rnet->lrn_routes));
5633
5634                         list_for_each_entry(route, &rnet->lrn_routes,
5635                                             lr_list) {
5636                                 route_hops = route->lr_hops;
5637                                 if (route_hops == LNET_UNDEFINED_HOPS)
5638                                         route_hops = 1;
5639                                 if (shortest == NULL ||
5640                                     route_hops < shortest_hops) {
5641                                         shortest = route;
5642                                         shortest_hops = route_hops;
5643                                 }
5644                         }
5645
5646                         LASSERT(shortest != NULL);
5647                         hops = shortest_hops;
5648                         if (srcnid) {
5649                                 struct lnet_net *net;
5650                                 net = lnet_get_net_locked(shortest->lr_lnet);
5651                                 LASSERT(net);
5652                                 ni = lnet_get_next_ni_locked(net, NULL);
5653                                 *srcnid = ni->ni_nid;
5654                         }
5655                         if (orderp != NULL)
5656                                 *orderp = order;
5657                         lnet_net_unlock(cpt);
5658                         return hops + 1;
5659                 }
5660                 order++;
5661         }
5662
5663         lnet_net_unlock(cpt);
5664         return -EHOSTUNREACH;
5665 }
5666 EXPORT_SYMBOL(LNetDist);
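
/* Illustrative sketch, not part of the original file: interpreting the
 * LNetDist() return value.  A return of 0 means \a dstnid is a local
 * interface (with the default local_nid_dist_zero), a positive value is
 * hops + 1, and -EHOSTUNREACH means no local NI or route reaches it.
 * example_is_reachable() is hypothetical; only LNetDist(), CDEBUG() and
 * libcfs_nidstr() are real calls here.  Kept under "#if 0" as an example.
 */
#if 0
static bool
example_is_reachable(struct lnet_nid *dstnid)
{
        struct lnet_nid src;
        __u32 order;
        int dist;

        dist = LNetDist(dstnid, &src, &order);
        if (dist < 0) {
                CDEBUG(D_NET, "%s is unreachable: %d\n",
                       libcfs_nidstr(dstnid), dist);
                return false;
        }

        CDEBUG(D_NET, "%s: distance %d via %s (order %u)\n",
               libcfs_nidstr(dstnid), dist, libcfs_nidstr(&src), order);
        return true;
}
#endif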