Whamcloud - gitweb
8f2a7a61f5f5797e1619c6b6e495ee02d9946444
[fs/lustre-release.git] / lnet / lnet / lib-move.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         lnet_nid_t sd_dst_nid;
59         lnet_nid_t sd_src_nid;
60         lnet_nid_t sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicity enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
192 int
193 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         LIST_HEAD(cull);
199
200         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
201         if (threshold != 0) {
202                 /* Adding a new entry */
203                 LIBCFS_ALLOC(tp, sizeof(*tp));
204                 if (tp == NULL)
205                         return -ENOMEM;
206
207                 tp->tp_nid = nid;
208                 tp->tp_threshold = threshold;
209
210                 lnet_net_lock(0);
211                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
212                 lnet_net_unlock(0);
213                 return 0;
214         }
215
216         lnet_net_lock(0);
217
218         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
219                 tp = list_entry(el, struct lnet_test_peer, tp_list);
220
221                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
222                     nid == LNET_NID_ANY ||      /* removing all entries */
223                     tp->tp_nid == nid) {        /* matched this one */
224                         list_move(&tp->tp_list, &cull);
225                 }
226         }
227
228         lnet_net_unlock(0);
229
230         while (!list_empty(&cull)) {
231                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
232
233                 list_del(&tp->tp_list);
234                 LIBCFS_FREE(tp, sizeof(*tp));
235         }
236         return 0;
237 }
238
239 static int
240 fail_peer (lnet_nid_t nid, int outgoing)
241 {
242         struct lnet_test_peer *tp;
243         struct list_head *el;
244         struct list_head *next;
245         LIST_HEAD(cull);
246         int fail = 0;
247
248         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
249         lnet_net_lock(0);
250
251         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
252                 tp = list_entry(el, struct lnet_test_peer, tp_list);
253
254                 if (tp->tp_threshold == 0) {
255                         /* zombie entry */
256                         if (outgoing) {
257                                 /* only cull zombies on outgoing tests,
258                                  * since we may be at interrupt priority on
259                                  * incoming messages. */
260                                 list_move(&tp->tp_list, &cull);
261                         }
262                         continue;
263                 }
264
265                 if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
266                     nid == tp->tp_nid) {                /* fail this peer */
267                         fail = 1;
268
269                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
270                                 tp->tp_threshold--;
271                                 if (outgoing &&
272                                     tp->tp_threshold == 0) {
273                                         /* see above */
274                                         list_move(&tp->tp_list, &cull);
275                                 }
276                         }
277                         break;
278                 }
279         }
280
281         lnet_net_unlock(0);
282
283         while (!list_empty(&cull)) {
284                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
285                 list_del(&tp->tp_list);
286
287                 LIBCFS_FREE(tp, sizeof(*tp));
288         }
289
290         return fail;
291 }
292
293 unsigned int
294 lnet_iov_nob(unsigned int niov, struct kvec *iov)
295 {
296         unsigned int nob = 0;
297
298         LASSERT(niov == 0 || iov != NULL);
299         while (niov-- > 0)
300                 nob += (iov++)->iov_len;
301
302         return (nob);
303 }
304 EXPORT_SYMBOL(lnet_iov_nob);
305
306 void
307 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
308                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
309                   unsigned int nob)
310 {
311         /* NB diov, siov are READ-ONLY */
312         unsigned int this_nob;
313
314         if (nob == 0)
315                 return;
316
317         /* skip complete frags before 'doffset' */
318         LASSERT(ndiov > 0);
319         while (doffset >= diov->iov_len) {
320                 doffset -= diov->iov_len;
321                 diov++;
322                 ndiov--;
323                 LASSERT(ndiov > 0);
324         }
325
326         /* skip complete frags before 'soffset' */
327         LASSERT(nsiov > 0);
328         while (soffset >= siov->iov_len) {
329                 soffset -= siov->iov_len;
330                 siov++;
331                 nsiov--;
332                 LASSERT(nsiov > 0);
333         }
334
335         do {
336                 LASSERT(ndiov > 0);
337                 LASSERT(nsiov > 0);
338                 this_nob = min3((unsigned int)diov->iov_len - doffset,
339                                 (unsigned int)siov->iov_len - soffset,
340                                 nob);
341
342                 memcpy((char *)diov->iov_base + doffset,
343                        (char *)siov->iov_base + soffset, this_nob);
344                 nob -= this_nob;
345
346                 if (diov->iov_len > doffset + this_nob) {
347                         doffset += this_nob;
348                 } else {
349                         diov++;
350                         ndiov--;
351                         doffset = 0;
352                 }
353
354                 if (siov->iov_len > soffset + this_nob) {
355                         soffset += this_nob;
356                 } else {
357                         siov++;
358                         nsiov--;
359                         soffset = 0;
360                 }
361         } while (nob > 0);
362 }
363 EXPORT_SYMBOL(lnet_copy_iov2iov);
364
365 unsigned int
366 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
367 {
368         unsigned int  nob = 0;
369
370         LASSERT(niov == 0 || kiov != NULL);
371         while (niov-- > 0)
372                 nob += (kiov++)->bv_len;
373
374         return (nob);
375 }
376 EXPORT_SYMBOL(lnet_kiov_nob);
377
378 void
379 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
380                     unsigned int doffset,
381                     unsigned int nsiov, struct bio_vec *siov,
382                     unsigned int soffset,
383                     unsigned int nob)
384 {
385         /* NB diov, siov are READ-ONLY */
386         unsigned int    this_nob;
387         char           *daddr = NULL;
388         char           *saddr = NULL;
389
390         if (nob == 0)
391                 return;
392
393         LASSERT (!in_interrupt ());
394
395         LASSERT (ndiov > 0);
396         while (doffset >= diov->bv_len) {
397                 doffset -= diov->bv_len;
398                 diov++;
399                 ndiov--;
400                 LASSERT(ndiov > 0);
401         }
402
403         LASSERT(nsiov > 0);
404         while (soffset >= siov->bv_len) {
405                 soffset -= siov->bv_len;
406                 siov++;
407                 nsiov--;
408                 LASSERT(nsiov > 0);
409         }
410
411         do {
412                 LASSERT(ndiov > 0);
413                 LASSERT(nsiov > 0);
414                 this_nob = min3(diov->bv_len - doffset,
415                                 siov->bv_len - soffset,
416                                 nob);
417
418                 if (daddr == NULL)
419                         daddr = ((char *)kmap(diov->bv_page)) +
420                                 diov->bv_offset + doffset;
421                 if (saddr == NULL)
422                         saddr = ((char *)kmap(siov->bv_page)) +
423                                 siov->bv_offset + soffset;
424
425                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
426                  * However in practice at least one of the kiovs will be mapped
427                  * kernel pages and the map/unmap will be NOOPs */
428
429                 memcpy (daddr, saddr, this_nob);
430                 nob -= this_nob;
431
432                 if (diov->bv_len > doffset + this_nob) {
433                         daddr += this_nob;
434                         doffset += this_nob;
435                 } else {
436                         kunmap(diov->bv_page);
437                         daddr = NULL;
438                         diov++;
439                         ndiov--;
440                         doffset = 0;
441                 }
442
443                 if (siov->bv_len > soffset + this_nob) {
444                         saddr += this_nob;
445                         soffset += this_nob;
446                 } else {
447                         kunmap(siov->bv_page);
448                         saddr = NULL;
449                         siov++;
450                         nsiov--;
451                         soffset = 0;
452                 }
453         } while (nob > 0);
454
455         if (daddr != NULL)
456                 kunmap(diov->bv_page);
457         if (saddr != NULL)
458                 kunmap(siov->bv_page);
459 }
460 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
461
462 void
463 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
464                     unsigned int nkiov, struct bio_vec *kiov,
465                     unsigned int kiovoffset,
466                     unsigned int nob)
467 {
468         /* NB iov, kiov are READ-ONLY */
469         unsigned int    this_nob;
470         char           *addr = NULL;
471
472         if (nob == 0)
473                 return;
474
475         LASSERT (!in_interrupt ());
476
477         LASSERT (niov > 0);
478         while (iovoffset >= iov->iov_len) {
479                 iovoffset -= iov->iov_len;
480                 iov++;
481                 niov--;
482                 LASSERT(niov > 0);
483         }
484
485         LASSERT(nkiov > 0);
486         while (kiovoffset >= kiov->bv_len) {
487                 kiovoffset -= kiov->bv_len;
488                 kiov++;
489                 nkiov--;
490                 LASSERT(nkiov > 0);
491         }
492
493         do {
494                 LASSERT(niov > 0);
495                 LASSERT(nkiov > 0);
496                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
497                                 (unsigned int)kiov->bv_len - kiovoffset,
498                                 nob);
499
500                 if (addr == NULL)
501                         addr = ((char *)kmap(kiov->bv_page)) +
502                                 kiov->bv_offset + kiovoffset;
503
504                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
505                 nob -= this_nob;
506
507                 if (iov->iov_len > iovoffset + this_nob) {
508                         iovoffset += this_nob;
509                 } else {
510                         iov++;
511                         niov--;
512                         iovoffset = 0;
513                 }
514
515                 if (kiov->bv_len > kiovoffset + this_nob) {
516                         addr += this_nob;
517                         kiovoffset += this_nob;
518                 } else {
519                         kunmap(kiov->bv_page);
520                         addr = NULL;
521                         kiov++;
522                         nkiov--;
523                         kiovoffset = 0;
524                 }
525
526         } while (nob > 0);
527
528         if (addr != NULL)
529                 kunmap(kiov->bv_page);
530 }
531 EXPORT_SYMBOL(lnet_copy_kiov2iov);
532
533 void
534 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
535                    unsigned int kiovoffset,
536                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
537                    unsigned int nob)
538 {
539         /* NB kiov, iov are READ-ONLY */
540         unsigned int    this_nob;
541         char           *addr = NULL;
542
543         if (nob == 0)
544                 return;
545
546         LASSERT (!in_interrupt ());
547
548         LASSERT (nkiov > 0);
549         while (kiovoffset >= kiov->bv_len) {
550                 kiovoffset -= kiov->bv_len;
551                 kiov++;
552                 nkiov--;
553                 LASSERT(nkiov > 0);
554         }
555
556         LASSERT(niov > 0);
557         while (iovoffset >= iov->iov_len) {
558                 iovoffset -= iov->iov_len;
559                 iov++;
560                 niov--;
561                 LASSERT(niov > 0);
562         }
563
564         do {
565                 LASSERT(nkiov > 0);
566                 LASSERT(niov > 0);
567                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
568                                 (unsigned int)iov->iov_len - iovoffset,
569                                 nob);
570
571                 if (addr == NULL)
572                         addr = ((char *)kmap(kiov->bv_page)) +
573                                 kiov->bv_offset + kiovoffset;
574
575                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
576                 nob -= this_nob;
577
578                 if (kiov->bv_len > kiovoffset + this_nob) {
579                         addr += this_nob;
580                         kiovoffset += this_nob;
581                 } else {
582                         kunmap(kiov->bv_page);
583                         addr = NULL;
584                         kiov++;
585                         nkiov--;
586                         kiovoffset = 0;
587                 }
588
589                 if (iov->iov_len > iovoffset + this_nob) {
590                         iovoffset += this_nob;
591                 } else {
592                         iov++;
593                         niov--;
594                         iovoffset = 0;
595                 }
596         } while (nob > 0);
597
598         if (addr != NULL)
599                 kunmap(kiov->bv_page);
600 }
601 EXPORT_SYMBOL(lnet_copy_iov2kiov);
602
603 int
604 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
605                   int src_niov, struct bio_vec *src,
606                   unsigned int offset, unsigned int len)
607 {
608         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
609          * for exactly 'len' bytes, and return the number of entries.
610          * NB not destructive to 'src' */
611         unsigned int    frag_len;
612         unsigned int    niov;
613
614         if (len == 0)                           /* no data => */
615                 return (0);                     /* no frags */
616
617         LASSERT(src_niov > 0);
618         while (offset >= src->bv_len) {      /* skip initial frags */
619                 offset -= src->bv_len;
620                 src_niov--;
621                 src++;
622                 LASSERT(src_niov > 0);
623         }
624
625         niov = 1;
626         for (;;) {
627                 LASSERT(src_niov > 0);
628                 LASSERT((int)niov <= dst_niov);
629
630                 frag_len = src->bv_len - offset;
631                 dst->bv_page = src->bv_page;
632                 dst->bv_offset = src->bv_offset + offset;
633
634                 if (len <= frag_len) {
635                         dst->bv_len = len;
636                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
637                         return niov;
638                 }
639
640                 dst->bv_len = frag_len;
641                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
642
643                 len -= frag_len;
644                 dst++;
645                 src++;
646                 niov++;
647                 src_niov--;
648                 offset = 0;
649         }
650 }
651 EXPORT_SYMBOL(lnet_extract_kiov);
652
653 void
654 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
655              int delayed, unsigned int offset, unsigned int mlen,
656              unsigned int rlen)
657 {
658         unsigned int niov = 0;
659         struct kvec *iov = NULL;
660         struct bio_vec  *kiov = NULL;
661         int rc;
662
663         LASSERT (!in_interrupt ());
664         LASSERT (mlen == 0 || msg != NULL);
665
666         if (msg != NULL) {
667                 LASSERT(msg->msg_receiving);
668                 LASSERT(!msg->msg_sending);
669                 LASSERT(rlen == msg->msg_len);
670                 LASSERT(mlen <= msg->msg_len);
671                 LASSERT(msg->msg_offset == offset);
672                 LASSERT(msg->msg_wanted == mlen);
673
674                 msg->msg_receiving = 0;
675
676                 if (mlen != 0) {
677                         niov = msg->msg_niov;
678                         kiov = msg->msg_kiov;
679
680                         LASSERT (niov > 0);
681                         LASSERT ((iov == NULL) != (kiov == NULL));
682                 }
683         }
684
685         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
686                                              niov, kiov, offset, mlen,
687                                              rlen);
688         if (rc < 0)
689                 lnet_finalize(msg, rc);
690 }
691
692 static void
693 lnet_setpayloadbuffer(struct lnet_msg *msg)
694 {
695         struct lnet_libmd *md = msg->msg_md;
696
697         LASSERT(msg->msg_len > 0);
698         LASSERT(!msg->msg_routing);
699         LASSERT(md != NULL);
700         LASSERT(msg->msg_niov == 0);
701         LASSERT(msg->msg_kiov == NULL);
702
703         msg->msg_niov = md->md_niov;
704         msg->msg_kiov = md->md_kiov;
705 }
706
707 void
708 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
709                unsigned int offset, unsigned int len)
710 {
711         msg->msg_type = type;
712         msg->msg_target = target;
713         msg->msg_len = len;
714         msg->msg_offset = offset;
715
716         if (len != 0)
717                 lnet_setpayloadbuffer(msg);
718
719         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
720         msg->msg_hdr.type           = cpu_to_le32(type);
721         /* dest_nid will be overwritten by lnet_select_pathway() */
722         msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
723         msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
724         /* src_nid will be set later */
725         msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
726         msg->msg_hdr.payload_length = cpu_to_le32(len);
727 }
728
729 void
730 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
731 {
732         void *priv = msg->msg_private;
733         int rc;
734
735         LASSERT(!in_interrupt());
736         LASSERT(ni->ni_nid == LNET_NID_LO_0 ||
737                 (msg->msg_txcredit && msg->msg_peertxcredit));
738
739         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
740         if (rc < 0) {
741                 msg->msg_no_resend = true;
742                 lnet_finalize(msg, rc);
743         }
744 }
745
746 static int
747 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
748 {
749         int     rc;
750
751         LASSERT(!msg->msg_sending);
752         LASSERT(msg->msg_receiving);
753         LASSERT(!msg->msg_rx_ready_delay);
754         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
755
756         msg->msg_rx_ready_delay = 1;
757         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
758                                                   &msg->msg_private);
759         if (rc != 0) {
760                 CERROR("recv from %s / send to %s aborted: "
761                        "eager_recv failed %d\n",
762                        libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
763                        libcfs_id2str(msg->msg_target), rc);
764                 LASSERT(rc < 0); /* required by my callers */
765         }
766
767         return rc;
768 }
769
770 static bool
771 lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
772 {
773         time64_t deadline;
774
775         deadline = lpni->lpni_last_alive +
776                    lpni->lpni_net->net_tunables.lct_peer_timeout;
777
778         /*
779          * assume peer_ni is alive as long as we're within the configured
780          * peer timeout
781          */
782         if (deadline > now)
783                 return false;
784
785         return true;
786 }
787
788 /* NB: returns 1 when alive, 0 when dead, negative when error;
789  *     may drop the lnet_net_lock */
790 static int
791 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
792                        struct lnet_msg *msg)
793 {
794         time64_t now = ktime_get_seconds();
795
796         if (!lnet_peer_aliveness_enabled(lpni))
797                 return -ENODEV;
798
799         /*
800          * If we're resending a message, let's attempt to send it even if
801          * the peer is down to fulfill our resend quota on the message
802          */
803         if (msg->msg_retry_count > 0)
804                 return 1;
805
806         /* try and send recovery messages irregardless */
807         if (msg->msg_recovery)
808                 return 1;
809
810         /* always send any responses */
811         if (lnet_msg_is_response(msg))
812                 return 1;
813
814         if (!lnet_is_peer_deadline_passed(lpni, now))
815                 return true;
816
817         return lnet_is_peer_ni_alive(lpni);
818 }
819
820 /**
821  * \param msg The message to be sent.
822  * \param do_send True if lnet_ni_send() should be called in this function.
823  *        lnet_send() is going to lnet_net_unlock immediately after this, so
824  *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
825  *
826  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
827  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
828  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
829  * \retval -ECANCELED If the MD of the message has been unlinked.
830  */
831 static int
832 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
833 {
834         struct lnet_peer_ni     *lp = msg->msg_txpeer;
835         struct lnet_ni          *ni = msg->msg_txni;
836         int                     cpt = msg->msg_tx_cpt;
837         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
838
839         /* non-lnet_send() callers have checked before */
840         LASSERT(!do_send || msg->msg_tx_delayed);
841         LASSERT(!msg->msg_receiving);
842         LASSERT(msg->msg_tx_committed);
843
844         /* can't get here if we're sending to the loopback interface */
845         if (the_lnet.ln_loni)
846                 LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
847
848         /* NB 'lp' is always the next hop */
849         if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
850             lnet_peer_alive_locked(ni, lp, msg) == 0) {
851                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
852                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
853                         msg->msg_len;
854                 lnet_net_unlock(cpt);
855                 if (msg->msg_txpeer)
856                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
857                                         msg->msg_type,
858                                         LNET_STATS_TYPE_DROP);
859                 if (msg->msg_txni)
860                         lnet_incr_stats(&msg->msg_txni->ni_stats,
861                                         msg->msg_type,
862                                         LNET_STATS_TYPE_DROP);
863
864                 CNETERR("Dropping message for %s: peer not alive\n",
865                         libcfs_id2str(msg->msg_target));
866                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
867                 if (do_send)
868                         lnet_finalize(msg, -EHOSTUNREACH);
869
870                 lnet_net_lock(cpt);
871                 return -EHOSTUNREACH;
872         }
873
874         if (msg->msg_md != NULL &&
875             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
876                 lnet_net_unlock(cpt);
877
878                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
879                         "called on the MD/ME.\n",
880                         libcfs_id2str(msg->msg_target));
881                 if (do_send) {
882                         msg->msg_no_resend = true;
883                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
884                                msg, libcfs_id2str(msg->msg_target));
885                         lnet_finalize(msg, -ECANCELED);
886                 }
887
888                 lnet_net_lock(cpt);
889                 return -ECANCELED;
890         }
891
892         if (!msg->msg_peertxcredit) {
893                 spin_lock(&lp->lpni_lock);
894                 LASSERT((lp->lpni_txcredits < 0) ==
895                         !list_empty(&lp->lpni_txq));
896
897                 msg->msg_peertxcredit = 1;
898                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
899                 lp->lpni_txcredits--;
900
901                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
902                         lp->lpni_mintxcredits = lp->lpni_txcredits;
903
904                 if (lp->lpni_txcredits < 0) {
905                         msg->msg_tx_delayed = 1;
906                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
907                         spin_unlock(&lp->lpni_lock);
908                         return LNET_CREDIT_WAIT;
909                 }
910                 spin_unlock(&lp->lpni_lock);
911         }
912
913         if (!msg->msg_txcredit) {
914                 LASSERT((tq->tq_credits < 0) ==
915                         !list_empty(&tq->tq_delayed));
916
917                 msg->msg_txcredit = 1;
918                 tq->tq_credits--;
919                 atomic_dec(&ni->ni_tx_credits);
920
921                 if (tq->tq_credits < tq->tq_credits_min)
922                         tq->tq_credits_min = tq->tq_credits;
923
924                 if (tq->tq_credits < 0) {
925                         msg->msg_tx_delayed = 1;
926                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
927                         return LNET_CREDIT_WAIT;
928                 }
929         }
930
931         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
932             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
933                 msg->msg_tx_delayed = 1;
934                 return LNET_CREDIT_WAIT;
935         }
936
937         /* unset the tx_delay flag as we're going to send it now */
938         msg->msg_tx_delayed = 0;
939
940         if (do_send) {
941                 lnet_net_unlock(cpt);
942                 lnet_ni_send(ni, msg);
943                 lnet_net_lock(cpt);
944         }
945         return LNET_CREDIT_OK;
946 }
947
948
949 static struct lnet_rtrbufpool *
950 lnet_msg2bufpool(struct lnet_msg *msg)
951 {
952         struct lnet_rtrbufpool  *rbp;
953         int                     cpt;
954
955         LASSERT(msg->msg_rx_committed);
956
957         cpt = msg->msg_rx_cpt;
958         rbp = &the_lnet.ln_rtrpools[cpt][0];
959
960         LASSERT(msg->msg_len <= LNET_MTU);
961         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
962                 rbp++;
963                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
964         }
965
966         return rbp;
967 }
968
969 static int
970 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
971 {
972         /* lnet_parse is going to lnet_net_unlock immediately after this, so it
973          * sets do_recv FALSE and I don't do the unlock/send/lock bit.
974          * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
975          * received or OK to receive */
976         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
977         struct lnet_peer *lp;
978         struct lnet_rtrbufpool *rbp;
979         struct lnet_rtrbuf *rb;
980
981         LASSERT(msg->msg_kiov == NULL);
982         LASSERT(msg->msg_niov == 0);
983         LASSERT(msg->msg_routing);
984         LASSERT(msg->msg_receiving);
985         LASSERT(!msg->msg_sending);
986         LASSERT(lpni->lpni_peer_net);
987         LASSERT(lpni->lpni_peer_net->lpn_peer);
988
989         lp = lpni->lpni_peer_net->lpn_peer;
990
991         /* non-lnet_parse callers only receive delayed messages */
992         LASSERT(!do_recv || msg->msg_rx_delayed);
993
994         if (!msg->msg_peerrtrcredit) {
995                 /* lpni_lock protects the credit manipulation */
996                 spin_lock(&lpni->lpni_lock);
997
998                 msg->msg_peerrtrcredit = 1;
999                 lpni->lpni_rtrcredits--;
1000                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
1001                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
1002
1003                 if (lpni->lpni_rtrcredits < 0) {
1004                         spin_unlock(&lpni->lpni_lock);
1005                         /* must have checked eager_recv before here */
1006                         LASSERT(msg->msg_rx_ready_delay);
1007                         msg->msg_rx_delayed = 1;
1008                         /* lp_lock protects the lp_rtrq */
1009                         spin_lock(&lp->lp_lock);
1010                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1011                         spin_unlock(&lp->lp_lock);
1012                         return LNET_CREDIT_WAIT;
1013                 }
1014                 spin_unlock(&lpni->lpni_lock);
1015         }
1016
1017         rbp = lnet_msg2bufpool(msg);
1018
1019         if (!msg->msg_rtrcredit) {
1020                 msg->msg_rtrcredit = 1;
1021                 rbp->rbp_credits--;
1022                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1023                         rbp->rbp_mincredits = rbp->rbp_credits;
1024
1025                 if (rbp->rbp_credits < 0) {
1026                         /* must have checked eager_recv before here */
1027                         LASSERT(msg->msg_rx_ready_delay);
1028                         msg->msg_rx_delayed = 1;
1029                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1030                         return LNET_CREDIT_WAIT;
1031                 }
1032         }
1033
1034         LASSERT(!list_empty(&rbp->rbp_bufs));
1035         rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
1036         list_del(&rb->rb_list);
1037
1038         msg->msg_niov = rbp->rbp_npages;
1039         msg->msg_kiov = &rb->rb_kiov[0];
1040
1041         /* unset the msg-rx_delayed flag since we're receiving the message */
1042         msg->msg_rx_delayed = 0;
1043
1044         if (do_recv) {
1045                 int cpt = msg->msg_rx_cpt;
1046
1047                 lnet_net_unlock(cpt);
1048                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1049                              0, msg->msg_len, msg->msg_len);
1050                 lnet_net_lock(cpt);
1051         }
1052         return LNET_CREDIT_OK;
1053 }
1054
1055 void
1056 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1057 {
1058         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1059         struct lnet_ni          *txni = msg->msg_txni;
1060         struct lnet_msg         *msg2;
1061
1062         if (msg->msg_txcredit) {
1063                 struct lnet_ni       *ni = msg->msg_txni;
1064                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1065
1066                 /* give back NI txcredits */
1067                 msg->msg_txcredit = 0;
1068
1069                 LASSERT((tq->tq_credits < 0) ==
1070                         !list_empty(&tq->tq_delayed));
1071
1072                 tq->tq_credits++;
1073                 atomic_inc(&ni->ni_tx_credits);
1074                 if (tq->tq_credits <= 0) {
1075                         msg2 = list_entry(tq->tq_delayed.next,
1076                                           struct lnet_msg, msg_list);
1077                         list_del(&msg2->msg_list);
1078
1079                         LASSERT(msg2->msg_txni == ni);
1080                         LASSERT(msg2->msg_tx_delayed);
1081                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1082
1083                         (void) lnet_post_send_locked(msg2, 1);
1084                 }
1085         }
1086
1087         if (msg->msg_peertxcredit) {
1088                 /* give back peer txcredits */
1089                 msg->msg_peertxcredit = 0;
1090
1091                 spin_lock(&txpeer->lpni_lock);
1092                 LASSERT((txpeer->lpni_txcredits < 0) ==
1093                         !list_empty(&txpeer->lpni_txq));
1094
1095                 txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
1096                 LASSERT(txpeer->lpni_txqnob >= 0);
1097
1098                 txpeer->lpni_txcredits++;
1099                 if (txpeer->lpni_txcredits <= 0) {
1100                         int msg2_cpt;
1101
1102                         msg2 = list_entry(txpeer->lpni_txq.next,
1103                                               struct lnet_msg, msg_list);
1104                         list_del(&msg2->msg_list);
1105                         spin_unlock(&txpeer->lpni_lock);
1106
1107                         LASSERT(msg2->msg_txpeer == txpeer);
1108                         LASSERT(msg2->msg_tx_delayed);
1109
1110                         msg2_cpt = msg2->msg_tx_cpt;
1111
1112                         /*
1113                          * The msg_cpt can be different from the msg2_cpt
1114                          * so we need to make sure we lock the correct cpt
1115                          * for msg2.
1116                          * Once we call lnet_post_send_locked() it is no
1117                          * longer safe to access msg2, since it could've
1118                          * been freed by lnet_finalize(), but we still
1119                          * need to relock the correct cpt, so we cache the
1120                          * msg2_cpt for the purpose of the check that
1121                          * follows the call to lnet_pose_send_locked().
1122                          */
1123                         if (msg2_cpt != msg->msg_tx_cpt) {
1124                                 lnet_net_unlock(msg->msg_tx_cpt);
1125                                 lnet_net_lock(msg2_cpt);
1126                         }
1127                         (void) lnet_post_send_locked(msg2, 1);
1128                         if (msg2_cpt != msg->msg_tx_cpt) {
1129                                 lnet_net_unlock(msg2_cpt);
1130                                 lnet_net_lock(msg->msg_tx_cpt);
1131                         }
1132                 } else {
1133                         spin_unlock(&txpeer->lpni_lock);
1134                 }
1135         }
1136
1137         if (txni != NULL) {
1138                 msg->msg_txni = NULL;
1139                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1140         }
1141
1142         if (txpeer != NULL) {
1143                 msg->msg_txpeer = NULL;
1144                 lnet_peer_ni_decref_locked(txpeer);
1145         }
1146 }
1147
1148 void
1149 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1150 {
1151         struct lnet_msg *msg;
1152
1153         if (list_empty(&rbp->rbp_msgs))
1154                 return;
1155         msg = list_entry(rbp->rbp_msgs.next,
1156                          struct lnet_msg, msg_list);
1157         list_del(&msg->msg_list);
1158
1159         (void)lnet_post_routed_recv_locked(msg, 1);
1160 }
1161
1162 void
1163 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1164 {
1165         struct lnet_msg *msg;
1166         struct lnet_msg *tmp;
1167
1168         lnet_net_unlock(cpt);
1169
1170         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1171                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1172                              0, 0, 0, msg->msg_hdr.payload_length);
1173                 list_del_init(&msg->msg_list);
1174                 msg->msg_no_resend = true;
1175                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1176                 lnet_finalize(msg, -ECANCELED);
1177         }
1178
1179         lnet_net_lock(cpt);
1180 }
1181
1182 void
1183 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1184 {
1185         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1186         struct lnet_peer *lp;
1187         struct lnet_ni *rxni = msg->msg_rxni;
1188         struct lnet_msg *msg2;
1189
1190         if (msg->msg_rtrcredit) {
1191                 /* give back global router credits */
1192                 struct lnet_rtrbuf *rb;
1193                 struct lnet_rtrbufpool *rbp;
1194
1195                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1196                  * there until it gets one allocated, or aborts the wait
1197                  * itself */
1198                 LASSERT(msg->msg_kiov != NULL);
1199
1200                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1201                 rbp = rb->rb_pool;
1202
1203                 msg->msg_kiov = NULL;
1204                 msg->msg_rtrcredit = 0;
1205
1206                 LASSERT(rbp == lnet_msg2bufpool(msg));
1207
1208                 LASSERT((rbp->rbp_credits > 0) ==
1209                         !list_empty(&rbp->rbp_bufs));
1210
1211                 /* If routing is now turned off, we just drop this buffer and
1212                  * don't bother trying to return credits.  */
1213                 if (!the_lnet.ln_routing) {
1214                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1215                         goto routing_off;
1216                 }
1217
1218                 /* It is possible that a user has lowered the desired number of
1219                  * buffers in this pool.  Make sure we never put back
1220                  * more buffers than the stated number. */
1221                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1222                         /* Discard this buffer so we don't have too
1223                          * many. */
1224                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1225                         rbp->rbp_nbuffers--;
1226                 } else {
1227                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1228                         rbp->rbp_credits++;
1229                         if (rbp->rbp_credits <= 0)
1230                                 lnet_schedule_blocked_locked(rbp);
1231                 }
1232         }
1233
1234 routing_off:
1235         if (msg->msg_peerrtrcredit) {
1236                 LASSERT(rxpeerni);
1237                 LASSERT(rxpeerni->lpni_peer_net);
1238                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1239
1240                 /* give back peer router credits */
1241                 msg->msg_peerrtrcredit = 0;
1242
1243                 spin_lock(&rxpeerni->lpni_lock);
1244                 rxpeerni->lpni_rtrcredits++;
1245                 spin_unlock(&rxpeerni->lpni_lock);
1246
1247                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1248                 spin_lock(&lp->lp_lock);
1249
1250                 /* drop all messages which are queued to be routed on that
1251                  * peer. */
1252                 if (!the_lnet.ln_routing) {
1253                         LIST_HEAD(drop);
1254                         list_splice_init(&lp->lp_rtrq, &drop);
1255                         spin_unlock(&lp->lp_lock);
1256                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1257                 } else if (!list_empty(&lp->lp_rtrq)) {
1258                         int msg2_cpt;
1259
1260                         msg2 = list_entry(lp->lp_rtrq.next,
1261                                           struct lnet_msg, msg_list);
1262                         list_del(&msg2->msg_list);
1263                         msg2_cpt = msg2->msg_rx_cpt;
1264                         spin_unlock(&lp->lp_lock);
1265                         /*
1266                          * messages on the lp_rtrq can be from any NID in
1267                          * the peer, which means they might have different
1268                          * cpts. We need to make sure we lock the right
1269                          * one.
1270                          */
1271                         if (msg2_cpt != msg->msg_rx_cpt) {
1272                                 lnet_net_unlock(msg->msg_rx_cpt);
1273                                 lnet_net_lock(msg2_cpt);
1274                         }
1275                         (void) lnet_post_routed_recv_locked(msg2, 1);
1276                         if (msg2_cpt != msg->msg_rx_cpt) {
1277                                 lnet_net_unlock(msg2_cpt);
1278                                 lnet_net_lock(msg->msg_rx_cpt);
1279                         }
1280                 } else {
1281                         spin_unlock(&lp->lp_lock);
1282                 }
1283         }
1284         if (rxni != NULL) {
1285                 msg->msg_rxni = NULL;
1286                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1287         }
1288         if (rxpeerni != NULL) {
1289                 msg->msg_rxpeer = NULL;
1290                 lnet_peer_ni_decref_locked(rxpeerni);
1291         }
1292 }
1293
1294 static struct lnet_peer_ni *
1295 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1296                     struct lnet_peer *peer,
1297                     struct lnet_peer_ni *best_lpni,
1298                     struct lnet_peer_net *peer_net)
1299 {
1300         /*
1301          * Look at the peer NIs for the destination peer that connect
1302          * to the chosen net. If a peer_ni is preferred when using the
1303          * best_ni to communicate, we use that one. If there is no
1304          * preferred peer_ni, or there are multiple preferred peer_ni,
1305          * the available transmit credits are used. If the transmit
1306          * credits are equal, we round-robin over the peer_ni.
1307          */
1308         struct lnet_peer_ni *lpni = NULL;
1309         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1310                 INT_MIN;
1311         int best_lpni_healthv = (best_lpni) ?
1312                 atomic_read(&best_lpni->lpni_healthv) : 0;
1313         bool best_lpni_is_preferred = false;
1314         bool lpni_is_preferred;
1315         int lpni_healthv;
1316         __u32 lpni_sel_prio;
1317         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1318
1319         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1320                 /*
1321                  * if the best_ni we've chosen aleady has this lpni
1322                  * preferred, then let's use it
1323                  */
1324                 if (best_ni) {
1325                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni,
1326                                                                 best_ni->ni_nid);
1327                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1328                                libcfs_nid2str(best_ni->ni_nid),
1329                                lpni_is_preferred);
1330                 } else {
1331                         lpni_is_preferred = false;
1332                 }
1333
1334                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1335                 lpni_sel_prio = lpni->lpni_sel_priority;
1336
1337                 if (best_lpni)
1338                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1339                                 libcfs_nid2str(lpni->lpni_nid),
1340                                 libcfs_nid2str(best_lpni->lpni_nid),
1341                                 lpni_healthv, best_lpni_healthv,
1342                                 lpni_sel_prio, best_sel_prio,
1343                                 lpni->lpni_txcredits, best_lpni_credits,
1344                                 lpni->lpni_seq, best_lpni->lpni_seq);
1345                 else
1346                         goto select_lpni;
1347
1348                 /* pick the healthiest peer ni */
1349                 if (lpni_healthv < best_lpni_healthv)
1350                         continue;
1351                 else if (lpni_healthv > best_lpni_healthv) {
1352                         if (best_lpni_is_preferred)
1353                                 best_lpni_is_preferred = false;
1354                         goto select_lpni;
1355                 }
1356
1357                 if (lpni_sel_prio > best_sel_prio)
1358                         continue;
1359                 else if (lpni_sel_prio < best_sel_prio) {
1360                         if (best_lpni_is_preferred)
1361                                 best_lpni_is_preferred = false;
1362                         goto select_lpni;
1363                 }
1364
1365                 /* if this is a preferred peer use it */
1366                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1367                         best_lpni_is_preferred = true;
1368                         goto select_lpni;
1369                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1370                         /* this is not the preferred peer so let's ignore
1371                          * it.
1372                          */
1373                         continue;
1374                 }
1375
1376                 if (lpni->lpni_txcredits < best_lpni_credits)
1377                         /* We already have a peer that has more credits
1378                          * available than this one. No need to consider
1379                          * this peer further.
1380                          */
1381                         continue;
1382                 else if (lpni->lpni_txcredits > best_lpni_credits)
1383                         goto select_lpni;
1384
1385                 /* The best peer found so far and the current peer
1386                  * have the same number of available credits let's
1387                  * make sure to select between them using Round Robin
1388                  */
1389                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1390                         continue;
1391 select_lpni:
1392                 best_lpni_is_preferred = lpni_is_preferred;
1393                 best_lpni_healthv = lpni_healthv;
1394                 best_sel_prio = lpni_sel_prio;
1395                 best_lpni = lpni;
1396                 best_lpni_credits = lpni->lpni_txcredits;
1397         }
1398
1399         /* if we still can't find a peer ni then we can't reach it */
1400         if (!best_lpni) {
1401                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1402                         LNET_NIDNET(dst_nid);
1403                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1404                                 libcfs_net2str(net_id));
1405                 return NULL;
1406         }
1407
1408         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1409                libcfs_nid2str(best_lpni->lpni_nid));
1410
1411         return best_lpni;
1412 }
1413
1414 /*
1415  * Prerequisite: the best_ni should already be set in the sd
1416  * Find the best lpni.
1417  * If the net id is provided then restrict lpni selection on
1418  * that particular net.
1419  * Otherwise find any reachable lpni. When dealing with an MR
1420  * gateway and it has multiple lpnis which we can use
1421  * we want to select the best one from the list of reachable
1422  * ones.
1423  */
1424 static inline struct lnet_peer_ni *
1425 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1426                     struct lnet_peer *peer, __u32 net_id)
1427 {
1428         struct lnet_peer_net *peer_net;
1429
1430         /* find the best_lpni on any local network */
1431         if (net_id == LNET_NET_ANY) {
1432                 struct lnet_peer_ni *best_lpni = NULL;
1433                 struct lnet_peer_net *lpn;
1434                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1435                         /* no net specified find any reachable peer ni */
1436                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1437                                 continue;
1438                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1439                                                         best_lpni, lpn);
1440                 }
1441
1442                 return best_lpni;
1443         }
1444         /* restrict on the specified net */
1445         peer_net = lnet_peer_get_net_locked(peer, net_id);
1446         if (peer_net)
1447                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1448
1449         return NULL;
1450 }
1451
1452 static int
1453 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1454 {
1455         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1456                 return 1;
1457
1458         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1459                 return -1;
1460
1461         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1462                 return 1;
1463
1464         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1465                 return -1;
1466
1467         return 0;
1468 }
1469
1470 /* Compare route priorities and hop counts */
1471 static int
1472 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1473 {
1474         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1475         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1476
1477         if (r1->lr_priority < r2->lr_priority)
1478                 return 1;
1479
1480         if (r1->lr_priority > r2->lr_priority)
1481                 return -1;
1482
1483         if (r1_hops < r2_hops)
1484                 return 1;
1485
1486         if (r1_hops > r2_hops)
1487                 return -1;
1488
1489         return 0;
1490 }
1491
1492 static struct lnet_route *
1493 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1494                        struct lnet_peer_ni *remote_lpni,
1495                        struct lnet_route **prev_route,
1496                        struct lnet_peer_ni **gwni)
1497 {
1498         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1499         struct lnet_route *best_route;
1500         struct lnet_route *last_route;
1501         struct lnet_route *route;
1502         int rc;
1503         bool best_rte_is_preferred = false;
1504         lnet_nid_t gw_pnid;
1505
1506         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1507                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1508
1509         best_route = last_route = NULL;
1510         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1511                 if (!lnet_is_route_alive(route))
1512                         continue;
1513                 gw_pnid = route->lr_gateway->lp_primary_nid;
1514
1515                 /* no protection on below fields, but it's harmless */
1516                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1517                         last_route = route;
1518
1519                 /* if the best route found is in the preferred list then
1520                  * tag it as preferred and use it later on. But if we
1521                  * didn't find any routes which are on the preferred list
1522                  * then just use the best route possible.
1523                  */
1524                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1525
1526                 if (!best_route || (rc && !best_rte_is_preferred)) {
1527                         /* Restrict the selection of the router NI on the
1528                          * src_net provided. If the src_net is LNET_NID_ANY,
1529                          * then select the best interface available.
1530                          */
1531                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1532                                                    route->lr_gateway,
1533                                                    src_net);
1534                         if (!lpni) {
1535                                 CDEBUG(D_NET,
1536                                        "Gateway %s does not have a peer NI on net %s\n",
1537                                        libcfs_nid2str(gw_pnid),
1538                                        libcfs_net2str(src_net));
1539                                 continue;
1540                         }
1541                 }
1542
1543                 if (rc && !best_rte_is_preferred) {
1544                         /* This is the first preferred route we found,
1545                          * so it beats any route found previously
1546                          */
1547                         best_route = route;
1548                         if (!last_route)
1549                                 last_route = route;
1550                         best_gw_ni = lpni;
1551                         best_rte_is_preferred = true;
1552                         CDEBUG(D_NET, "preferred gw = %s\n",
1553                                libcfs_nid2str(gw_pnid));
1554                         continue;
1555                 } else if ((!rc) && best_rte_is_preferred)
1556                         /* The best route we found so far is in the preferred
1557                          * list, so it beats any non-preferred route
1558                          */
1559                         continue;
1560
1561                 if (!best_route) {
1562                         best_route = last_route = route;
1563                         best_gw_ni = lpni;
1564                         continue;
1565                 }
1566
1567                 rc = lnet_compare_routes(route, best_route);
1568                 if (rc == -1)
1569                         continue;
1570
1571                 /* Restrict the selection of the router NI on the
1572                  * src_net provided. If the src_net is LNET_NID_ANY,
1573                  * then select the best interface available.
1574                  */
1575                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1576                                            route->lr_gateway,
1577                                            src_net);
1578                 if (!lpni) {
1579                         CDEBUG(D_NET,
1580                                "Gateway %s does not have a peer NI on net %s\n",
1581                                libcfs_nid2str(gw_pnid),
1582                                libcfs_net2str(src_net));
1583                         continue;
1584                 }
1585
1586                 if (rc == 1) {
1587                         best_route = route;
1588                         best_gw_ni = lpni;
1589                         continue;
1590                 }
1591
1592                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1593                 if (rc == -1)
1594                         continue;
1595
1596                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1597                         best_route = route;
1598                         best_gw_ni = lpni;
1599                         continue;
1600                 }
1601         }
1602
1603         *prev_route = last_route;
1604         *gwni = best_gw_ni;
1605
1606         return best_route;
1607 }
1608
1609 static inline unsigned int
1610 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1611 {
1612         if (dev_idx == UINT_MAX)
1613                 return UINT_MAX;
1614
1615         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1616             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1617                 return UINT_MAX;
1618
1619         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1620 }
1621
1622 static struct lnet_ni *
1623 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1624                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1625                  struct lnet_msg *msg, int md_cpt)
1626 {
1627         struct lnet_libmd *md = msg->msg_md;
1628         unsigned int offset = msg->msg_offset;
1629         unsigned int shortest_distance;
1630         struct lnet_ni *ni = NULL;
1631         int best_credits;
1632         int best_healthv;
1633         __u32 best_sel_prio;
1634         unsigned int best_dev_prio;
1635         unsigned int dev_idx = UINT_MAX;
1636         struct page *page = lnet_get_first_page(md, offset);
1637         msg->msg_rdma_force = lnet_is_rdma_only_page(page);
1638
1639         if (msg->msg_rdma_force)
1640                 dev_idx = lnet_get_dev_idx(page);
1641
1642         /*
1643          * If there is no peer_ni that we can send to on this network,
1644          * then there is no point in looking for a new best_ni here.
1645         */
1646         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1647                 return best_ni;
1648
1649         if (best_ni == NULL) {
1650                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1651                 shortest_distance = UINT_MAX;
1652                 best_dev_prio = UINT_MAX;
1653                 best_credits = INT_MIN;
1654                 best_healthv = 0;
1655         } else {
1656                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1657                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1658                                                      best_ni->ni_dev_cpt);
1659                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1660                 best_healthv = atomic_read(&best_ni->ni_healthv);
1661                 best_sel_prio = best_ni->ni_sel_priority;
1662         }
1663
1664         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1665                 unsigned int distance;
1666                 int ni_credits;
1667                 int ni_healthv;
1668                 int ni_fatal;
1669                 __u32 ni_sel_prio;
1670                 unsigned int ni_dev_prio;
1671
1672                 ni_credits = atomic_read(&ni->ni_tx_credits);
1673                 ni_healthv = atomic_read(&ni->ni_healthv);
1674                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1675                 ni_sel_prio = ni->ni_sel_priority;
1676
1677                 /*
1678                  * calculate the distance from the CPT on which
1679                  * the message memory is allocated to the CPT of
1680                  * the NI's physical device
1681                  */
1682                 distance = cfs_cpt_distance(lnet_cpt_table(),
1683                                             md_cpt,
1684                                             ni->ni_dev_cpt);
1685
1686                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1687
1688                 /*
1689                  * All distances smaller than the NUMA range
1690                  * are treated equally.
1691                  */
1692                 if (distance < lnet_numa_range)
1693                         distance = lnet_numa_range;
1694
1695                 /*
1696                  * Select on health, selection policy, direct dma prio,
1697                  * shorter distance, available credits, then round-robin.
1698                  */
1699                 if (ni_fatal)
1700                         continue;
1701
1702                 if (best_ni)
1703                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u]\n",
1704                                libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1705                                ni->ni_seq, ni_sel_prio, ni_dev_prio,
1706                                (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1707                                : "not selected", best_credits, shortest_distance,
1708                                (best_ni) ? best_ni->ni_seq : 0,
1709                                best_sel_prio, best_dev_prio);
1710                 else
1711                         goto select_ni;
1712
1713                 if (ni_healthv < best_healthv)
1714                         continue;
1715                 else if (ni_healthv > best_healthv)
1716                         goto select_ni;
1717
1718                 if (ni_sel_prio > best_sel_prio)
1719                         continue;
1720                 else if (ni_sel_prio < best_sel_prio)
1721                         goto select_ni;
1722
1723                 if (ni_dev_prio > best_dev_prio)
1724                         continue;
1725                 else if (ni_dev_prio < best_dev_prio)
1726                         goto select_ni;
1727
1728                 if (distance > shortest_distance)
1729                         continue;
1730                 else if (distance < shortest_distance)
1731                         goto select_ni;
1732
1733                 if (ni_credits < best_credits)
1734                         continue;
1735                 else if (ni_credits > best_credits)
1736                         goto select_ni;
1737
1738                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1739                         continue;
1740
1741 select_ni:
1742                 best_sel_prio = ni_sel_prio;
1743                 best_dev_prio = ni_dev_prio;
1744                 shortest_distance = distance;
1745                 best_healthv = ni_healthv;
1746                 best_ni = ni;
1747                 best_credits = ni_credits;
1748         }
1749
1750         CDEBUG(D_NET, "selected best_ni %s\n",
1751                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1752
1753         return best_ni;
1754 }
1755
1756 /*
1757  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1758  * because such traffic is required to perform discovery. We therefore
1759  * exclude all GET and PUT on that portal. We also exclude all ACK and
1760  * REPLY traffic, but that is because the portal is not tracked in the
1761  * message structure for these message types. We could restrict this
1762  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1763  */
1764 static bool
1765 lnet_msg_discovery(struct lnet_msg *msg)
1766 {
1767         if (msg->msg_type == LNET_MSG_PUT) {
1768                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1769                         return true;
1770         } else if (msg->msg_type == LNET_MSG_GET) {
1771                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1772                         return true;
1773         }
1774         return false;
1775 }
1776
1777 #define SRC_SPEC        0x0001
1778 #define SRC_ANY         0x0002
1779 #define LOCAL_DST       0x0004
1780 #define REMOTE_DST      0x0008
1781 #define MR_DST          0x0010
1782 #define NMR_DST         0x0020
1783 #define SND_RESP        0x0040
1784
1785 /* The following to defines are used for return codes */
1786 #define REPEAT_SEND     0x1000
1787 #define PASS_THROUGH    0x2000
1788
1789 /* The different cases lnet_select pathway needs to handle */
1790 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1791 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1792 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1793 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1794 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1795 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1796 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1797 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
1798
1799 static int
1800 lnet_handle_lo_send(struct lnet_send_data *sd)
1801 {
1802         struct lnet_msg *msg = sd->sd_msg;
1803         int cpt = sd->sd_cpt;
1804
1805         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1806                 return -ESHUTDOWN;
1807
1808         /* No send credit hassles with LOLND */
1809         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1810         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1811         if (!msg->msg_routing)
1812                 msg->msg_hdr.src_nid =
1813                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1814         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1815         lnet_msg_commit(msg, cpt);
1816         msg->msg_txni = the_lnet.ln_loni;
1817
1818         return LNET_CREDIT_OK;
1819 }
1820
1821 static int
1822 lnet_handle_send(struct lnet_send_data *sd)
1823 {
1824         struct lnet_ni *best_ni = sd->sd_best_ni;
1825         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1826         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1827         struct lnet_msg *msg = sd->sd_msg;
1828         int cpt2;
1829         __u32 send_case = sd->sd_send_case;
1830         int rc;
1831         __u32 routing = send_case & REMOTE_DST;
1832          struct lnet_rsp_tracker *rspt;
1833
1834         /* Increment sequence number of the selected peer, peer net,
1835          * local ni and local net so that we pick the next ones
1836          * in Round Robin.
1837          */
1838         best_lpni->lpni_seq++;
1839         best_lpni->lpni_peer_net->lpn_seq++;
1840         best_ni->ni_seq++;
1841         best_ni->ni_net->net_seq++;
1842
1843         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1844                libcfs_nid2str(best_ni->ni_nid),
1845                best_ni->ni_seq, best_ni->ni_net->net_seq,
1846                atomic_read(&best_ni->ni_tx_credits),
1847                best_ni->ni_sel_priority,
1848                libcfs_nid2str(best_lpni->lpni_nid),
1849                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1850                best_lpni->lpni_txcredits,
1851                best_lpni->lpni_sel_priority);
1852
1853         /*
1854          * grab a reference on the peer_ni so it sticks around even if
1855          * we need to drop and relock the lnet_net_lock below.
1856          */
1857         lnet_peer_ni_addref_locked(best_lpni);
1858
1859         /*
1860          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1861          * message. This ensures that we get a CPT that is correct for
1862          * the NI when the NI has been restricted to a subset of all CPTs.
1863          * If the selected CPT differs from the one currently locked, we
1864          * must unlock and relock the lnet_net_lock(), and then check whether
1865          * the configuration has changed. We don't have a hold on the best_ni
1866          * yet, and it may have vanished.
1867          */
1868         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1869         if (sd->sd_cpt != cpt2) {
1870                 __u32 seq = lnet_get_dlc_seq_locked();
1871                 lnet_net_unlock(sd->sd_cpt);
1872                 sd->sd_cpt = cpt2;
1873                 lnet_net_lock(sd->sd_cpt);
1874                 if (seq != lnet_get_dlc_seq_locked()) {
1875                         lnet_peer_ni_decref_locked(best_lpni);
1876                         return REPEAT_SEND;
1877                 }
1878         }
1879
1880         /*
1881          * store the best_lpni in the message right away to avoid having
1882          * to do the same operation under different conditions
1883          */
1884         msg->msg_txpeer = best_lpni;
1885         msg->msg_txni = best_ni;
1886
1887         /*
1888          * grab a reference for the best_ni since now it's in use in this
1889          * send. The reference will be dropped in lnet_finalize()
1890          */
1891         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1892
1893         /*
1894          * Always set the target.nid to the best peer picked. Either the
1895          * NID will be one of the peer NIDs selected, or the same NID as
1896          * what was originally set in the target or it will be the NID of
1897          * a router if this message should be routed
1898          */
1899         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1900
1901         /*
1902          * lnet_msg_commit assigns the correct cpt to the message, which
1903          * is used to decrement the correct refcount on the ni when it's
1904          * time to return the credits
1905          */
1906         lnet_msg_commit(msg, sd->sd_cpt);
1907
1908         /*
1909          * If we are routing the message then we keep the src_nid that was
1910          * set by the originator. If we are not routing then we are the
1911          * originator and set it here.
1912          */
1913         if (!msg->msg_routing)
1914                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1915
1916         if (routing) {
1917                 msg->msg_target_is_router = 1;
1918                 msg->msg_target.pid = LNET_PID_LUSTRE;
1919                 /*
1920                  * since we're routing we want to ensure that the
1921                  * msg_hdr.dest_nid is set to the final destination. When
1922                  * the router receives this message it knows how to route
1923                  * it.
1924                  *
1925                  * final_dst_lpni is set at the beginning of the
1926                  * lnet_select_pathway() function and is never changed.
1927                  * It's safe to use it here.
1928                  */
1929                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1930         } else {
1931                 /*
1932                  * if we're not routing set the dest_nid to the best peer
1933                  * ni NID that we picked earlier in the algorithm.
1934                  */
1935                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1936         }
1937
1938         /*
1939          * if we have response tracker block update it with the next hop
1940          * nid
1941          */
1942         if (msg->msg_md) {
1943                 rspt = msg->msg_md->md_rspt_ptr;
1944                 if (rspt) {
1945                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1946                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1947                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1948                 }
1949         }
1950
1951         rc = lnet_post_send_locked(msg, 0);
1952
1953         if (!rc)
1954                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1955                        libcfs_nid2str(msg->msg_hdr.src_nid),
1956                        libcfs_nid2str(msg->msg_txni->ni_nid),
1957                        libcfs_nid2str(sd->sd_src_nid),
1958                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1959                        libcfs_nid2str(sd->sd_dst_nid),
1960                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1961                        libcfs_nid2str(sd->sd_rtr_nid),
1962                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1963
1964         return rc;
1965 }
1966
1967 static inline void
1968 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1969                          struct lnet_msg *msg)
1970 {
1971         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1972             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1973                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1974                        libcfs_nid2str(lni->ni_nid),
1975                        libcfs_nid2str(lpni->lpni_nid));
1976                 lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
1977         }
1978 }
1979
1980 /*
1981  * Source Specified
1982  * Local Destination
1983  * non-mr peer
1984  *
1985  * use the source and destination NIDs as the pathway
1986  */
1987 static int
1988 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1989 {
1990         /* the destination lpni is set before we get here. */
1991
1992         /* find local NI */
1993         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1994         if (!sd->sd_best_ni) {
1995                 CERROR("Can't send to %s: src %s is not a "
1996                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1997                                 libcfs_nid2str(sd->sd_src_nid));
1998                 return -EINVAL;
1999         }
2000
2001         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2002
2003         return lnet_handle_send(sd);
2004 }
2005
2006 /*
2007  * Source Specified
2008  * Local Destination
2009  * MR Peer
2010  *
2011  * Don't run the selection algorithm on the peer NIs. By specifying the
2012  * local NID, we're also saying that we should always use the destination NID
2013  * provided. This handles the case where we should be using the same
2014  * destination NID for the all the messages which belong to the same RPC
2015  * request.
2016  */
2017 static int
2018 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2019 {
2020         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2021         if (!sd->sd_best_ni) {
2022                 CERROR("Can't send to %s: src %s is not a "
2023                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2024                                 libcfs_nid2str(sd->sd_src_nid));
2025                 return -EINVAL;
2026         }
2027
2028         if (sd->sd_best_lpni &&
2029             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
2030                 return lnet_handle_lo_send(sd);
2031         else if (sd->sd_best_lpni)
2032                 return lnet_handle_send(sd);
2033
2034         CERROR("can't send to %s. no NI on %s\n",
2035                libcfs_nid2str(sd->sd_dst_nid),
2036                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2037
2038         return -EHOSTUNREACH;
2039 }
2040
2041 struct lnet_ni *
2042 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2043                               struct lnet_peer *peer,
2044                               struct lnet_peer_net *peer_net,
2045                               struct lnet_msg *msg,
2046                               int cpt)
2047 {
2048         struct lnet_net *local_net;
2049         struct lnet_ni *best_ni;
2050
2051         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2052         if (!local_net)
2053                 return NULL;
2054
2055         /*
2056          * Iterate through the NIs in this local Net and select
2057          * the NI to send from. The selection is determined by
2058          * these 3 criterion in the following priority:
2059          *      1. NUMA
2060          *      2. NI available credits
2061          *      3. Round Robin
2062          */
2063         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2064                                    peer, peer_net, msg, cpt);
2065
2066         return best_ni;
2067 }
2068
2069 static int
2070 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2071                              int cpt)
2072 {
2073         struct lnet_peer *peer;
2074         struct lnet_peer_ni *new_lpni;
2075         int rc;
2076
2077         lnet_peer_ni_addref_locked(lpni);
2078
2079         peer = lpni->lpni_peer_net->lpn_peer;
2080
2081         if (lnet_peer_gw_discovery(peer)) {
2082                 lnet_peer_ni_decref_locked(lpni);
2083                 return 0;
2084         }
2085
2086         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2087                 lnet_peer_ni_decref_locked(lpni);
2088                 return 0;
2089         }
2090
2091         rc = lnet_discover_peer_locked(lpni, cpt, false);
2092         if (rc) {
2093                 lnet_peer_ni_decref_locked(lpni);
2094                 return rc;
2095         }
2096
2097         new_lpni = lnet_find_peer_ni_locked(lpni->lpni_nid);
2098         if (!new_lpni) {
2099                 lnet_peer_ni_decref_locked(lpni);
2100                 return -ENOENT;
2101         }
2102
2103         peer = new_lpni->lpni_peer_net->lpn_peer;
2104         spin_lock(&peer->lp_lock);
2105         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2106                 /* The peer NI did not change and the peer is up to date.
2107                  * Nothing more to do.
2108                  */
2109                 spin_unlock(&peer->lp_lock);
2110                 lnet_peer_ni_decref_locked(lpni);
2111                 lnet_peer_ni_decref_locked(new_lpni);
2112                 return 0;
2113         }
2114         spin_unlock(&peer->lp_lock);
2115
2116         /* Either the peer NI changed during discovery, or the peer isn't up
2117          * to date. In both cases we want to queue the message on the
2118          * (possibly new) peer's pending queue and queue the peer for discovery
2119          */
2120         msg->msg_sending = 0;
2121         msg->msg_txpeer = NULL;
2122         lnet_net_unlock(cpt);
2123         lnet_peer_queue_message(peer, msg);
2124         lnet_net_lock(cpt);
2125
2126         lnet_peer_ni_decref_locked(lpni);
2127         lnet_peer_ni_decref_locked(new_lpni);
2128
2129         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2130                msg, libcfs_nid2str(peer->lp_primary_nid));
2131
2132         return LNET_DC_WAIT;
2133 }
2134
2135 static int
2136 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2137                              lnet_nid_t dst_nid,
2138                              struct lnet_peer_ni **gw_lpni,
2139                              struct lnet_peer **gw_peer)
2140 {
2141         int rc;
2142         struct lnet_peer *gw;
2143         struct lnet_peer *lp;
2144         struct lnet_peer_net *lpn;
2145         struct lnet_peer_net *best_lpn = NULL;
2146         struct lnet_remotenet *rnet, *best_rnet = NULL;
2147         struct lnet_route *best_route = NULL;
2148         struct lnet_route *last_route = NULL;
2149         struct lnet_peer_ni *lpni = NULL;
2150         struct lnet_peer_ni *gwni = NULL;
2151         bool route_found = false;
2152         lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
2153                 (sd->sd_best_ni != NULL) ? sd->sd_best_ni->ni_nid :
2154                 LNET_NID_ANY;
2155         int best_lpn_healthv = 0;
2156         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2157
2158         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2159                libcfs_nid2str(src_nid));
2160
2161         /* If a router nid was specified then we are replying to a GET or
2162          * sending an ACK. In this case we use the gateway associated with the
2163          * specified router nid.
2164          */
2165         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2166                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2167                 if (gwni) {
2168                         gw = gwni->lpni_peer_net->lpn_peer;
2169                         lnet_peer_ni_decref_locked(gwni);
2170                         if (gw->lp_rtr_refcount)
2171                                 route_found = true;
2172                 } else {
2173                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2174                                libcfs_nid2str(sd->sd_rtr_nid));
2175                 }
2176         }
2177
2178         if (!route_found) {
2179                 if (sd->sd_msg->msg_routing) {
2180                         /* If I'm routing this message then I need to find the
2181                          * next hop based on the destination NID
2182                          */
2183                         best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid));
2184                         if (!best_rnet) {
2185                                 CERROR("Unable to route message to %s - Route table may be misconfigured\n",
2186                                        libcfs_nid2str(sd->sd_dst_nid));
2187                                 return -EHOSTUNREACH;
2188                         }
2189                 } else {
2190                         /* we've already looked up the initial lpni using
2191                          * dst_nid
2192                          */
2193                         lpni = sd->sd_best_lpni;
2194                         /* the peer tree must be in existence */
2195                         LASSERT(lpni && lpni->lpni_peer_net &&
2196                                 lpni->lpni_peer_net->lpn_peer);
2197                         lp = lpni->lpni_peer_net->lpn_peer;
2198
2199                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2200                                 /* is this remote network reachable?  */
2201                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2202                                 if (!rnet)
2203                                         continue;
2204
2205                                 if (!best_lpn) {
2206                                         best_lpn = lpn;
2207                                         best_rnet = rnet;
2208                                 }
2209
2210                                 /* select the preferred peer net */
2211                                 if (best_lpn_healthv > lpn->lpn_healthv)
2212                                         continue;
2213                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2214                                         goto use_lpn;
2215
2216                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2217                                         continue;
2218                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2219                                         goto use_lpn;
2220
2221                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2222                                         continue;
2223 use_lpn:
2224                                 best_lpn_healthv = lpn->lpn_healthv;
2225                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2226                                 best_lpn = lpn;
2227                                 best_rnet = rnet;
2228                         }
2229
2230                         if (!best_lpn) {
2231                                 CERROR("peer %s has no available nets\n",
2232                                        libcfs_nid2str(sd->sd_dst_nid));
2233                                 return -EHOSTUNREACH;
2234                         }
2235
2236                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2237                                                                sd->sd_dst_nid,
2238                                                                lp,
2239                                                                best_lpn->lpn_net_id);
2240                         if (!sd->sd_best_lpni) {
2241                                 CERROR("peer %s is unreachable\n",
2242                                        libcfs_nid2str(sd->sd_dst_nid));
2243                                 return -EHOSTUNREACH;
2244                         }
2245
2246                         /* We're attempting to round robin over the remote peer
2247                          * NI's so update the final destination we selected
2248                          */
2249                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2250
2251                         /* Increment the sequence number of the remote lpni so
2252                          * we can round robin over the different interfaces of
2253                          * the remote lpni
2254                          */
2255                         sd->sd_best_lpni->lpni_seq++;
2256                 }
2257
2258                 /*
2259                  * find the best route. Restrict the selection on the net of the
2260                  * local NI if we've already picked the local NI to send from.
2261                  * Otherwise, let's pick any route we can find and then find
2262                  * a local NI we can reach the route's gateway on. Any route we
2263                  * select will be reachable by virtue of the restriction we have
2264                  * when adding a route.
2265                  */
2266                 best_route = lnet_find_route_locked(best_rnet,
2267                                                     LNET_NIDNET(src_nid),
2268                                                     sd->sd_best_lpni,
2269                                                     &last_route, &gwni);
2270
2271                 if (!best_route) {
2272                         CERROR("no route to %s from %s\n",
2273                                libcfs_nid2str(dst_nid),
2274                                libcfs_nid2str(src_nid));
2275                         return -EHOSTUNREACH;
2276                 }
2277
2278                 if (!gwni) {
2279                         CERROR("Internal Error. Route expected to %s from %s\n",
2280                                libcfs_nid2str(dst_nid),
2281                                libcfs_nid2str(src_nid));
2282                         return -EFAULT;
2283                 }
2284
2285                 gw = best_route->lr_gateway;
2286                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2287         }
2288
2289         /*
2290          * Discover this gateway if it hasn't already been discovered.
2291          * This means we might delay the message until discovery has
2292          * completed
2293          */
2294         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2295         if (rc)
2296                 return rc;
2297
2298         if (!sd->sd_best_ni) {
2299                 lpn = gwni->lpni_peer_net;
2300                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2301                                                                sd->sd_msg,
2302                                                                sd->sd_md_cpt);
2303                 if (!sd->sd_best_ni) {
2304                         CERROR("Internal Error. Expected local ni on %s but non found: %s\n",
2305                                libcfs_net2str(lpn->lpn_net_id),
2306                                libcfs_nid2str(sd->sd_src_nid));
2307                         return -EFAULT;
2308                 }
2309         }
2310
2311         *gw_lpni = gwni;
2312         *gw_peer = gw;
2313
2314         /*
2315          * increment the sequence numbers since now we're sure we're
2316          * going to use this path
2317          */
2318         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2319                 LASSERT(best_route && last_route);
2320                 best_route->lr_seq = last_route->lr_seq + 1;
2321                 if (best_lpn)
2322                         best_lpn->lpn_seq++;
2323         }
2324
2325         return 0;
2326 }
2327
2328 /*
2329  * Handle two cases:
2330  *
2331  * Case 1:
2332  *  Source specified
2333  *  Remote destination
2334  *  Non-MR destination
2335  *
2336  * Case 2:
2337  *  Source specified
2338  *  Remote destination
2339  *  MR destination
2340  *
2341  * The handling of these two cases is similar. Even though the destination
2342  * can be MR or non-MR, we'll deal directly with the router.
2343  */
2344 static int
2345 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2346 {
2347         int rc;
2348         struct lnet_peer_ni *gw_lpni = NULL;
2349         struct lnet_peer *gw_peer = NULL;
2350
2351         /* find local NI */
2352         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2353         if (!sd->sd_best_ni) {
2354                 CERROR("Can't send to %s: src %s is not a "
2355                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2356                                 libcfs_nid2str(sd->sd_src_nid));
2357                 return -EINVAL;
2358         }
2359
2360         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2361                                      &gw_peer);
2362         if (rc)
2363                 return rc;
2364
2365         if (sd->sd_send_case & NMR_DST)
2366                 /*
2367                  * since the final destination is non-MR let's set its preferred
2368                  * NID before we send
2369                  */
2370                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2371                                          sd->sd_msg);
2372
2373         /*
2374          * We're going to send to the gw found so let's set its
2375          * info
2376          */
2377         sd->sd_peer = gw_peer;
2378         sd->sd_best_lpni = gw_lpni;
2379
2380         return lnet_handle_send(sd);
2381 }
2382
2383 struct lnet_ni *
2384 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2385                                struct lnet_msg *msg, bool discovery)
2386 {
2387         struct lnet_peer_net *lpn = NULL;
2388         struct lnet_peer_net *best_lpn = NULL;
2389         struct lnet_net *net = NULL;
2390         struct lnet_net *best_net = NULL;
2391         struct lnet_ni *best_ni = NULL;
2392         int best_lpn_healthv = 0;
2393         int best_net_healthv = 0;
2394         int net_healthv;
2395         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2396         __u32 lpn_sel_prio;
2397         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2398         __u32 net_sel_prio;
2399         bool exit = false;
2400
2401         /*
2402          * The peer can have multiple interfaces, some of them can be on
2403          * the local network and others on a routed network. We should
2404          * prefer the local network. However if the local network is not
2405          * available then we need to try the routed network
2406          */
2407
2408         /* go through all the peer nets and find the best_ni */
2409         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2410                 /*
2411                  * The peer's list of nets can contain non-local nets. We
2412                  * want to only examine the local ones.
2413                  */
2414                 net = lnet_get_net_locked(lpn->lpn_net_id);
2415                 if (!net)
2416                         continue;
2417
2418                 lpn_sel_prio = lpn->lpn_sel_priority;
2419                 net_healthv = lnet_get_net_healthv_locked(net);
2420                 net_sel_prio = net->net_sel_priority;
2421
2422                 /*
2423                  * if this is a discovery message and lp_disc_net_id is
2424                  * specified then use that net to send the discovery on.
2425                  */
2426                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2427                     discovery) {
2428                         exit = true;
2429                         goto select_lpn;
2430                 }
2431
2432                 if (!best_lpn)
2433                         goto select_lpn;
2434
2435                 /* always select the lpn with the best health */
2436                 if (best_lpn_healthv > lpn->lpn_healthv)
2437                         continue;
2438                 else if (best_lpn_healthv < lpn->lpn_healthv)
2439                         goto select_lpn;
2440
2441                 /* select the preferred peer and local nets */
2442                 if (best_lpn_sel_prio < lpn_sel_prio)
2443                         continue;
2444                 else if (best_lpn_sel_prio > lpn_sel_prio)
2445                         goto select_lpn;
2446
2447                 if (best_net_healthv > net_healthv)
2448                         continue;
2449                 else if (best_net_healthv < net_healthv)
2450                         goto select_lpn;
2451
2452                 if (best_net_sel_prio < net_sel_prio)
2453                         continue;
2454                 else if (best_net_sel_prio > net_sel_prio)
2455                         goto select_lpn;
2456
2457                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2458                         continue;
2459                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2460                         goto select_lpn;
2461
2462                 /* round robin over the local networks */
2463                 if (best_net->net_seq <= net->net_seq)
2464                         continue;
2465
2466 select_lpn:
2467                 best_net_healthv = net_healthv;
2468                 best_net_sel_prio = net_sel_prio;
2469                 best_lpn_healthv = lpn->lpn_healthv;
2470                 best_lpn_sel_prio = lpn_sel_prio;
2471                 best_lpn = lpn;
2472                 best_net = net;
2473
2474                 if (exit)
2475                         break;
2476         }
2477
2478         if (best_lpn) {
2479                 /* Select the best NI on the same net as best_lpn chosen
2480                  * above
2481                  */
2482                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2483                                                         msg, md_cpt);
2484         }
2485
2486         return best_ni;
2487 }
2488
2489 static struct lnet_ni *
2490 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2491 {
2492         struct lnet_ni *best_ni = NULL;
2493         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2494         struct lnet_peer_ni *lpni_entry;
2495
2496         /*
2497          * We must use a consistent source address when sending to a
2498          * non-MR peer. However, a non-MR peer can have multiple NIDs
2499          * on multiple networks, and we may even need to talk to this
2500          * peer on multiple networks -- certain types of
2501          * load-balancing configuration do this.
2502          *
2503          * So we need to pick the NI the peer prefers for this
2504          * particular network.
2505          */
2506         LASSERT(peer_net);
2507         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2508                             lpni_peer_nis) {
2509                 if (lpni_entry->lpni_pref_nnids == 0)
2510                         continue;
2511                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2512                 best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt);
2513                 break;
2514         }
2515
2516         return best_ni;
2517 }
2518
2519 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2520 static int
2521 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2522 {
2523         struct lnet_ni *best_ni = NULL;
2524         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2525
2526         /*
2527          * We must use a consistent source address when sending to a
2528          * non-MR peer. However, a non-MR peer can have multiple NIDs
2529          * on multiple networks, and we may even need to talk to this
2530          * peer on multiple networks -- certain types of
2531          * load-balancing configuration do this.
2532          *
2533          * So we need to pick the NI the peer prefers for this
2534          * particular network.
2535          */
2536
2537         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2538                                                        sd->sd_cpt);
2539
2540         /* if best_ni is still not set just pick one */
2541         if (!best_ni) {
2542                 best_ni =
2543                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2544                                                 sd->sd_best_lpni->lpni_peer_net,
2545                                                 sd->sd_msg,
2546                                                 sd->sd_md_cpt);
2547                 /* If there is no best_ni we don't have a route */
2548                 if (!best_ni) {
2549                         CERROR("no path to %s from net %s\n",
2550                                 libcfs_nid2str(best_lpni->lpni_nid),
2551                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2552                         return -EHOSTUNREACH;
2553                 }
2554         }
2555
2556         sd->sd_best_ni = best_ni;
2557
2558         /* Set preferred NI if necessary. */
2559         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2560
2561         return 0;
2562 }
2563
2564
2565 /*
2566  * Source not specified
2567  * Local destination
2568  * Non-MR Peer
2569  *
2570  * always use the same source NID for NMR peers
2571  * If we've talked to that peer before then we already have a preferred
2572  * source NI associated with it. Otherwise, we select a preferred local NI
2573  * and store it in the peer
2574  */
2575 static int
2576 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2577 {
2578         int rc = 0;
2579
2580         /* sd->sd_best_lpni is already set to the final destination */
2581
2582         /*
2583          * At this point we should've created the peer ni and peer. If we
2584          * can't find it, then something went wrong. Instead of assert
2585          * output a relevant message and fail the send
2586          */
2587         if (!sd->sd_best_lpni) {
2588                 CERROR("Internal fault. Unable to send msg %s to %s. "
2589                        "NID not known\n",
2590                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2591                        libcfs_nid2str(sd->sd_dst_nid));
2592                 return -EFAULT;
2593         }
2594
2595         if (sd->sd_msg->msg_routing) {
2596                 /* If I'm forwarding this message then I can choose any NI
2597                  * on the destination peer net
2598                  */
2599                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2600                                                                sd->sd_peer,
2601                                                                sd->sd_best_lpni->lpni_peer_net,
2602                                                                sd->sd_msg,
2603                                                                sd->sd_md_cpt);
2604                 if (!sd->sd_best_ni) {
2605                         CERROR("Unable to forward message to %s. No local NI available\n",
2606                                libcfs_nid2str(sd->sd_dst_nid));
2607                         rc = -EHOSTUNREACH;
2608                 }
2609         } else
2610                 rc = lnet_select_preferred_best_ni(sd);
2611
2612         if (!rc)
2613                 rc = lnet_handle_send(sd);
2614
2615         return rc;
2616 }
2617
2618 static int
2619 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2620 {
2621         /*
2622          * NOTE we've already handled the remote peer case. So we only
2623          * need to worry about the local case here.
2624          *
2625          * if we're sending a response, ACK or reply, we need to send it
2626          * to the destination NID given to us. At this point we already
2627          * have the peer_ni we're suppose to send to, so just find the
2628          * best_ni on the peer net and use that. Since we're sending to an
2629          * MR peer then we can just run the selection algorithm on our
2630          * local NIs and pick the best one.
2631          */
2632         if (sd->sd_send_case & SND_RESP) {
2633                 sd->sd_best_ni =
2634                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2635                                                 sd->sd_best_lpni->lpni_peer_net,
2636                                                 sd->sd_msg,
2637                                                 sd->sd_md_cpt);
2638
2639                 if (!sd->sd_best_ni) {
2640                         /*
2641                          * We're not going to deal with not able to send
2642                          * a response to the provided final destination
2643                          */
2644                         CERROR("Can't send response to %s. "
2645                                "No local NI available\n",
2646                                 libcfs_nid2str(sd->sd_dst_nid));
2647                         return -EHOSTUNREACH;
2648                 }
2649
2650                 return lnet_handle_send(sd);
2651         }
2652
2653         /*
2654          * If we get here that means we're sending a fresh request, PUT or
2655          * GET, so we need to run our standard selection algorithm.
2656          * First find the best local interface that's on any of the peer's
2657          * networks.
2658          */
2659         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2660                                         sd->sd_md_cpt,
2661                                         sd->sd_msg,
2662                                         lnet_msg_discovery(sd->sd_msg));
2663         if (sd->sd_best_ni) {
2664                 sd->sd_best_lpni =
2665                   lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid,
2666                                       sd->sd_peer,
2667                                       sd->sd_best_ni->ni_net->net_id);
2668
2669                 /*
2670                  * if we're successful in selecting a peer_ni on the local
2671                  * network, then send to it. Otherwise fall through and
2672                  * try and see if we can reach it over another routed
2673                  * network
2674                  */
2675                 if (sd->sd_best_lpni &&
2676                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2677                         /*
2678                          * in case we initially started with a routed
2679                          * destination, let's reset to local
2680                          */
2681                         sd->sd_send_case &= ~REMOTE_DST;
2682                         sd->sd_send_case |= LOCAL_DST;
2683                         return lnet_handle_lo_send(sd);
2684                 } else if (sd->sd_best_lpni) {
2685                         /*
2686                          * in case we initially started with a routed
2687                          * destination, let's reset to local
2688                          */
2689                         sd->sd_send_case &= ~REMOTE_DST;
2690                         sd->sd_send_case |= LOCAL_DST;
2691                         return lnet_handle_send(sd);
2692                 }
2693
2694                 CERROR("Internal Error. Expected to have a best_lpni: "
2695                        "%s -> %s\n",
2696                        libcfs_nid2str(sd->sd_src_nid),
2697                        libcfs_nid2str(sd->sd_dst_nid));
2698
2699                 return -EFAULT;
2700         }
2701
2702         /*
2703          * Peer doesn't have a local network. Let's see if there is
2704          * a remote network we can reach it on.
2705          */
2706         return PASS_THROUGH;
2707 }
2708
2709 /*
2710  * Case 1:
2711  *      Source NID not specified
2712  *      Local destination
2713  *      MR peer
2714  *
2715  * Case 2:
2716  *      Source NID not speified
2717  *      Remote destination
2718  *      MR peer
2719  *
2720  * In both of these cases if we're sending a response, ACK or REPLY, then
2721  * we need to send to the destination NID provided.
2722  *
2723  * In the remote case let's deal with MR routers.
2724  *
2725  */
2726
2727 static int
2728 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2729 {
2730         int rc = 0;
2731         struct lnet_peer *gw_peer = NULL;
2732         struct lnet_peer_ni *gw_lpni = NULL;
2733
2734         /*
2735          * handle sending a response to a remote peer here so we don't
2736          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2737          */
2738         if (sd->sd_send_case & REMOTE_DST &&
2739             sd->sd_send_case & SND_RESP) {
2740                 struct lnet_peer_ni *gw;
2741                 struct lnet_peer *gw_peer;
2742
2743                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2744                                                   &gw_peer);
2745                 if (rc < 0) {
2746                         CERROR("Can't send response to %s. "
2747                                "No route available\n",
2748                                 libcfs_nid2str(sd->sd_dst_nid));
2749                         return -EHOSTUNREACH;
2750                 } else if (rc > 0) {
2751                         return rc;
2752                 }
2753
2754                 sd->sd_best_lpni = gw;
2755                 sd->sd_peer = gw_peer;
2756
2757                 return lnet_handle_send(sd);
2758         }
2759
2760         /*
2761          * Even though the NID for the peer might not be on a local network,
2762          * since the peer is MR there could be other interfaces on the
2763          * local network. In that case we'd still like to prefer the local
2764          * network over the routed network. If we're unable to do that
2765          * then we select the best router among the different routed networks,
2766          * and if the router is MR then we can deal with it as such.
2767          */
2768         rc = lnet_handle_any_mr_dsta(sd);
2769         if (rc != PASS_THROUGH)
2770                 return rc;
2771
2772         /*
2773          * Now that we must route to the destination, we must consider the
2774          * MR case, where the destination has multiple interfaces, some of
2775          * which we can route to and others we do not. For this reason we
2776          * need to select the destination which we can route to and if
2777          * there are multiple, we need to round robin.
2778          */
2779         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2780                                           &gw_peer);
2781         if (rc)
2782                 return rc;
2783
2784         sd->sd_send_case &= ~LOCAL_DST;
2785         sd->sd_send_case |= REMOTE_DST;
2786
2787         sd->sd_peer = gw_peer;
2788         sd->sd_best_lpni = gw_lpni;
2789
2790         return lnet_handle_send(sd);
2791 }
2792
2793 /*
2794  * Source not specified
2795  * Remote destination
2796  * Non-MR peer
2797  *
2798  * Must send to the specified peer NID using the same source NID that
2799  * we've used before. If it's the first time to talk to that peer then
2800  * find the source NI and assign it as preferred to that peer
2801  */
2802 static int
2803 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2804 {
2805         int rc;
2806         struct lnet_peer_ni *gw_lpni = NULL;
2807         struct lnet_peer *gw_peer = NULL;
2808
2809         /*
2810          * Let's see if we have a preferred NI to talk to this NMR peer
2811          */
2812         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2813                                                               sd->sd_cpt);
2814
2815         /*
2816          * find the router and that'll find the best NI if we didn't find
2817          * it already.
2818          */
2819         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2820                                           &gw_peer);
2821         if (rc)
2822                 return rc;
2823
2824         /*
2825          * set the best_ni we've chosen as the preferred one for
2826          * this peer
2827          */
2828         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2829
2830         /* we'll be sending to the gw */
2831         sd->sd_best_lpni = gw_lpni;
2832         sd->sd_peer = gw_peer;
2833
2834         return lnet_handle_send(sd);
2835 }
2836
2837 static int
2838 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2839 {
2840         /*
2841          * turn off the SND_RESP bit.
2842          * It will be checked in the case handling
2843          */
2844         __u32 send_case = sd->sd_send_case &= ~SND_RESP ;
2845
2846         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2847                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2848                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2849                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2850                 libcfs_nid2str(sd->sd_dst_nid),
2851                 (send_case & LOCAL_DST) ? "local" : "routed");
2852
2853         switch (send_case) {
2854         /*
2855          * For all cases where the source is specified, we should always
2856          * use the destination NID, whether it's an MR destination or not,
2857          * since we're continuing a series of related messages for the
2858          * same RPC
2859          */
2860         case SRC_SPEC_LOCAL_NMR_DST:
2861                 return lnet_handle_spec_local_nmr_dst(sd);
2862         case SRC_SPEC_LOCAL_MR_DST:
2863                 return lnet_handle_spec_local_mr_dst(sd);
2864         case SRC_SPEC_ROUTER_NMR_DST:
2865         case SRC_SPEC_ROUTER_MR_DST:
2866                 return lnet_handle_spec_router_dst(sd);
2867         case SRC_ANY_LOCAL_NMR_DST:
2868                 return lnet_handle_any_local_nmr_dst(sd);
2869         case SRC_ANY_LOCAL_MR_DST:
2870         case SRC_ANY_ROUTER_MR_DST:
2871                 return lnet_handle_any_mr_dst(sd);
2872         case SRC_ANY_ROUTER_NMR_DST:
2873                 return lnet_handle_any_router_nmr_dst(sd);
2874         default:
2875                 CERROR("Unknown send case\n");
2876                 return -1;
2877         }
2878 }
2879
2880 static int
2881 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2882                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2883 {
2884         struct lnet_peer_ni *lpni;
2885         struct lnet_peer *peer;
2886         struct lnet_send_data send_data;
2887         int cpt, rc;
2888         int md_cpt;
2889         __u32 send_case = 0;
2890         bool final_hop;
2891         bool mr_forwarding_allowed;
2892
2893         memset(&send_data, 0, sizeof(send_data));
2894
2895         /*
2896          * get an initial CPT to use for locking. The idea here is not to
2897          * serialize the calls to select_pathway, so that as many
2898          * operations can run concurrently as possible. To do that we use
2899          * the CPT where this call is being executed. Later on when we
2900          * determine the CPT to use in lnet_message_commit, we switch the
2901          * lock and check if there was any configuration change.  If none,
2902          * then we proceed, if there is, then we restart the operation.
2903          */
2904         cpt = lnet_net_lock_current();
2905
2906         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2907         if (md_cpt == CFS_CPT_ANY)
2908                 md_cpt = cpt;
2909
2910 again:
2911
2912         /*
2913          * If we're being asked to send to the loopback interface, there
2914          * is no need to go through any selection. We can just shortcut
2915          * the entire process and send over lolnd
2916          */
2917         send_data.sd_msg = msg;
2918         send_data.sd_cpt = cpt;
2919         if (dst_nid == LNET_NID_LO_0) {
2920                 rc = lnet_handle_lo_send(&send_data);
2921                 lnet_net_unlock(cpt);
2922                 return rc;
2923         }
2924
2925         /*
2926          * find an existing peer_ni, or create one and mark it as having been
2927          * created due to network traffic. This call will create the
2928          * peer->peer_net->peer_ni tree.
2929          */
2930         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2931         if (IS_ERR(lpni)) {
2932                 lnet_net_unlock(cpt);
2933                 return PTR_ERR(lpni);
2934         }
2935
2936         /*
2937          * Cache the original src_nid and rtr_nid. If we need to resend the
2938          * message then we'll need to know whether the src_nid was originally
2939          * specified for this message. If it was originally specified,
2940          * then we need to keep using the same src_nid since it's
2941          * continuing the same sequence of messages. Similarly, rtr_nid will
2942          * affect our choice of next hop.
2943          */
2944         msg->msg_src_nid_param = src_nid;
2945         msg->msg_rtr_nid_param = rtr_nid;
2946
2947         /*
2948          * If necessary, perform discovery on the peer that owns this peer_ni.
2949          * Note, this can result in the ownership of this peer_ni changing
2950          * to another peer object.
2951          */
2952         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2953         if (rc) {
2954                 lnet_peer_ni_decref_locked(lpni);
2955                 lnet_net_unlock(cpt);
2956                 return rc;
2957         }
2958         lnet_peer_ni_decref_locked(lpni);
2959
2960         peer = lpni->lpni_peer_net->lpn_peer;
2961
2962         /*
2963          * Identify the different send cases
2964          */
2965         if (src_nid == LNET_NID_ANY)
2966                 send_case |= SRC_ANY;
2967         else
2968                 send_case |= SRC_SPEC;
2969
2970         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2971                 send_case |= LOCAL_DST;
2972         else
2973                 send_case |= REMOTE_DST;
2974
2975         final_hop = false;
2976         if (msg->msg_routing && (send_case & LOCAL_DST))
2977                 final_hop = true;
2978
2979         /* Determine whether to allow MR forwarding for this message.
2980          * NB: MR forwarding is allowed if the message originator and the
2981          * destination are both MR capable, and the destination lpni that was
2982          * originally chosen by the originator is unhealthy or down.
2983          * We check the MR capability of the destination further below
2984          */
2985         mr_forwarding_allowed = false;
2986         if (final_hop) {
2987                 struct lnet_peer *src_lp;
2988                 struct lnet_peer_ni *src_lpni;
2989
2990                 src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid,
2991                                                   LNET_NID_ANY, cpt);
2992                 /* We don't fail the send if we hit any errors here. We'll just
2993                  * try to send it via non-multi-rail criteria
2994                  */
2995                 if (!IS_ERR(src_lpni)) {
2996                         /* Drop ref taken by lnet_nid2peerni_locked() */
2997                         lnet_peer_ni_decref_locked(src_lpni);
2998                         src_lp = lpni->lpni_peer_net->lpn_peer;
2999                         if (lnet_peer_is_multi_rail(src_lp) &&
3000                             !lnet_is_peer_ni_alive(lpni))
3001                                 mr_forwarding_allowed = true;
3002
3003                 }
3004                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3005                        mr_forwarding_allowed ? "allowed" : "not allowed");
3006         }
3007
3008         /*
3009          * Deal with the peer as NMR in the following cases:
3010          * 1. the peer is NMR
3011          * 2. We're trying to recover a specific peer NI
3012          * 3. I'm a router sending to the final destination and MR forwarding is
3013          *    not allowed for this message (as determined above).
3014          *    In this case the source of the message would've
3015          *    already selected the final destination so my job
3016          *    is to honor the selection.
3017          */
3018         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3019             (final_hop && !mr_forwarding_allowed))
3020                 send_case |= NMR_DST;
3021         else
3022                 send_case |= MR_DST;
3023
3024         if (lnet_msg_is_response(msg))
3025                 send_case |= SND_RESP;
3026
3027         /* assign parameters to the send_data */
3028         send_data.sd_rtr_nid = rtr_nid;
3029         send_data.sd_src_nid = src_nid;
3030         send_data.sd_dst_nid = dst_nid;
3031         send_data.sd_best_lpni = lpni;
3032         /*
3033          * keep a pointer to the final destination in case we're going to
3034          * route, so we'll need to access it later
3035          */
3036         send_data.sd_final_dst_lpni = lpni;
3037         send_data.sd_peer = peer;
3038         send_data.sd_md_cpt = md_cpt;
3039         send_data.sd_send_case = send_case;
3040
3041         rc = lnet_handle_send_case_locked(&send_data);
3042
3043         /*
3044          * Update the local cpt since send_data.sd_cpt might've been
3045          * updated as a result of calling lnet_handle_send_case_locked().
3046          */
3047         cpt = send_data.sd_cpt;
3048
3049         if (rc == REPEAT_SEND)
3050                 goto again;
3051
3052         lnet_net_unlock(cpt);
3053
3054         return rc;
3055 }
3056
3057 int
3058 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
3059 {
3060         lnet_nid_t              dst_nid = msg->msg_target.nid;
3061         int                     rc;
3062
3063         /*
3064          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
3065          * but we might want to use pre-determined router for ACK/REPLY
3066          * in the future
3067          */
3068         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3069         LASSERT(msg->msg_txpeer == NULL);
3070         LASSERT(msg->msg_txni == NULL);
3071         LASSERT(!msg->msg_sending);
3072         LASSERT(!msg->msg_target_is_router);
3073         LASSERT(!msg->msg_receiving);
3074
3075         msg->msg_sending = 1;
3076
3077         LASSERT(!msg->msg_tx_committed);
3078
3079         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3080         if (rc < 0) {
3081                 if (rc == -EHOSTUNREACH)
3082                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3083                 else
3084                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3085                 return rc;
3086         }
3087
3088         if (rc == LNET_CREDIT_OK)
3089                 lnet_ni_send(msg->msg_txni, msg);
3090
3091         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3092         return 0;
3093 }
3094
3095 enum lnet_mt_event_type {
3096         MT_TYPE_LOCAL_NI = 0,
3097         MT_TYPE_PEER_NI
3098 };
3099
3100 struct lnet_mt_event_info {
3101         enum lnet_mt_event_type mt_type;
3102         lnet_nid_t mt_nid;
3103 };
3104
3105 /* called with res_lock held */
3106 void
3107 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3108 {
3109         struct lnet_rsp_tracker *rspt;
3110
3111         /*
3112          * msg has a refcount on the MD so the MD is not going away.
3113          * The rspt queue for the cpt is protected by
3114          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3115          */
3116         if (!md->md_rspt_ptr)
3117                 return;
3118
3119         rspt = md->md_rspt_ptr;
3120
3121         /* debug code */
3122         LASSERT(rspt->rspt_cpt == cpt);
3123
3124         md->md_rspt_ptr = NULL;
3125
3126         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3127                 /*
3128                  * The monitor thread has invalidated this handle because the
3129                  * response timed out, but it failed to lookup the MD. That
3130                  * means this response tracker is on the zombie list. We can
3131                  * safely remove it under the resource lock (held by caller) and
3132                  * free the response tracker block.
3133                  */
3134                 list_del(&rspt->rspt_on_list);
3135                 lnet_rspt_free(rspt, cpt);
3136         } else {
3137                 /*
3138                  * invalidate the handle to indicate that a response has been
3139                  * received, which will then lead the monitor thread to clean up
3140                  * the rspt block.
3141                  */
3142                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3143         }
3144 }
3145
3146 void
3147 lnet_clean_zombie_rstqs(void)
3148 {
3149         struct lnet_rsp_tracker *rspt, *tmp;
3150         int i;
3151
3152         cfs_cpt_for_each(i, lnet_cpt_table()) {
3153                 list_for_each_entry_safe(rspt, tmp,
3154                                          the_lnet.ln_mt_zombie_rstqs[i],
3155                                          rspt_on_list) {
3156                         list_del(&rspt->rspt_on_list);
3157                         lnet_rspt_free(rspt, i);
3158                 }
3159         }
3160
3161         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3162 }
3163
3164 static void
3165 lnet_finalize_expired_responses(void)
3166 {
3167         struct lnet_libmd *md;
3168         struct lnet_rsp_tracker *rspt, *tmp;
3169         ktime_t now;
3170         int i;
3171
3172         if (the_lnet.ln_mt_rstq == NULL)
3173                 return;
3174
3175         cfs_cpt_for_each(i, lnet_cpt_table()) {
3176                 LIST_HEAD(local_queue);
3177
3178                 lnet_net_lock(i);
3179                 if (!the_lnet.ln_mt_rstq[i]) {
3180                         lnet_net_unlock(i);
3181                         continue;
3182                 }
3183                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3184                 lnet_net_unlock(i);
3185
3186                 now = ktime_get();
3187
3188                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3189                         /*
3190                          * The rspt mdh will be invalidated when a response
3191                          * is received or whenever we want to discard the
3192                          * block the monitor thread will walk the queue
3193                          * and clean up any rsts with an invalid mdh.
3194                          * The monitor thread will walk the queue until
3195                          * the first unexpired rspt block. This means that
3196                          * some rspt blocks which received their
3197                          * corresponding responses will linger in the
3198                          * queue until they are cleaned up eventually.
3199                          */
3200                         lnet_res_lock(i);
3201                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3202                                 lnet_res_unlock(i);
3203                                 list_del(&rspt->rspt_on_list);
3204                                 lnet_rspt_free(rspt, i);
3205                                 continue;
3206                         }
3207
3208                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3209                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3210                                 struct lnet_peer_ni *lpni;
3211                                 lnet_nid_t nid;
3212
3213                                 md = lnet_handle2md(&rspt->rspt_mdh);
3214                                 if (!md) {
3215                                         /* MD has been queued for unlink, but
3216                                          * rspt hasn't been detached (Note we've
3217                                          * checked above that the rspt_mdh is
3218                                          * valid). Since we cannot lookup the MD
3219                                          * we're unable to detach the rspt
3220                                          * ourselves. Thus, move the rspt to the
3221                                          * zombie list where we'll wait for
3222                                          * either:
3223                                          *   1. The remaining operations on the
3224                                          *   MD to complete. In this case the
3225                                          *   final operation will result in
3226                                          *   lnet_msg_detach_md()->
3227                                          *   lnet_detach_rsp_tracker() where
3228                                          *   we will clean up this response
3229                                          *   tracker.
3230                                          *   2. LNet to shutdown. In this case
3231                                          *   we'll wait until after all LND Nets
3232                                          *   have shutdown and then we can
3233                                          *   safely free any remaining response
3234                                          *   tracker blocks on the zombie list.
3235                                          * Note: We need to hold the resource
3236                                          * lock when adding to the zombie list
3237                                          * because we may have concurrent access
3238                                          * with lnet_detach_rsp_tracker().
3239                                          */
3240                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3241                                         list_move(&rspt->rspt_on_list,
3242                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3243                                         lnet_res_unlock(i);
3244                                         continue;
3245                                 }
3246                                 LASSERT(md->md_rspt_ptr == rspt);
3247                                 md->md_rspt_ptr = NULL;
3248                                 lnet_res_unlock(i);
3249
3250                                 LNetMDUnlink(rspt->rspt_mdh);
3251
3252                                 nid = rspt->rspt_next_hop_nid;
3253
3254                                 list_del(&rspt->rspt_on_list);
3255                                 lnet_rspt_free(rspt, i);
3256
3257                                 /* If we're shutting down we just want to clean
3258                                  * up the rspt blocks
3259                                  */
3260                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3261                                         continue;
3262
3263                                 lnet_net_lock(i);
3264                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3265                                 lnet_net_unlock(i);
3266
3267                                 CDEBUG(D_NET,
3268                                        "Response timeout: md = %p: nid = %s\n",
3269                                        md, libcfs_nid2str(nid));
3270
3271                                 /*
3272                                  * If there is a timeout on the response
3273                                  * from the next hop decrement its health
3274                                  * value so that we don't use it
3275                                  */
3276                                 lnet_net_lock(0);
3277                                 lpni = lnet_find_peer_ni_locked(nid);
3278                                 if (lpni) {
3279                                         lnet_handle_remote_failure_locked(lpni);
3280                                         lnet_peer_ni_decref_locked(lpni);
3281                                 }
3282                                 lnet_net_unlock(0);
3283                         } else {
3284                                 lnet_res_unlock(i);
3285                                 break;
3286                         }
3287                 }
3288
3289                 if (!list_empty(&local_queue)) {
3290                         lnet_net_lock(i);
3291                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3292                         lnet_net_unlock(i);
3293                 }
3294         }
3295 }
3296
3297 static void
3298 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3299 {
3300         struct lnet_msg *msg;
3301
3302         while (!list_empty(resendq)) {
3303                 struct lnet_peer_ni *lpni;
3304
3305                 msg = list_entry(resendq->next, struct lnet_msg,
3306                                  msg_list);
3307
3308                 list_del_init(&msg->msg_list);
3309
3310                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3311                 if (!lpni) {
3312                         lnet_net_unlock(cpt);
3313                         CERROR("Expected that a peer is already created for %s\n",
3314                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3315                         msg->msg_no_resend = true;
3316                         lnet_finalize(msg, -EFAULT);
3317                         lnet_net_lock(cpt);
3318                 } else {
3319                         int rc;
3320
3321                         lnet_peer_ni_decref_locked(lpni);
3322
3323                         lnet_net_unlock(cpt);
3324                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3325                                libcfs_nid2str(msg->msg_src_nid_param),
3326                                libcfs_id2str(msg->msg_target),
3327                                lnet_msgtyp2str(msg->msg_type),
3328                                msg->msg_recovery,
3329                                msg->msg_retry_count);
3330                         rc = lnet_send(msg->msg_src_nid_param, msg,
3331                                        msg->msg_rtr_nid_param);
3332                         if (rc) {
3333                                 CERROR("Error sending %s to %s: %d\n",
3334                                        lnet_msgtyp2str(msg->msg_type),
3335                                        libcfs_id2str(msg->msg_target), rc);
3336                                 msg->msg_no_resend = true;
3337                                 lnet_finalize(msg, rc);
3338                         }
3339                         lnet_net_lock(cpt);
3340                         if (!rc)
3341                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3342                 }
3343         }
3344 }
3345
3346 static void
3347 lnet_resend_pending_msgs(void)
3348 {
3349         int i;
3350
3351         cfs_cpt_for_each(i, lnet_cpt_table()) {
3352                 lnet_net_lock(i);
3353                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3354                 lnet_net_unlock(i);
3355         }
3356 }
3357
3358 /* called with cpt and ni_lock held */
3359 static void
3360 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3361 {
3362         struct lnet_handle_md recovery_mdh;
3363
3364         LNetInvalidateMDHandle(&recovery_mdh);
3365
3366         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3367             force) {
3368                 recovery_mdh = ni->ni_ping_mdh;
3369                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3370         }
3371         lnet_ni_unlock(ni);
3372         lnet_net_unlock(cpt);
3373         if (!LNetMDHandleIsInvalid(recovery_mdh))
3374                 LNetMDUnlink(recovery_mdh);
3375         lnet_net_lock(cpt);
3376         lnet_ni_lock(ni);
3377 }
3378
3379 static void
3380 lnet_recover_local_nis(void)
3381 {
3382         struct lnet_mt_event_info *ev_info;
3383         LIST_HEAD(processed_list);
3384         LIST_HEAD(local_queue);
3385         struct lnet_handle_md mdh;
3386         struct lnet_ni *tmp;
3387         struct lnet_ni *ni;
3388         lnet_nid_t nid;
3389         int healthv;
3390         int rc;
3391         time64_t now;
3392
3393         /*
3394          * splice the recovery queue on a local queue. We will iterate
3395          * through the local queue and update it as needed. Once we're
3396          * done with the traversal, we'll splice the local queue back on
3397          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3398          * will be traversed in the next iteration.
3399          */
3400         lnet_net_lock(0);
3401         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3402                          &local_queue);
3403         lnet_net_unlock(0);
3404
3405         now = ktime_get_seconds();
3406
3407         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3408                 /*
3409                  * if an NI is being deleted or it is now healthy, there
3410                  * is no need to keep it around in the recovery queue.
3411                  * The monitor thread is the only thread responsible for
3412                  * removing the NI from the recovery queue.
3413                  * Multiple threads can be adding NIs to the recovery
3414                  * queue.
3415                  */
3416                 healthv = atomic_read(&ni->ni_healthv);
3417
3418                 lnet_net_lock(0);
3419                 lnet_ni_lock(ni);
3420                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3421                     healthv == LNET_MAX_HEALTH_VALUE) {
3422                         list_del_init(&ni->ni_recovery);
3423                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3424                         lnet_ni_unlock(ni);
3425                         lnet_ni_decref_locked(ni, 0);
3426                         lnet_net_unlock(0);
3427                         continue;
3428                 }
3429
3430                 /*
3431                  * if the local NI failed recovery we must unlink the md.
3432                  * But we want to keep the local_ni on the recovery queue
3433                  * so we can continue the attempts to recover it.
3434                  */
3435                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3436                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3437                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3438                 }
3439
3440
3441                 lnet_ni_unlock(ni);
3442
3443                 if (now < ni->ni_next_ping) {
3444                         lnet_net_unlock(0);
3445                         continue;
3446                 }
3447
3448                 lnet_net_unlock(0);
3449
3450                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3451                        libcfs_nid2str(ni->ni_nid));
3452
3453                 lnet_ni_lock(ni);
3454                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3455                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3456                         lnet_ni_unlock(ni);
3457
3458                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3459                         if (!ev_info) {
3460                                 CERROR("out of memory. Can't recover %s\n",
3461                                        libcfs_nid2str(ni->ni_nid));
3462                                 lnet_ni_lock(ni);
3463                                 ni->ni_recovery_state &=
3464                                   ~LNET_NI_RECOVERY_PENDING;
3465                                 lnet_ni_unlock(ni);
3466                                 continue;
3467                         }
3468
3469                         mdh = ni->ni_ping_mdh;
3470                         /*
3471                          * Invalidate the ni mdh in case it's deleted.
3472                          * We'll unlink the mdh in this case below.
3473                          */
3474                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3475                         nid = ni->ni_nid;
3476
3477                         /*
3478                          * remove the NI from the local queue and drop the
3479                          * reference count to it while we're recovering
3480                          * it. The reason for that, is that the NI could
3481                          * be deleted, and the way the code is structured
3482                          * is if we don't drop the NI, then the deletion
3483                          * code will enter a loop waiting for the
3484                          * reference count to be removed while holding the
3485                          * ln_mutex_lock(). When we look up the peer to
3486                          * send to in lnet_select_pathway() we will try to
3487                          * lock the ln_mutex_lock() as well, leading to
3488                          * a deadlock. By dropping the refcount and
3489                          * removing it from the list, we allow for the NI
3490                          * to be removed, then we use the cached NID to
3491                          * look it up again. If it's gone, then we just
3492                          * continue examining the rest of the queue.
3493                          */
3494                         lnet_net_lock(0);
3495                         list_del_init(&ni->ni_recovery);
3496                         lnet_ni_decref_locked(ni, 0);
3497                         lnet_net_unlock(0);
3498
3499                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3500                         ev_info->mt_nid = nid;
3501                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3502                                             ev_info, the_lnet.ln_mt_handler,
3503                                             true);
3504                         /* lookup the nid again */
3505                         lnet_net_lock(0);
3506                         ni = lnet_nid2ni_locked(nid, 0);
3507                         if (!ni) {
3508                                 /*
3509                                  * the NI has been deleted when we dropped
3510                                  * the ref count
3511                                  */
3512                                 lnet_net_unlock(0);
3513                                 LNetMDUnlink(mdh);
3514                                 continue;
3515                         }
3516                         ni->ni_ping_count++;
3517
3518                         ni->ni_ping_mdh = mdh;
3519                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3520                                                         now);
3521
3522                         if (rc) {
3523                                 lnet_ni_lock(ni);
3524                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3525                                 lnet_ni_unlock(ni);
3526                         }
3527                         lnet_net_unlock(0);
3528                 } else
3529                         lnet_ni_unlock(ni);
3530         }
3531
3532         /*
3533          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3534          * reexamined in the next iteration.
3535          */
3536         list_splice_init(&processed_list, &local_queue);
3537         lnet_net_lock(0);
3538         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3539         lnet_net_unlock(0);
3540 }
3541
3542 static int
3543 lnet_resendqs_create(void)
3544 {
3545         struct list_head **resendqs;
3546         resendqs = lnet_create_array_of_queues();
3547
3548         if (!resendqs)
3549                 return -ENOMEM;
3550
3551         lnet_net_lock(LNET_LOCK_EX);
3552         the_lnet.ln_mt_resendqs = resendqs;
3553         lnet_net_unlock(LNET_LOCK_EX);
3554
3555         return 0;
3556 }
3557
3558 static void
3559 lnet_clean_local_ni_recoveryq(void)
3560 {
3561         struct lnet_ni *ni;
3562
3563         /* This is only called when the monitor thread has stopped */
3564         lnet_net_lock(0);
3565
3566         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3567                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3568                                 struct lnet_ni, ni_recovery);
3569                 list_del_init(&ni->ni_recovery);
3570                 lnet_ni_lock(ni);
3571                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3572                 lnet_ni_unlock(ni);
3573                 lnet_ni_decref_locked(ni, 0);
3574         }
3575
3576         lnet_net_unlock(0);
3577 }
3578
3579 static void
3580 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3581                                      bool force)
3582 {
3583         struct lnet_handle_md recovery_mdh;
3584
3585         LNetInvalidateMDHandle(&recovery_mdh);
3586
3587         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3588                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3589                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3590         }
3591         spin_unlock(&lpni->lpni_lock);
3592         lnet_net_unlock(cpt);
3593         if (!LNetMDHandleIsInvalid(recovery_mdh))
3594                 LNetMDUnlink(recovery_mdh);
3595         lnet_net_lock(cpt);
3596         spin_lock(&lpni->lpni_lock);
3597 }
3598
3599 static void
3600 lnet_clean_peer_ni_recoveryq(void)
3601 {
3602         struct lnet_peer_ni *lpni, *tmp;
3603
3604         lnet_net_lock(LNET_LOCK_EX);
3605
3606         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3607                                  lpni_recovery) {
3608                 list_del_init(&lpni->lpni_recovery);
3609                 spin_lock(&lpni->lpni_lock);
3610                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3611                 spin_unlock(&lpni->lpni_lock);
3612                 lnet_peer_ni_decref_locked(lpni);
3613         }
3614
3615         lnet_net_unlock(LNET_LOCK_EX);
3616 }
3617
3618 static void
3619 lnet_clean_resendqs(void)
3620 {
3621         struct lnet_msg *msg, *tmp;
3622         LIST_HEAD(msgs);
3623         int i;
3624
3625         cfs_cpt_for_each(i, lnet_cpt_table()) {
3626                 lnet_net_lock(i);
3627                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3628                 lnet_net_unlock(i);
3629                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3630                         list_del_init(&msg->msg_list);
3631                         msg->msg_no_resend = true;
3632                         lnet_finalize(msg, -ESHUTDOWN);
3633                 }
3634         }
3635
3636         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3637 }
3638
3639 static void
3640 lnet_recover_peer_nis(void)
3641 {
3642         struct lnet_mt_event_info *ev_info;
3643         LIST_HEAD(processed_list);
3644         LIST_HEAD(local_queue);
3645         struct lnet_handle_md mdh;
3646         struct lnet_peer_ni *lpni;
3647         struct lnet_peer_ni *tmp;
3648         lnet_nid_t nid;
3649         int healthv;
3650         int rc;
3651         time64_t now;
3652
3653         /*
3654          * Always use cpt 0 for locking across all interactions with
3655          * ln_mt_peerNIRecovq
3656          */
3657         lnet_net_lock(0);
3658         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3659                          &local_queue);
3660         lnet_net_unlock(0);
3661
3662         now = ktime_get_seconds();
3663
3664         list_for_each_entry_safe(lpni, tmp, &local_queue,
3665                                  lpni_recovery) {
3666                 /*
3667                  * The same protection strategy is used here as is in the
3668                  * local recovery case.
3669                  */
3670                 lnet_net_lock(0);
3671                 healthv = atomic_read(&lpni->lpni_healthv);
3672                 spin_lock(&lpni->lpni_lock);
3673                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3674                     healthv == LNET_MAX_HEALTH_VALUE) {
3675                         list_del_init(&lpni->lpni_recovery);
3676                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3677                         spin_unlock(&lpni->lpni_lock);
3678                         lnet_peer_ni_decref_locked(lpni);
3679                         lnet_net_unlock(0);
3680                         continue;
3681                 }
3682
3683                 /*
3684                  * If the peer NI has failed recovery we must unlink the
3685                  * md. But we want to keep the peer ni on the recovery
3686                  * queue so we can try to continue recovering it
3687                  */
3688                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3689                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3690                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3691                 }
3692
3693                 spin_unlock(&lpni->lpni_lock);
3694
3695                 if (now < lpni->lpni_next_ping) {
3696                         lnet_net_unlock(0);
3697                         continue;
3698                 }
3699
3700                 lnet_net_unlock(0);
3701
3702                 /*
3703                  * NOTE: we're racing with peer deletion from user space.
3704                  * It's possible that a peer is deleted after we check its
3705                  * state. In this case the recovery can create a new peer
3706                  */
3707                 spin_lock(&lpni->lpni_lock);
3708                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3709                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3710                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3711                         spin_unlock(&lpni->lpni_lock);
3712
3713                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3714                         if (!ev_info) {
3715                                 CERROR("out of memory. Can't recover %s\n",
3716                                        libcfs_nid2str(lpni->lpni_nid));
3717                                 spin_lock(&lpni->lpni_lock);
3718                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3719                                 spin_unlock(&lpni->lpni_lock);
3720                                 continue;
3721                         }
3722
3723                         /* look at the comments in lnet_recover_local_nis() */
3724                         mdh = lpni->lpni_recovery_ping_mdh;
3725                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3726                         nid = lpni->lpni_nid;
3727                         lnet_net_lock(0);
3728                         list_del_init(&lpni->lpni_recovery);
3729                         lnet_peer_ni_decref_locked(lpni);
3730                         lnet_net_unlock(0);
3731
3732                         ev_info->mt_type = MT_TYPE_PEER_NI;
3733                         ev_info->mt_nid = nid;
3734                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3735                                             ev_info, the_lnet.ln_mt_handler,
3736                                             true);
3737                         lnet_net_lock(0);
3738                         /*
3739                          * lnet_find_peer_ni_locked() grabs a refcount for
3740                          * us. No need to take it explicitly.
3741                          */
3742                         lpni = lnet_find_peer_ni_locked(nid);
3743                         if (!lpni) {
3744                                 lnet_net_unlock(0);
3745                                 LNetMDUnlink(mdh);
3746                                 continue;
3747                         }
3748
3749                         lpni->lpni_ping_count++;
3750
3751                         lpni->lpni_recovery_ping_mdh = mdh;
3752
3753                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3754                                                              &processed_list,
3755                                                              now);
3756                         if (rc) {
3757                                 spin_lock(&lpni->lpni_lock);
3758                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3759                                 spin_unlock(&lpni->lpni_lock);
3760                         }
3761
3762                         /* Drop the ref taken by lnet_find_peer_ni_locked() */
3763                         lnet_peer_ni_decref_locked(lpni);
3764                         lnet_net_unlock(0);
3765                 } else
3766                         spin_unlock(&lpni->lpni_lock);
3767         }
3768
3769         list_splice_init(&processed_list, &local_queue);
3770         lnet_net_lock(0);
3771         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3772         lnet_net_unlock(0);
3773 }
3774
3775 static int
3776 lnet_monitor_thread(void *arg)
3777 {
3778         time64_t rsp_timeout = 0;
3779         time64_t now;
3780
3781         wait_for_completion(&the_lnet.ln_started);
3782         /*
3783          * The monitor thread takes care of the following:
3784          *  1. Checks the aliveness of routers
3785          *  2. Checks if there are messages on the resend queue to resend
3786          *     them.
3787          *  3. Check if there are any NIs on the local recovery queue and
3788          *     pings them
3789          *  4. Checks if there are any NIs on the remote recovery queue
3790          *     and pings them.
3791          */
3792         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3793                 now = ktime_get_real_seconds();
3794
3795                 if (lnet_router_checker_active())
3796                         lnet_check_routers();
3797
3798                 lnet_resend_pending_msgs();
3799
3800                 if (now >= rsp_timeout) {
3801                         lnet_finalize_expired_responses();
3802                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3803                 }
3804
3805                 lnet_recover_local_nis();
3806                 lnet_recover_peer_nis();
3807
3808                 /*
3809                  * TODO do we need to check if we should sleep without
3810                  * timeout?  Technically, an active system will always
3811                  * have messages in flight so this check will always
3812                  * evaluate to false. And on an idle system do we care
3813                  * if we wake up every 1 second? Although, we've seen
3814                  * cases where we get a complaint that an idle thread
3815                  * is waking up unnecessarily.
3816                  */
3817                 wait_for_completion_interruptible_timeout(
3818                         &the_lnet.ln_mt_wait_complete,
3819                         cfs_time_seconds(1));
3820                 /* Must re-init the completion before testing anything,
3821                  * including ln_mt_state.
3822                  */
3823                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3824         }
3825
3826         /* Shutting down */
3827         lnet_net_lock(LNET_LOCK_EX);
3828         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3829         lnet_net_unlock(LNET_LOCK_EX);
3830
3831         /* signal that the monitor thread is exiting */
3832         up(&the_lnet.ln_mt_signal);
3833
3834         return 0;
3835 }
3836
3837 /*
3838  * lnet_send_ping
3839  * Sends a ping.
3840  * Returns == 0 if success
3841  * Returns > 0 if LNetMDBind or prior fails
3842  * Returns < 0 if LNetGet fails
3843  */
3844 int
3845 lnet_send_ping(lnet_nid_t dest_nid,
3846                struct lnet_handle_md *mdh, int nnis,
3847                void *user_data, lnet_handler_t handler, bool recovery)
3848 {
3849         struct lnet_md md = { NULL };
3850         struct lnet_process_id id;
3851         struct lnet_ping_buffer *pbuf;
3852         int rc;
3853
3854         if (dest_nid == LNET_NID_ANY) {
3855                 rc = -EHOSTUNREACH;
3856                 goto fail_error;
3857         }
3858
3859         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3860         if (!pbuf) {
3861                 rc = ENOMEM;
3862                 goto fail_error;
3863         }
3864
3865         /* initialize md content */
3866         md.start     = &pbuf->pb_info;
3867         md.length    = LNET_PING_INFO_SIZE(nnis);
3868         md.threshold = 2; /* GET/REPLY */
3869         md.max_size  = 0;
3870         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3871         md.user_ptr  = user_data;
3872         md.handler   = handler;
3873
3874         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3875         if (rc) {
3876                 lnet_ping_buffer_decref(pbuf);
3877                 CERROR("Can't bind MD: %d\n", rc);
3878                 rc = -rc; /* change the rc to positive */
3879                 goto fail_error;
3880         }
3881         id.pid = LNET_PID_LUSTRE;
3882         id.nid = dest_nid;
3883
3884         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3885                      LNET_RESERVED_PORTAL,
3886                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3887
3888         if (rc)
3889                 goto fail_unlink_md;
3890
3891         return 0;
3892
3893 fail_unlink_md:
3894         LNetMDUnlink(*mdh);
3895         LNetInvalidateMDHandle(mdh);
3896 fail_error:
3897         return rc;
3898 }
3899
3900 static void
3901 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3902                            int status, bool send, bool unlink_event)
3903 {
3904         lnet_nid_t nid = ev_info->mt_nid;
3905
3906         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3907                 struct lnet_ni *ni;
3908
3909                 lnet_net_lock(0);
3910                 ni = lnet_nid2ni_locked(nid, 0);
3911                 if (!ni) {
3912                         lnet_net_unlock(0);
3913                         return;
3914                 }
3915                 lnet_ni_lock(ni);
3916                 if (!send || (send && status != 0))
3917                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3918                 if (status)
3919                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3920                 lnet_ni_unlock(ni);
3921                 lnet_net_unlock(0);
3922
3923                 if (status != 0) {
3924                         CERROR("local NI (%s) recovery failed with %d\n",
3925                                libcfs_nid2str(nid), status);
3926                         return;
3927                 }
3928                 /*
3929                  * need to increment healthv for the ni here, because in
3930                  * the lnet_finalize() path we don't have access to this
3931                  * NI. And in order to get access to it, we'll need to
3932                  * carry forward too much information.
3933                  * In the peer case, it'll naturally be incremented
3934                  */
3935                 if (!unlink_event)
3936                         lnet_inc_healthv(&ni->ni_healthv,
3937                                          lnet_health_sensitivity);
3938         } else {
3939                 struct lnet_peer_ni *lpni;
3940                 int cpt;
3941
3942                 cpt = lnet_net_lock_current();
3943                 lpni = lnet_find_peer_ni_locked(nid);
3944                 if (!lpni) {
3945                         lnet_net_unlock(cpt);
3946                         return;
3947                 }
3948                 spin_lock(&lpni->lpni_lock);
3949                 if (!send || (send && status != 0))
3950                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3951                 if (status)
3952                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3953                 spin_unlock(&lpni->lpni_lock);
3954                 lnet_peer_ni_decref_locked(lpni);
3955                 lnet_net_unlock(cpt);
3956
3957                 if (status != 0)
3958                         CERROR("peer NI (%s) recovery failed with %d\n",
3959                                libcfs_nid2str(nid), status);
3960         }
3961 }
3962
3963 void
3964 lnet_mt_event_handler(struct lnet_event *event)
3965 {
3966         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
3967         struct lnet_ping_buffer *pbuf;
3968
3969         /* TODO: remove assert */
3970         LASSERT(event->type == LNET_EVENT_REPLY ||
3971                 event->type == LNET_EVENT_SEND ||
3972                 event->type == LNET_EVENT_UNLINK);
3973
3974         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3975                event->status);
3976
3977         switch (event->type) {
3978         case LNET_EVENT_UNLINK:
3979                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3980                        libcfs_nid2str(ev_info->mt_nid));
3981                 /* fallthrough */
3982         case LNET_EVENT_REPLY:
3983                 lnet_handle_recovery_reply(ev_info, event->status, false,
3984                                            event->type == LNET_EVENT_UNLINK);
3985                 break;
3986         case LNET_EVENT_SEND:
3987                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3988                                libcfs_nid2str(ev_info->mt_nid),
3989                                (event->status) ? "unsuccessfully" :
3990                                "successfully", event->status);
3991                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
3992                 break;
3993         default:
3994                 CERROR("Unexpected event: %d\n", event->type);
3995                 break;
3996         }
3997         if (event->unlinked) {
3998                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3999                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4000                 lnet_ping_buffer_decref(pbuf);
4001         }
4002 }
4003
4004 static int
4005 lnet_rsp_tracker_create(void)
4006 {
4007         struct list_head **rstqs;
4008         rstqs = lnet_create_array_of_queues();
4009
4010         if (!rstqs)
4011                 return -ENOMEM;
4012
4013         the_lnet.ln_mt_rstq = rstqs;
4014
4015         return 0;
4016 }
4017
4018 static void
4019 lnet_rsp_tracker_clean(void)
4020 {
4021         lnet_finalize_expired_responses();
4022
4023         cfs_percpt_free(the_lnet.ln_mt_rstq);
4024         the_lnet.ln_mt_rstq = NULL;
4025 }
4026
4027 int lnet_monitor_thr_start(void)
4028 {
4029         int rc = 0;
4030         struct task_struct *task;
4031
4032         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4033                 return -EALREADY;
4034
4035         rc = lnet_resendqs_create();
4036         if (rc)
4037                 return rc;
4038
4039         rc = lnet_rsp_tracker_create();
4040         if (rc)
4041                 goto clean_queues;
4042
4043         sema_init(&the_lnet.ln_mt_signal, 0);
4044
4045         lnet_net_lock(LNET_LOCK_EX);
4046         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4047         lnet_net_unlock(LNET_LOCK_EX);
4048         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4049         if (IS_ERR(task)) {
4050                 rc = PTR_ERR(task);
4051                 CERROR("Can't start monitor thread: %d\n", rc);
4052                 goto clean_thread;
4053         }
4054
4055         return 0;
4056
4057 clean_thread:
4058         lnet_net_lock(LNET_LOCK_EX);
4059         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4060         lnet_net_unlock(LNET_LOCK_EX);
4061         /* block until event callback signals exit */
4062         down(&the_lnet.ln_mt_signal);
4063         /* clean up */
4064         lnet_net_lock(LNET_LOCK_EX);
4065         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4066         lnet_net_unlock(LNET_LOCK_EX);
4067         lnet_rsp_tracker_clean();
4068         lnet_clean_local_ni_recoveryq();
4069         lnet_clean_peer_ni_recoveryq();
4070         lnet_clean_resendqs();
4071         the_lnet.ln_mt_handler = NULL;
4072         return rc;
4073 clean_queues:
4074         lnet_rsp_tracker_clean();
4075         lnet_clean_local_ni_recoveryq();
4076         lnet_clean_peer_ni_recoveryq();
4077         lnet_clean_resendqs();
4078         return rc;
4079 }
4080
4081 void lnet_monitor_thr_stop(void)
4082 {
4083         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4084                 return;
4085
4086         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4087         lnet_net_lock(LNET_LOCK_EX);
4088         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4089         lnet_net_unlock(LNET_LOCK_EX);
4090
4091         /* tell the monitor thread that we're shutting down */
4092         complete(&the_lnet.ln_mt_wait_complete);
4093
4094         /* block until monitor thread signals that it's done */
4095         down(&the_lnet.ln_mt_signal);
4096         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4097
4098         /* perform cleanup tasks */
4099         lnet_rsp_tracker_clean();
4100         lnet_clean_local_ni_recoveryq();
4101         lnet_clean_peer_ni_recoveryq();
4102         lnet_clean_resendqs();
4103 }
4104
4105 void
4106 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4107                   __u32 msg_type)
4108 {
4109         lnet_net_lock(cpt);
4110         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4111         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4112         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4113         lnet_net_unlock(cpt);
4114
4115         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4116 }
4117
4118 static void
4119 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4120 {
4121         struct lnet_hdr *hdr = &msg->msg_hdr;
4122
4123         if (msg->msg_wanted != 0)
4124                 lnet_setpayloadbuffer(msg);
4125
4126         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4127
4128         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4129          * it back into the ACK during lnet_finalize() */
4130         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4131                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4132
4133         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4134                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4135 }
4136
4137 static int
4138 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4139 {
4140         struct lnet_hdr         *hdr = &msg->msg_hdr;
4141         struct lnet_match_info  info;
4142         int                     rc;
4143         bool                    ready_delay;
4144
4145         /* Convert put fields to host byte order */
4146         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4147         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4148         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4149
4150         /* Primary peer NID. */
4151         info.mi_id.nid  = msg->msg_initiator;
4152         info.mi_id.pid  = hdr->src_pid;
4153         info.mi_opc     = LNET_MD_OP_PUT;
4154         info.mi_portal  = hdr->msg.put.ptl_index;
4155         info.mi_rlength = hdr->payload_length;
4156         info.mi_roffset = hdr->msg.put.offset;
4157         info.mi_mbits   = hdr->msg.put.match_bits;
4158         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4159
4160         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4161         ready_delay = msg->msg_rx_ready_delay;
4162
4163  again:
4164         rc = lnet_ptl_match_md(&info, msg);
4165         switch (rc) {
4166         default:
4167                 LBUG();
4168
4169         case LNET_MATCHMD_OK:
4170                 lnet_recv_put(ni, msg);
4171                 return 0;
4172
4173         case LNET_MATCHMD_NONE:
4174                 if (ready_delay)
4175                         /* no eager_recv or has already called it, should
4176                          * have been attached on delayed list */
4177                         return 0;
4178
4179                 rc = lnet_ni_eager_recv(ni, msg);
4180                 if (rc == 0) {
4181                         ready_delay = true;
4182                         goto again;
4183                 }
4184                 /* fall through */
4185
4186         case LNET_MATCHMD_DROP:
4187                 CNETERR("Dropping PUT from %s portal %d match %llu"
4188                         " offset %d length %d: %d\n",
4189                         libcfs_id2str(info.mi_id), info.mi_portal,
4190                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4191
4192                 return -ENOENT; /* -ve: OK but no match */
4193         }
4194 }
4195
4196 static int
4197 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4198 {
4199         struct lnet_match_info info;
4200         struct lnet_hdr *hdr = &msg->msg_hdr;
4201         struct lnet_process_id source_id;
4202         struct lnet_handle_wire reply_wmd;
4203         int rc;
4204
4205         /* Convert get fields to host byte order */
4206         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4207         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4208         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4209         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4210
4211         source_id.nid = hdr->src_nid;
4212         source_id.pid = hdr->src_pid;
4213         /* Primary peer NID */
4214         info.mi_id.nid  = msg->msg_initiator;
4215         info.mi_id.pid  = hdr->src_pid;
4216         info.mi_opc     = LNET_MD_OP_GET;
4217         info.mi_portal  = hdr->msg.get.ptl_index;
4218         info.mi_rlength = hdr->msg.get.sink_length;
4219         info.mi_roffset = hdr->msg.get.src_offset;
4220         info.mi_mbits   = hdr->msg.get.match_bits;
4221         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4222
4223         rc = lnet_ptl_match_md(&info, msg);
4224         if (rc == LNET_MATCHMD_DROP) {
4225                 CNETERR("Dropping GET from %s portal %d match %llu"
4226                         " offset %d length %d\n",
4227                         libcfs_id2str(info.mi_id), info.mi_portal,
4228                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4229                 return -ENOENT; /* -ve: OK but no match */
4230         }
4231
4232         LASSERT(rc == LNET_MATCHMD_OK);
4233
4234         lnet_build_msg_event(msg, LNET_EVENT_GET);
4235
4236         reply_wmd = hdr->msg.get.return_wmd;
4237
4238         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4239                        msg->msg_offset, msg->msg_wanted);
4240
4241         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4242
4243         if (rdma_get) {
4244                 /* The LND completes the REPLY from her recv procedure */
4245                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4246                              msg->msg_offset, msg->msg_len, msg->msg_len);
4247                 return 0;
4248         }
4249
4250         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4251         msg->msg_receiving = 0;
4252
4253         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
4254         if (rc < 0) {
4255                 /* didn't get as far as lnet_ni_send() */
4256                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4257                        libcfs_nid2str(ni->ni_nid),
4258                        libcfs_id2str(info.mi_id), rc);
4259
4260                 lnet_finalize(msg, rc);
4261         }
4262
4263         return 0;
4264 }
4265
4266 static int
4267 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4268 {
4269         void *private = msg->msg_private;
4270         struct lnet_hdr *hdr = &msg->msg_hdr;
4271         struct lnet_process_id src = {0};
4272         struct lnet_libmd *md;
4273         unsigned int rlength;
4274         unsigned int mlength;
4275         int cpt;
4276
4277         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4278         lnet_res_lock(cpt);
4279
4280         src.nid = hdr->src_nid;
4281         src.pid = hdr->src_pid;
4282
4283         /* NB handles only looked up by creator (no flips) */
4284         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4285         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4286                 CNETERR("%s: Dropping REPLY from %s for %s "
4287                         "MD %#llx.%#llx\n",
4288                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4289                         (md == NULL) ? "invalid" : "inactive",
4290                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4291                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4292                 if (md != NULL && md->md_me != NULL)
4293                         CERROR("REPLY MD also attached to portal %d\n",
4294                                md->md_me->me_portal);
4295
4296                 lnet_res_unlock(cpt);
4297                 return -ENOENT; /* -ve: OK but no match */
4298         }
4299
4300         LASSERT(md->md_offset == 0);
4301
4302         rlength = hdr->payload_length;
4303         mlength = min(rlength, md->md_length);
4304
4305         if (mlength < rlength &&
4306             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4307                 CNETERR("%s: Dropping REPLY from %s length %d "
4308                         "for MD %#llx would overflow (%d)\n",
4309                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4310                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4311                         mlength);
4312                 lnet_res_unlock(cpt);
4313                 return -ENOENT; /* -ve: OK but no match */
4314         }
4315
4316         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4317                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4318                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4319
4320         lnet_msg_attach_md(msg, md, 0, mlength);
4321
4322         if (mlength != 0)
4323                 lnet_setpayloadbuffer(msg);
4324
4325         lnet_res_unlock(cpt);
4326
4327         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4328
4329         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4330         return 0;
4331 }
4332
4333 static int
4334 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4335 {
4336         struct lnet_hdr *hdr = &msg->msg_hdr;
4337         struct lnet_process_id src = {0};
4338         struct lnet_libmd *md;
4339         int cpt;
4340
4341         src.nid = hdr->src_nid;
4342         src.pid = hdr->src_pid;
4343
4344         /* Convert ack fields to host byte order */
4345         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4346         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4347
4348         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4349         lnet_res_lock(cpt);
4350
4351         /* NB handles only looked up by creator (no flips) */
4352         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4353         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4354                 /* Don't moan; this is expected */
4355                 CDEBUG(D_NET,
4356                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4357                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4358                        (md == NULL) ? "invalid" : "inactive",
4359                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4360                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4361                 if (md != NULL && md->md_me != NULL)
4362                         CERROR("Source MD also attached to portal %d\n",
4363                                md->md_me->me_portal);
4364
4365                 lnet_res_unlock(cpt);
4366                 return -ENOENT;                  /* -ve! */
4367         }
4368
4369         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4370                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4371                hdr->msg.ack.dst_wmd.wh_object_cookie);
4372
4373         lnet_msg_attach_md(msg, md, 0, 0);
4374
4375         lnet_res_unlock(cpt);
4376
4377         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4378
4379         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4380         return 0;
4381 }
4382
4383 /**
4384  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4385  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because w/o buffer
4386  * \retval -ve                  error code
4387  */
4388 int
4389 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4390 {
4391         int     rc = 0;
4392
4393         if (!the_lnet.ln_routing)
4394                 return -ECANCELED;
4395
4396         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4397             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4398                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4399                         msg->msg_rx_ready_delay = 1;
4400                 } else {
4401                         lnet_net_unlock(msg->msg_rx_cpt);
4402                         rc = lnet_ni_eager_recv(ni, msg);
4403                         lnet_net_lock(msg->msg_rx_cpt);
4404                 }
4405         }
4406
4407         if (rc == 0)
4408                 rc = lnet_post_routed_recv_locked(msg, 0);
4409         return rc;
4410 }
4411
4412 int
4413 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4414 {
4415         int     rc;
4416
4417         switch (msg->msg_type) {
4418         case LNET_MSG_ACK:
4419                 rc = lnet_parse_ack(ni, msg);
4420                 break;
4421         case LNET_MSG_PUT:
4422                 rc = lnet_parse_put(ni, msg);
4423                 break;
4424         case LNET_MSG_GET:
4425                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4426                 break;
4427         case LNET_MSG_REPLY:
4428                 rc = lnet_parse_reply(ni, msg);
4429                 break;
4430         default: /* prevent an unused label if !kernel */
4431                 LASSERT(0);
4432                 return -EPROTO;
4433         }
4434
4435         LASSERT(rc == 0 || rc == -ENOENT);
4436         return rc;
4437 }
4438
4439 char *
4440 lnet_msgtyp2str (int type)
4441 {
4442         switch (type) {
4443         case LNET_MSG_ACK:
4444                 return ("ACK");
4445         case LNET_MSG_PUT:
4446                 return ("PUT");
4447         case LNET_MSG_GET:
4448                 return ("GET");
4449         case LNET_MSG_REPLY:
4450                 return ("REPLY");
4451         case LNET_MSG_HELLO:
4452                 return ("HELLO");
4453         default:
4454                 return ("<UNKNOWN>");
4455         }
4456 }
4457
4458 int
4459 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4460            void *private, int rdma_req)
4461 {
4462         struct lnet_peer_ni *lpni;
4463         struct lnet_msg *msg;
4464         __u32 payload_length;
4465         lnet_pid_t dest_pid;
4466         lnet_nid_t dest_nid;
4467         lnet_nid_t src_nid;
4468         bool push = false;
4469         int for_me;
4470         __u32 type;
4471         int rc = 0;
4472         int cpt;
4473
4474         LASSERT (!in_interrupt ());
4475
4476         type = le32_to_cpu(hdr->type);
4477         src_nid = le64_to_cpu(hdr->src_nid);
4478         dest_nid = le64_to_cpu(hdr->dest_nid);
4479         dest_pid = le32_to_cpu(hdr->dest_pid);
4480         payload_length = le32_to_cpu(hdr->payload_length);
4481
4482         for_me = (ni->ni_nid == dest_nid);
4483         cpt = lnet_cpt_of_nid(from_nid, ni);
4484
4485         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4486                 libcfs_nid2str(dest_nid),
4487                 libcfs_nid2str(ni->ni_nid),
4488                 libcfs_nid2str(src_nid),
4489                 lnet_msgtyp2str(type),
4490                 (for_me) ? "for me" : "routed");
4491
4492         switch (type) {
4493         case LNET_MSG_ACK:
4494         case LNET_MSG_GET:
4495                 if (payload_length > 0) {
4496                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4497                                libcfs_nid2str(from_nid),
4498                                libcfs_nid2str(src_nid),
4499                                lnet_msgtyp2str(type), payload_length);
4500                         return -EPROTO;
4501                 }
4502                 break;
4503
4504         case LNET_MSG_PUT:
4505         case LNET_MSG_REPLY:
4506                 if (payload_length >
4507                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4508                         CERROR("%s, src %s: bad %s payload %d "
4509                                "(%d max expected)\n",
4510                                libcfs_nid2str(from_nid),
4511                                libcfs_nid2str(src_nid),
4512                                lnet_msgtyp2str(type),
4513                                payload_length,
4514                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4515                         return -EPROTO;
4516                 }
4517                 break;
4518
4519         default:
4520                 CERROR("%s, src %s: Bad message type 0x%x\n",
4521                        libcfs_nid2str(from_nid),
4522                        libcfs_nid2str(src_nid), type);
4523                 return -EPROTO;
4524         }
4525
4526         if (the_lnet.ln_routing &&
4527             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4528                 lnet_ni_lock(ni);
4529                 spin_lock(&ni->ni_net->net_lock);
4530                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4531                 spin_unlock(&ni->ni_net->net_lock);
4532                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4533                 lnet_ni_unlock(ni);
4534         }
4535
4536         if (push)
4537                 lnet_push_update_to_peers(1);
4538
4539         /* Regard a bad destination NID as a protocol error.  Senders should
4540          * know what they're doing; if they don't they're misconfigured, buggy
4541          * or malicious so we chop them off at the knees :) */
4542
4543         if (!for_me) {
4544                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4545                         /* should have gone direct */
4546                         CERROR("%s, src %s: Bad dest nid %s "
4547                                "(should have been sent direct)\n",
4548                                 libcfs_nid2str(from_nid),
4549                                 libcfs_nid2str(src_nid),
4550                                 libcfs_nid2str(dest_nid));
4551                         return -EPROTO;
4552                 }
4553
4554                 if (lnet_islocalnid(dest_nid)) {
4555                         /* dest is another local NI; sender should have used
4556                          * this node's NID on its own network */
4557                         CERROR("%s, src %s: Bad dest nid %s "
4558                                "(it's my nid but on a different network)\n",
4559                                 libcfs_nid2str(from_nid),
4560                                 libcfs_nid2str(src_nid),
4561                                 libcfs_nid2str(dest_nid));
4562                         return -EPROTO;
4563                 }
4564
4565                 if (rdma_req && type == LNET_MSG_GET) {
4566                         CERROR("%s, src %s: Bad optimized GET for %s "
4567                                "(final destination must be me)\n",
4568                                 libcfs_nid2str(from_nid),
4569                                 libcfs_nid2str(src_nid),
4570                                 libcfs_nid2str(dest_nid));
4571                         return -EPROTO;
4572                 }
4573
4574                 if (!the_lnet.ln_routing) {
4575                         CERROR("%s, src %s: Dropping message for %s "
4576                                "(routing not enabled)\n",
4577                                 libcfs_nid2str(from_nid),
4578                                 libcfs_nid2str(src_nid),
4579                                 libcfs_nid2str(dest_nid));
4580                         goto drop;
4581                 }
4582         }
4583
4584         /* Message looks OK; we're not going to return an error, so we MUST
4585          * call back lnd_recv() come what may... */
4586
4587         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4588             fail_peer(src_nid, 0)) {                    /* shall we now? */
4589                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4590                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4591                        lnet_msgtyp2str(type));
4592                 goto drop;
4593         }
4594
4595         if (!list_empty(&the_lnet.ln_drop_rules) &&
4596             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4597                 CDEBUG(D_NET,
4598                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4599                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4600                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4601                 goto drop;
4602         }
4603
4604         msg = lnet_msg_alloc();
4605         if (msg == NULL) {
4606                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4607                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4608                        lnet_msgtyp2str(type));
4609                 goto drop;
4610         }
4611
4612         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4613          * pointers NULL etc */
4614
4615         msg->msg_type = type;
4616         msg->msg_private = private;
4617         msg->msg_receiving = 1;
4618         msg->msg_rdma_get = rdma_req;
4619         msg->msg_len = msg->msg_wanted = payload_length;
4620         msg->msg_offset = 0;
4621         msg->msg_hdr = *hdr;
4622         /* for building message event */
4623         msg->msg_from = from_nid;
4624         if (!for_me) {
4625                 msg->msg_target.pid     = dest_pid;
4626                 msg->msg_target.nid     = dest_nid;
4627                 msg->msg_routing        = 1;
4628
4629         } else {
4630                 /* convert common msg->hdr fields to host byteorder */
4631                 msg->msg_hdr.type       = type;
4632                 msg->msg_hdr.src_nid    = src_nid;
4633                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4634                 msg->msg_hdr.dest_nid   = dest_nid;
4635                 msg->msg_hdr.dest_pid   = dest_pid;
4636                 msg->msg_hdr.payload_length = payload_length;
4637         }
4638
4639         lnet_net_lock(cpt);
4640         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4641         if (IS_ERR(lpni)) {
4642                 lnet_net_unlock(cpt);
4643                 CERROR("%s, src %s: Dropping %s "
4644                        "(error %ld looking up sender)\n",
4645                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4646                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4647                 lnet_msg_free(msg);
4648                 if (rc == -ESHUTDOWN)
4649                         /* We are shutting down.  Don't do anything more */
4650                         return 0;
4651                 goto drop;
4652         }
4653
4654         /* If this message was forwarded to us from a router then we may need
4655          * to update router aliveness or check for an asymmetrical route
4656          * (or both)
4657          */
4658         if (((lnet_drop_asym_route && for_me) ||
4659              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4660             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4661                 __u32 src_net_id = LNET_NIDNET(src_nid);
4662                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4663                 struct lnet_route *route;
4664                 bool found = false;
4665
4666                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4667                         if (route->lr_net == src_net_id) {
4668                                 found = true;
4669                                 /* If we're transitioning the gateway from
4670                                  * dead -> alive, and discovery is disabled
4671                                  * locally or on the gateway, then we need to
4672                                  * update the cached route aliveness for each
4673                                  * route to the src_nid's net.
4674                                  *
4675                                  * Otherwise, we're only checking for
4676                                  * symmetrical route, and we can break the
4677                                  * loop
4678                                  */
4679                                 if (!gw->lp_alive &&
4680                                     lnet_is_discovery_disabled(gw))
4681                                         lnet_set_route_aliveness(route, true);
4682                                 else
4683                                         break;
4684                         }
4685                 }
4686                 if (lnet_drop_asym_route && for_me && !found) {
4687                         lnet_net_unlock(cpt);
4688                         /* we would not use from_nid to route a message to
4689                          * src_nid
4690                          * => asymmetric routing detected but forbidden
4691                          */
4692                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4693                                libcfs_nid2str(from_nid),
4694                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4695                         lnet_msg_free(msg);
4696                         goto drop;
4697                 }
4698                 if (!gw->lp_alive) {
4699                         struct lnet_peer_net *lpn;
4700                         struct lnet_peer_ni *lpni2;
4701
4702                         gw->lp_alive = true;
4703                         /* Mark all remote NIs on src_nid's net UP */
4704                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4705                         if (lpn)
4706                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4707                                                     lpni_peer_nis)
4708                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4709                 }
4710         }
4711
4712         lpni->lpni_last_alive = ktime_get_seconds();
4713
4714         msg->msg_rxpeer = lpni;
4715         msg->msg_rxni = ni;
4716         lnet_ni_addref_locked(ni, cpt);
4717         /* Multi-Rail: Primary NID of source. */
4718         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4719
4720         /*
4721          * mark the status of this lpni as UP since we received a message
4722          * from it. The ping response reports back the ns_status which is
4723          * marked on the remote as up or down and we cache it here.
4724          */
4725         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4726
4727         lnet_msg_commit(msg, cpt);
4728
4729         /* message delay simulation */
4730         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4731                      lnet_delay_rule_match_locked(hdr, msg))) {
4732                 lnet_net_unlock(cpt);
4733                 return 0;
4734         }
4735
4736         if (!for_me) {
4737                 rc = lnet_parse_forward_locked(ni, msg);
4738                 lnet_net_unlock(cpt);
4739
4740                 if (rc < 0)
4741                         goto free_drop;
4742
4743                 if (rc == LNET_CREDIT_OK) {
4744                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4745                                      0, payload_length, payload_length);
4746                 }
4747                 return 0;
4748         }
4749
4750         lnet_net_unlock(cpt);
4751
4752         rc = lnet_parse_local(ni, msg);
4753         if (rc != 0)
4754                 goto free_drop;
4755         return 0;
4756
4757  free_drop:
4758         LASSERT(msg->msg_md == NULL);
4759         lnet_finalize(msg, rc);
4760
4761  drop:
4762         lnet_drop_message(ni, cpt, private, payload_length, type);
4763         return 0;
4764 }
4765 EXPORT_SYMBOL(lnet_parse);
4766
4767 void
4768 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4769 {
4770         while (!list_empty(head)) {
4771                 struct lnet_process_id id = {0};
4772                 struct lnet_msg *msg;
4773
4774                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4775                 list_del(&msg->msg_list);
4776
4777                 id.nid = msg->msg_hdr.src_nid;
4778                 id.pid = msg->msg_hdr.src_pid;
4779
4780                 LASSERT(msg->msg_md == NULL);
4781                 LASSERT(msg->msg_rx_delayed);
4782                 LASSERT(msg->msg_rxpeer != NULL);
4783                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4784
4785                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4786                       " offset %d length %d: %s\n",
4787                       libcfs_id2str(id),
4788                       msg->msg_hdr.msg.put.ptl_index,
4789                       msg->msg_hdr.msg.put.match_bits,
4790                       msg->msg_hdr.msg.put.offset,
4791                       msg->msg_hdr.payload_length, reason);
4792
4793                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4794                  * called lnet_drop_message(), so I just hang onto msg as well
4795                  * until that's done */
4796
4797                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4798                                   msg->msg_private, msg->msg_len,
4799                                   msg->msg_type);
4800
4801                 msg->msg_no_resend = true;
4802                 /*
4803                  * NB: message will not generate event because w/o attached MD,
4804                  * but we still should give error code so lnet_msg_decommit()
4805                  * can skip counters operations and other checks.
4806                  */
4807                 lnet_finalize(msg, -ENOENT);
4808         }
4809 }
4810
4811 void
4812 lnet_recv_delayed_msg_list(struct list_head *head)
4813 {
4814         while (!list_empty(head)) {
4815                 struct lnet_msg *msg;
4816                 struct lnet_process_id id;
4817
4818                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4819                 list_del(&msg->msg_list);
4820
4821                 /* md won't disappear under me, since each msg
4822                  * holds a ref on it */
4823
4824                 id.nid = msg->msg_hdr.src_nid;
4825                 id.pid = msg->msg_hdr.src_pid;
4826
4827                 LASSERT(msg->msg_rx_delayed);
4828                 LASSERT(msg->msg_md != NULL);
4829                 LASSERT(msg->msg_rxpeer != NULL);
4830                 LASSERT(msg->msg_rxni != NULL);
4831                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4832
4833                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4834                        "match %llu offset %d length %d.\n",
4835                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4836                         msg->msg_hdr.msg.put.match_bits,
4837                         msg->msg_hdr.msg.put.offset,
4838                         msg->msg_hdr.payload_length);
4839
4840                 lnet_recv_put(msg->msg_rxni, msg);
4841         }
4842 }
4843
4844 static void
4845 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4846                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4847 {
4848         s64 timeout_ns;
4849         struct lnet_rsp_tracker *local_rspt;
4850
4851         /*
4852          * MD has a refcount taken by message so it's not going away.
4853          * The MD however can be looked up. We need to secure the access
4854          * to the md_rspt_ptr by taking the res_lock.
4855          * The rspt can be accessed without protection up to when it gets
4856          * added to the list.
4857          */
4858
4859         lnet_res_lock(cpt);
4860         local_rspt = md->md_rspt_ptr;
4861         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4862         if (local_rspt != NULL) {
4863                 /*
4864                  * we already have an rspt attached to the md, so we'll
4865                  * update the deadline on that one.
4866                  */
4867                 lnet_rspt_free(rspt, cpt);
4868         } else {
4869                 /* new md */
4870                 rspt->rspt_mdh = mdh;
4871                 rspt->rspt_cpt = cpt;
4872                 /* store the rspt so we can access it when we get the REPLY */
4873                 md->md_rspt_ptr = rspt;
4874                 local_rspt = rspt;
4875         }
4876         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4877
4878         /*
4879          * add to the list of tracked responses. It's added to tail of the
4880          * list in order to expire all the older entries first.
4881          */
4882         lnet_net_lock(cpt);
4883         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4884         lnet_net_unlock(cpt);
4885         lnet_res_unlock(cpt);
4886 }
4887
4888 /**
4889  * Initiate an asynchronous PUT operation.
4890  *
4891  * There are several events associated with a PUT: completion of the send on
4892  * the initiator node (LNET_EVENT_SEND), and when the send completes
4893  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4894  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4895  * used at the target node to indicate the completion of incoming data
4896  * delivery.
4897  *
4898  * The local events will be logged in the EQ associated with the MD pointed to
4899  * by \a mdh handle. Using a MD without an associated EQ results in these
4900  * events being discarded. In this case, the caller must have another
4901  * mechanism (e.g., a higher level protocol) for determining when it is safe
4902  * to modify the memory region associated with the MD.
4903  *
4904  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4905  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4906  *
4907  * \param self Indicates the NID of a local interface through which to send
4908  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4909  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4910  * must be "free floating" (See LNetMDBind()).
4911  * \param ack Controls whether an acknowledgment is requested.
4912  * Acknowledgments are only sent when they are requested by the initiating
4913  * process and the target MD enables them.
4914  * \param target A process identifier for the target process.
4915  * \param portal The index in the \a target's portal table.
4916  * \param match_bits The match bits to use for MD selection at the target
4917  * process.
4918  * \param offset The offset into the target MD (only used when the target
4919  * MD has the LNET_MD_MANAGE_REMOTE option set).
4920  * \param hdr_data 64 bits of user data that can be included in the message
4921  * header. This data is written to an event queue entry at the target if an
4922  * EQ is present on the matching MD.
4923  *
4924  * \retval  0      Success, and only in this case events will be generated
4925  * and logged to EQ (if it exists).
4926  * \retval -EIO    Simulated failure.
4927  * \retval -ENOMEM Memory allocation failure.
4928  * \retval -ENOENT Invalid MD object.
4929  *
4930  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4931  */
4932 int
4933 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4934         struct lnet_process_id target, unsigned int portal,
4935         __u64 match_bits, unsigned int offset,
4936         __u64 hdr_data)
4937 {
4938         struct lnet_msg *msg;
4939         struct lnet_libmd *md;
4940         int cpt;
4941         int rc;
4942         struct lnet_rsp_tracker *rspt = NULL;
4943
4944         LASSERT(the_lnet.ln_refcount > 0);
4945
4946         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4947             fail_peer(target.nid, 1)) {                 /* shall we now? */
4948                 CERROR("Dropping PUT to %s: simulated failure\n",
4949                        libcfs_id2str(target));
4950                 return -EIO;
4951         }
4952
4953         msg = lnet_msg_alloc();
4954         if (msg == NULL) {
4955                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4956                        libcfs_id2str(target));
4957                 return -ENOMEM;
4958         }
4959         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
4960
4961         cpt = lnet_cpt_of_cookie(mdh.cookie);
4962
4963         if (ack == LNET_ACK_REQ) {
4964                 rspt = lnet_rspt_alloc(cpt);
4965                 if (!rspt) {
4966                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4967                                 libcfs_id2str(target));
4968                         return -ENOMEM;
4969                 }
4970                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4971         }
4972
4973         lnet_res_lock(cpt);
4974
4975         md = lnet_handle2md(&mdh);
4976         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4977                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4978                        match_bits, portal, libcfs_id2str(target),
4979                        md == NULL ? -1 : md->md_threshold);
4980                 if (md != NULL && md->md_me != NULL)
4981                         CERROR("Source MD also attached to portal %d\n",
4982                                md->md_me->me_portal);
4983                 lnet_res_unlock(cpt);
4984
4985                 if (rspt)
4986                         lnet_rspt_free(rspt, cpt);
4987
4988                 lnet_msg_free(msg);
4989                 return -ENOENT;
4990         }
4991
4992         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4993
4994         lnet_msg_attach_md(msg, md, 0, 0);
4995
4996         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4997
4998         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4999         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5000         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5001         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5002
5003         /* NB handles only looked up by creator (no flips) */
5004         if (ack == LNET_ACK_REQ) {
5005                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5006                         the_lnet.ln_interface_cookie;
5007                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5008                         md->md_lh.lh_cookie;
5009         } else {
5010                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5011                         LNET_WIRE_HANDLE_COOKIE_NONE;
5012                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5013                         LNET_WIRE_HANDLE_COOKIE_NONE;
5014         }
5015
5016         lnet_res_unlock(cpt);
5017
5018         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5019
5020         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5021                                                    md->md_options))
5022                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5023         else if (rspt)
5024                 lnet_rspt_free(rspt, cpt);
5025
5026         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5027                                  CFS_FAIL_ONCE))
5028                 rc = -EIO;
5029         else
5030                 rc = lnet_send(self, msg, LNET_NID_ANY);
5031
5032         if (rc != 0) {
5033                 CNETERR("Error sending PUT to %s: %d\n",
5034                         libcfs_id2str(target), rc);
5035                 msg->msg_no_resend = true;
5036                 lnet_finalize(msg, rc);
5037         }
5038
5039         /* completion will be signalled by an event */
5040         return 0;
5041 }
5042 EXPORT_SYMBOL(LNetPut);
5043
5044 /*
5045  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5046  * returns a msg for the LND to pass to lnet_finalize() when the sink
5047  * data has been received.
5048  *
5049  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5050  * lnet_finalize() is called on it, so the LND must call this first
5051  */
5052 struct lnet_msg *
5053 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5054 {
5055         struct lnet_msg *msg = lnet_msg_alloc();
5056         struct lnet_libmd *getmd = getmsg->msg_md;
5057         struct lnet_process_id peer_id = getmsg->msg_target;
5058         int cpt;
5059
5060         LASSERT(!getmsg->msg_target_is_router);
5061         LASSERT(!getmsg->msg_routing);
5062
5063         if (msg == NULL) {
5064                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5065                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
5066                 goto drop;
5067         }
5068
5069         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5070         lnet_res_lock(cpt);
5071
5072         LASSERT(getmd->md_refcount > 0);
5073
5074         if (getmd->md_threshold == 0) {
5075                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5076                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
5077                         getmd);
5078                 lnet_res_unlock(cpt);
5079                 goto drop;
5080         }
5081
5082         LASSERT(getmd->md_offset == 0);
5083
5084         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5085                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
5086
5087         /* setup information for lnet_build_msg_event */
5088         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5089         msg->msg_from = peer_id.nid;
5090         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5091         msg->msg_hdr.src_nid = peer_id.nid;
5092         msg->msg_hdr.payload_length = getmd->md_length;
5093         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5094
5095         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5096         lnet_res_unlock(cpt);
5097
5098         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5099
5100         lnet_net_lock(cpt);
5101         lnet_msg_commit(msg, cpt);
5102         lnet_net_unlock(cpt);
5103
5104         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5105
5106         return msg;
5107
5108  drop:
5109         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5110
5111         lnet_net_lock(cpt);
5112         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5113         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5114         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5115                 getmd->md_length;
5116         lnet_net_unlock(cpt);
5117
5118         if (msg != NULL)
5119                 lnet_msg_free(msg);
5120
5121         return NULL;
5122 }
5123 EXPORT_SYMBOL(lnet_create_reply_msg);
5124
5125 void
5126 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5127                        unsigned int len)
5128 {
5129         /* Set the REPLY length, now the RDMA that elides the REPLY message has
5130          * completed and I know it. */
5131         LASSERT(reply != NULL);
5132         LASSERT(reply->msg_type == LNET_MSG_GET);
5133         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5134
5135         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5136          * the end of my buffer, I might as well be dead. */
5137         LASSERT(len <= reply->msg_ev.mlength);
5138
5139         reply->msg_ev.mlength = len;
5140 }
5141 EXPORT_SYMBOL(lnet_set_reply_msg_len);
5142
5143 /**
5144  * Initiate an asynchronous GET operation.
5145  *
5146  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5147  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5148  * the target node in the REPLY has been written to local MD.
5149  *
5150  * On the target node, an LNET_EVENT_GET is logged when the GET request
5151  * arrives and is accepted into a MD.
5152  *
5153  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5154  * \param mdh A handle for the MD that describes the memory into which the
5155  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5156  *
5157  * \retval  0      Success, and only in this case events will be generated
5158  * and logged to EQ (if it exists) of the MD.
5159  * \retval -EIO    Simulated failure.
5160  * \retval -ENOMEM Memory allocation failure.
5161  * \retval -ENOENT Invalid MD object.
5162  */
5163 int
5164 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
5165         struct lnet_process_id target, unsigned int portal,
5166         __u64 match_bits, unsigned int offset, bool recovery)
5167 {
5168         struct lnet_msg *msg;
5169         struct lnet_libmd *md;
5170         struct lnet_rsp_tracker *rspt;
5171         int cpt;
5172         int rc;
5173
5174         LASSERT(the_lnet.ln_refcount > 0);
5175
5176         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5177             fail_peer(target.nid, 1))                   /* shall we now? */
5178         {
5179                 CERROR("Dropping GET to %s: simulated failure\n",
5180                        libcfs_id2str(target));
5181                 return -EIO;
5182         }
5183
5184         msg = lnet_msg_alloc();
5185         if (!msg) {
5186                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5187                        libcfs_id2str(target));
5188                 return -ENOMEM;
5189         }
5190
5191         cpt = lnet_cpt_of_cookie(mdh.cookie);
5192
5193         rspt = lnet_rspt_alloc(cpt);
5194         if (!rspt) {
5195                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5196                        libcfs_id2str(target));
5197                 return -ENOMEM;
5198         }
5199         INIT_LIST_HEAD(&rspt->rspt_on_list);
5200
5201         msg->msg_recovery = recovery;
5202
5203         lnet_res_lock(cpt);
5204
5205         md = lnet_handle2md(&mdh);
5206         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5207                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5208                        match_bits, portal, libcfs_id2str(target),
5209                        md == NULL ? -1 : md->md_threshold);
5210                 if (md != NULL && md->md_me != NULL)
5211                         CERROR("REPLY MD also attached to portal %d\n",
5212                                md->md_me->me_portal);
5213
5214                 lnet_res_unlock(cpt);
5215
5216                 lnet_msg_free(msg);
5217                 lnet_rspt_free(rspt, cpt);
5218                 return -ENOENT;
5219         }
5220
5221         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5222
5223         lnet_msg_attach_md(msg, md, 0, 0);
5224
5225         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5226
5227         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5228         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5229         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5230         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5231
5232         /* NB handles only looked up by creator (no flips) */
5233         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5234                 the_lnet.ln_interface_cookie;
5235         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5236                 md->md_lh.lh_cookie;
5237
5238         lnet_res_unlock(cpt);
5239
5240         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5241
5242         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5243                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5244         else
5245                 lnet_rspt_free(rspt, cpt);
5246
5247         rc = lnet_send(self, msg, LNET_NID_ANY);
5248         if (rc < 0) {
5249                 CNETERR("Error sending GET to %s: %d\n",
5250                         libcfs_id2str(target), rc);
5251                 msg->msg_no_resend = true;
5252                 lnet_finalize(msg, rc);
5253         }
5254
5255         /* completion will be signalled by an event */
5256         return 0;
5257 }
5258 EXPORT_SYMBOL(LNetGet);
5259
5260 /**
5261  * Calculate distance to node at \a dstnid.
5262  *
5263  * \param dstnid Target NID.
5264  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5265  * is saved here.
5266  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5267  * here.
5268  *
5269  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5270  * local_nid_dist_zero is set, which is the default.
5271  * \retval positives Distance to target NID, i.e. number of hops plus one.
5272  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5273  */
5274 int
5275 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5276 {
5277         struct list_head *e;
5278         struct lnet_ni *ni = NULL;
5279         struct lnet_remotenet *rnet;
5280         __u32 dstnet = LNET_NIDNET(dstnid);
5281         int hops;
5282         int cpt;
5283         __u32 order = 2;
5284         struct list_head *rn_list;
5285         bool matched_dstnet = false;
5286
5287         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5288          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5289          * keep order 0 free for 0@lo and order 1 free for a local NID
5290          * match */
5291
5292         LASSERT(the_lnet.ln_refcount > 0);
5293
5294         cpt = lnet_net_lock_current();
5295
5296         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5297                 if (ni->ni_nid == dstnid) {
5298                         if (srcnidp != NULL)
5299                                 *srcnidp = dstnid;
5300                         if (orderp != NULL) {
5301                                 if (dstnid == LNET_NID_LO_0)
5302                                         *orderp = 0;
5303                                 else
5304                                         *orderp = 1;
5305                         }
5306                         lnet_net_unlock(cpt);
5307
5308                         return local_nid_dist_zero ? 0 : 1;
5309                 }
5310
5311                 if (!matched_dstnet && LNET_NIDNET(ni->ni_nid) == dstnet) {
5312                         matched_dstnet = true;
5313                         /* We matched the destination net, but we may have
5314                          * additional local NIs to inspect.
5315                          *
5316                          * We record the nid and order as appropriate, but
5317                          * they may be overwritten if we match local NI above.
5318                          */
5319                         if (srcnidp)
5320                                 *srcnidp = ni->ni_nid;
5321
5322                         if (orderp) {
5323                                 /* Check if ni was originally created in
5324                                  * current net namespace.
5325                                  * If not, assign order above 0xffff0000,
5326                                  * to make this ni not a priority.
5327                                  */
5328                                 if (current->nsproxy &&
5329                                     !net_eq(ni->ni_net_ns,
5330                                             current->nsproxy->net_ns))
5331                                         *orderp = order + 0xffff0000;
5332                                 else
5333                                         *orderp = order;
5334                         }
5335                 }
5336
5337                 order++;
5338         }
5339
5340         if (matched_dstnet) {
5341                 lnet_net_unlock(cpt);
5342                 return 1;
5343         }
5344
5345         rn_list = lnet_net2rnethash(dstnet);
5346         list_for_each(e, rn_list) {
5347                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5348
5349                 if (rnet->lrn_net == dstnet) {
5350                         struct lnet_route *route;
5351                         struct lnet_route *shortest = NULL;
5352                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5353                         __u32 route_hops;
5354
5355                         LASSERT(!list_empty(&rnet->lrn_routes));
5356
5357                         list_for_each_entry(route, &rnet->lrn_routes,
5358                                             lr_list) {
5359                                 route_hops = route->lr_hops;
5360                                 if (route_hops == LNET_UNDEFINED_HOPS)
5361                                         route_hops = 1;
5362                                 if (shortest == NULL ||
5363                                     route_hops < shortest_hops) {
5364                                         shortest = route;
5365                                         shortest_hops = route_hops;
5366                                 }
5367                         }
5368
5369                         LASSERT(shortest != NULL);
5370                         hops = shortest_hops;
5371                         if (srcnidp != NULL) {
5372                                 struct lnet_net *net;
5373                                 net = lnet_get_net_locked(shortest->lr_lnet);
5374                                 LASSERT(net);
5375                                 ni = lnet_get_next_ni_locked(net, NULL);
5376                                 *srcnidp = ni->ni_nid;
5377                         }
5378                         if (orderp != NULL)
5379                                 *orderp = order;
5380                         lnet_net_unlock(cpt);
5381                         return hops + 1;
5382                 }
5383                 order++;
5384         }
5385
5386         lnet_net_unlock(cpt);
5387         return -EHOSTUNREACH;
5388 }
5389 EXPORT_SYMBOL(LNetDist);