Whamcloud - gitweb
LU-9121 lnet: Select NI/peer NI with highest prio
[fs/lustre-release.git] / lnet / lnet / lib-move.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/lnet/lib-move.c
33  *
34  * Data movement routines
35  */
36
37 #define DEBUG_SUBSYSTEM S_LNET
38
39 #include <linux/pagemap.h>
40
41 #include <lnet/lib-lnet.h>
42 #include <linux/nsproxy.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         lnet_nid_t sd_dst_nid;
59         lnet_nid_t sd_src_nid;
60         lnet_nid_t sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicity enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
192 int
193 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         LIST_HEAD(cull);
199
200         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
201         if (threshold != 0) {
202                 /* Adding a new entry */
203                 LIBCFS_ALLOC(tp, sizeof(*tp));
204                 if (tp == NULL)
205                         return -ENOMEM;
206
207                 tp->tp_nid = nid;
208                 tp->tp_threshold = threshold;
209
210                 lnet_net_lock(0);
211                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
212                 lnet_net_unlock(0);
213                 return 0;
214         }
215
216         lnet_net_lock(0);
217
218         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
219                 tp = list_entry(el, struct lnet_test_peer, tp_list);
220
221                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
222                     nid == LNET_NID_ANY ||      /* removing all entries */
223                     tp->tp_nid == nid) {        /* matched this one */
224                         list_move(&tp->tp_list, &cull);
225                 }
226         }
227
228         lnet_net_unlock(0);
229
230         while (!list_empty(&cull)) {
231                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
232
233                 list_del(&tp->tp_list);
234                 LIBCFS_FREE(tp, sizeof(*tp));
235         }
236         return 0;
237 }
238
239 static int
240 fail_peer (lnet_nid_t nid, int outgoing)
241 {
242         struct lnet_test_peer *tp;
243         struct list_head *el;
244         struct list_head *next;
245         LIST_HEAD(cull);
246         int fail = 0;
247
248         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
249         lnet_net_lock(0);
250
251         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
252                 tp = list_entry(el, struct lnet_test_peer, tp_list);
253
254                 if (tp->tp_threshold == 0) {
255                         /* zombie entry */
256                         if (outgoing) {
257                                 /* only cull zombies on outgoing tests,
258                                  * since we may be at interrupt priority on
259                                  * incoming messages. */
260                                 list_move(&tp->tp_list, &cull);
261                         }
262                         continue;
263                 }
264
265                 if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
266                     nid == tp->tp_nid) {                /* fail this peer */
267                         fail = 1;
268
269                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
270                                 tp->tp_threshold--;
271                                 if (outgoing &&
272                                     tp->tp_threshold == 0) {
273                                         /* see above */
274                                         list_move(&tp->tp_list, &cull);
275                                 }
276                         }
277                         break;
278                 }
279         }
280
281         lnet_net_unlock(0);
282
283         while (!list_empty(&cull)) {
284                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
285                 list_del(&tp->tp_list);
286
287                 LIBCFS_FREE(tp, sizeof(*tp));
288         }
289
290         return fail;
291 }
292
293 unsigned int
294 lnet_iov_nob(unsigned int niov, struct kvec *iov)
295 {
296         unsigned int nob = 0;
297
298         LASSERT(niov == 0 || iov != NULL);
299         while (niov-- > 0)
300                 nob += (iov++)->iov_len;
301
302         return (nob);
303 }
304 EXPORT_SYMBOL(lnet_iov_nob);
305
306 void
307 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
308                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
309                   unsigned int nob)
310 {
311         /* NB diov, siov are READ-ONLY */
312         unsigned int this_nob;
313
314         if (nob == 0)
315                 return;
316
317         /* skip complete frags before 'doffset' */
318         LASSERT(ndiov > 0);
319         while (doffset >= diov->iov_len) {
320                 doffset -= diov->iov_len;
321                 diov++;
322                 ndiov--;
323                 LASSERT(ndiov > 0);
324         }
325
326         /* skip complete frags before 'soffset' */
327         LASSERT(nsiov > 0);
328         while (soffset >= siov->iov_len) {
329                 soffset -= siov->iov_len;
330                 siov++;
331                 nsiov--;
332                 LASSERT(nsiov > 0);
333         }
334
335         do {
336                 LASSERT(ndiov > 0);
337                 LASSERT(nsiov > 0);
338                 this_nob = min3((unsigned int)diov->iov_len - doffset,
339                                 (unsigned int)siov->iov_len - soffset,
340                                 nob);
341
342                 memcpy((char *)diov->iov_base + doffset,
343                        (char *)siov->iov_base + soffset, this_nob);
344                 nob -= this_nob;
345
346                 if (diov->iov_len > doffset + this_nob) {
347                         doffset += this_nob;
348                 } else {
349                         diov++;
350                         ndiov--;
351                         doffset = 0;
352                 }
353
354                 if (siov->iov_len > soffset + this_nob) {
355                         soffset += this_nob;
356                 } else {
357                         siov++;
358                         nsiov--;
359                         soffset = 0;
360                 }
361         } while (nob > 0);
362 }
363 EXPORT_SYMBOL(lnet_copy_iov2iov);
364
365 unsigned int
366 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
367 {
368         unsigned int  nob = 0;
369
370         LASSERT(niov == 0 || kiov != NULL);
371         while (niov-- > 0)
372                 nob += (kiov++)->bv_len;
373
374         return (nob);
375 }
376 EXPORT_SYMBOL(lnet_kiov_nob);
377
378 void
379 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
380                     unsigned int doffset,
381                     unsigned int nsiov, struct bio_vec *siov,
382                     unsigned int soffset,
383                     unsigned int nob)
384 {
385         /* NB diov, siov are READ-ONLY */
386         unsigned int    this_nob;
387         char           *daddr = NULL;
388         char           *saddr = NULL;
389
390         if (nob == 0)
391                 return;
392
393         LASSERT (!in_interrupt ());
394
395         LASSERT (ndiov > 0);
396         while (doffset >= diov->bv_len) {
397                 doffset -= diov->bv_len;
398                 diov++;
399                 ndiov--;
400                 LASSERT(ndiov > 0);
401         }
402
403         LASSERT(nsiov > 0);
404         while (soffset >= siov->bv_len) {
405                 soffset -= siov->bv_len;
406                 siov++;
407                 nsiov--;
408                 LASSERT(nsiov > 0);
409         }
410
411         do {
412                 LASSERT(ndiov > 0);
413                 LASSERT(nsiov > 0);
414                 this_nob = min3(diov->bv_len - doffset,
415                                 siov->bv_len - soffset,
416                                 nob);
417
418                 if (daddr == NULL)
419                         daddr = ((char *)kmap(diov->bv_page)) +
420                                 diov->bv_offset + doffset;
421                 if (saddr == NULL)
422                         saddr = ((char *)kmap(siov->bv_page)) +
423                                 siov->bv_offset + soffset;
424
425                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
426                  * However in practice at least one of the kiovs will be mapped
427                  * kernel pages and the map/unmap will be NOOPs */
428
429                 memcpy (daddr, saddr, this_nob);
430                 nob -= this_nob;
431
432                 if (diov->bv_len > doffset + this_nob) {
433                         daddr += this_nob;
434                         doffset += this_nob;
435                 } else {
436                         kunmap(diov->bv_page);
437                         daddr = NULL;
438                         diov++;
439                         ndiov--;
440                         doffset = 0;
441                 }
442
443                 if (siov->bv_len > soffset + this_nob) {
444                         saddr += this_nob;
445                         soffset += this_nob;
446                 } else {
447                         kunmap(siov->bv_page);
448                         saddr = NULL;
449                         siov++;
450                         nsiov--;
451                         soffset = 0;
452                 }
453         } while (nob > 0);
454
455         if (daddr != NULL)
456                 kunmap(diov->bv_page);
457         if (saddr != NULL)
458                 kunmap(siov->bv_page);
459 }
460 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
461
462 void
463 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
464                     unsigned int nkiov, struct bio_vec *kiov,
465                     unsigned int kiovoffset,
466                     unsigned int nob)
467 {
468         /* NB iov, kiov are READ-ONLY */
469         unsigned int    this_nob;
470         char           *addr = NULL;
471
472         if (nob == 0)
473                 return;
474
475         LASSERT (!in_interrupt ());
476
477         LASSERT (niov > 0);
478         while (iovoffset >= iov->iov_len) {
479                 iovoffset -= iov->iov_len;
480                 iov++;
481                 niov--;
482                 LASSERT(niov > 0);
483         }
484
485         LASSERT(nkiov > 0);
486         while (kiovoffset >= kiov->bv_len) {
487                 kiovoffset -= kiov->bv_len;
488                 kiov++;
489                 nkiov--;
490                 LASSERT(nkiov > 0);
491         }
492
493         do {
494                 LASSERT(niov > 0);
495                 LASSERT(nkiov > 0);
496                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
497                                 (unsigned int)kiov->bv_len - kiovoffset,
498                                 nob);
499
500                 if (addr == NULL)
501                         addr = ((char *)kmap(kiov->bv_page)) +
502                                 kiov->bv_offset + kiovoffset;
503
504                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
505                 nob -= this_nob;
506
507                 if (iov->iov_len > iovoffset + this_nob) {
508                         iovoffset += this_nob;
509                 } else {
510                         iov++;
511                         niov--;
512                         iovoffset = 0;
513                 }
514
515                 if (kiov->bv_len > kiovoffset + this_nob) {
516                         addr += this_nob;
517                         kiovoffset += this_nob;
518                 } else {
519                         kunmap(kiov->bv_page);
520                         addr = NULL;
521                         kiov++;
522                         nkiov--;
523                         kiovoffset = 0;
524                 }
525
526         } while (nob > 0);
527
528         if (addr != NULL)
529                 kunmap(kiov->bv_page);
530 }
531 EXPORT_SYMBOL(lnet_copy_kiov2iov);
532
533 void
534 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
535                    unsigned int kiovoffset,
536                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
537                    unsigned int nob)
538 {
539         /* NB kiov, iov are READ-ONLY */
540         unsigned int    this_nob;
541         char           *addr = NULL;
542
543         if (nob == 0)
544                 return;
545
546         LASSERT (!in_interrupt ());
547
548         LASSERT (nkiov > 0);
549         while (kiovoffset >= kiov->bv_len) {
550                 kiovoffset -= kiov->bv_len;
551                 kiov++;
552                 nkiov--;
553                 LASSERT(nkiov > 0);
554         }
555
556         LASSERT(niov > 0);
557         while (iovoffset >= iov->iov_len) {
558                 iovoffset -= iov->iov_len;
559                 iov++;
560                 niov--;
561                 LASSERT(niov > 0);
562         }
563
564         do {
565                 LASSERT(nkiov > 0);
566                 LASSERT(niov > 0);
567                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
568                                 (unsigned int)iov->iov_len - iovoffset,
569                                 nob);
570
571                 if (addr == NULL)
572                         addr = ((char *)kmap(kiov->bv_page)) +
573                                 kiov->bv_offset + kiovoffset;
574
575                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
576                 nob -= this_nob;
577
578                 if (kiov->bv_len > kiovoffset + this_nob) {
579                         addr += this_nob;
580                         kiovoffset += this_nob;
581                 } else {
582                         kunmap(kiov->bv_page);
583                         addr = NULL;
584                         kiov++;
585                         nkiov--;
586                         kiovoffset = 0;
587                 }
588
589                 if (iov->iov_len > iovoffset + this_nob) {
590                         iovoffset += this_nob;
591                 } else {
592                         iov++;
593                         niov--;
594                         iovoffset = 0;
595                 }
596         } while (nob > 0);
597
598         if (addr != NULL)
599                 kunmap(kiov->bv_page);
600 }
601 EXPORT_SYMBOL(lnet_copy_iov2kiov);
602
603 int
604 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
605                   int src_niov, struct bio_vec *src,
606                   unsigned int offset, unsigned int len)
607 {
608         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
609          * for exactly 'len' bytes, and return the number of entries.
610          * NB not destructive to 'src' */
611         unsigned int    frag_len;
612         unsigned int    niov;
613
614         if (len == 0)                           /* no data => */
615                 return (0);                     /* no frags */
616
617         LASSERT(src_niov > 0);
618         while (offset >= src->bv_len) {      /* skip initial frags */
619                 offset -= src->bv_len;
620                 src_niov--;
621                 src++;
622                 LASSERT(src_niov > 0);
623         }
624
625         niov = 1;
626         for (;;) {
627                 LASSERT(src_niov > 0);
628                 LASSERT((int)niov <= dst_niov);
629
630                 frag_len = src->bv_len - offset;
631                 dst->bv_page = src->bv_page;
632                 dst->bv_offset = src->bv_offset + offset;
633
634                 if (len <= frag_len) {
635                         dst->bv_len = len;
636                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
637                         return niov;
638                 }
639
640                 dst->bv_len = frag_len;
641                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
642
643                 len -= frag_len;
644                 dst++;
645                 src++;
646                 niov++;
647                 src_niov--;
648                 offset = 0;
649         }
650 }
651 EXPORT_SYMBOL(lnet_extract_kiov);
652
653 void
654 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
655              int delayed, unsigned int offset, unsigned int mlen,
656              unsigned int rlen)
657 {
658         unsigned int niov = 0;
659         struct kvec *iov = NULL;
660         struct bio_vec  *kiov = NULL;
661         int rc;
662
663         LASSERT (!in_interrupt ());
664         LASSERT (mlen == 0 || msg != NULL);
665
666         if (msg != NULL) {
667                 LASSERT(msg->msg_receiving);
668                 LASSERT(!msg->msg_sending);
669                 LASSERT(rlen == msg->msg_len);
670                 LASSERT(mlen <= msg->msg_len);
671                 LASSERT(msg->msg_offset == offset);
672                 LASSERT(msg->msg_wanted == mlen);
673
674                 msg->msg_receiving = 0;
675
676                 if (mlen != 0) {
677                         niov = msg->msg_niov;
678                         kiov = msg->msg_kiov;
679
680                         LASSERT (niov > 0);
681                         LASSERT ((iov == NULL) != (kiov == NULL));
682                 }
683         }
684
685         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
686                                              niov, kiov, offset, mlen,
687                                              rlen);
688         if (rc < 0)
689                 lnet_finalize(msg, rc);
690 }
691
692 static void
693 lnet_setpayloadbuffer(struct lnet_msg *msg)
694 {
695         struct lnet_libmd *md = msg->msg_md;
696
697         LASSERT(msg->msg_len > 0);
698         LASSERT(!msg->msg_routing);
699         LASSERT(md != NULL);
700         LASSERT(msg->msg_niov == 0);
701         LASSERT(msg->msg_kiov == NULL);
702
703         msg->msg_niov = md->md_niov;
704         msg->msg_kiov = md->md_kiov;
705 }
706
707 void
708 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
709                unsigned int offset, unsigned int len)
710 {
711         msg->msg_type = type;
712         msg->msg_target = target;
713         msg->msg_len = len;
714         msg->msg_offset = offset;
715
716         if (len != 0)
717                 lnet_setpayloadbuffer(msg);
718
719         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
720         msg->msg_hdr.type           = cpu_to_le32(type);
721         /* dest_nid will be overwritten by lnet_select_pathway() */
722         msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
723         msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
724         /* src_nid will be set later */
725         msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
726         msg->msg_hdr.payload_length = cpu_to_le32(len);
727 }
728
729 static void
730 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
731 {
732         void *priv = msg->msg_private;
733         int rc;
734
735         LASSERT(!in_interrupt());
736         LASSERT(ni->ni_nid == LNET_NID_LO_0 ||
737                 (msg->msg_txcredit && msg->msg_peertxcredit));
738
739         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
740         if (rc < 0) {
741                 msg->msg_no_resend = true;
742                 lnet_finalize(msg, rc);
743         }
744 }
745
746 static int
747 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
748 {
749         int     rc;
750
751         LASSERT(!msg->msg_sending);
752         LASSERT(msg->msg_receiving);
753         LASSERT(!msg->msg_rx_ready_delay);
754         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
755
756         msg->msg_rx_ready_delay = 1;
757         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
758                                                   &msg->msg_private);
759         if (rc != 0) {
760                 CERROR("recv from %s / send to %s aborted: "
761                        "eager_recv failed %d\n",
762                        libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
763                        libcfs_id2str(msg->msg_target), rc);
764                 LASSERT(rc < 0); /* required by my callers */
765         }
766
767         return rc;
768 }
769
770 static bool
771 lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
772 {
773         time64_t deadline;
774
775         deadline = lpni->lpni_last_alive +
776                    lpni->lpni_net->net_tunables.lct_peer_timeout;
777
778         /*
779          * assume peer_ni is alive as long as we're within the configured
780          * peer timeout
781          */
782         if (deadline > now)
783                 return false;
784
785         return true;
786 }
787
788 /* NB: returns 1 when alive, 0 when dead, negative when error;
789  *     may drop the lnet_net_lock */
790 static int
791 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
792                        struct lnet_msg *msg)
793 {
794         time64_t now = ktime_get_seconds();
795
796         if (!lnet_peer_aliveness_enabled(lpni))
797                 return -ENODEV;
798
799         /*
800          * If we're resending a message, let's attempt to send it even if
801          * the peer is down to fulfill our resend quota on the message
802          */
803         if (msg->msg_retry_count > 0)
804                 return 1;
805
806         /* try and send recovery messages irregardless */
807         if (msg->msg_recovery)
808                 return 1;
809
810         /* always send any responses */
811         if (lnet_msg_is_response(msg))
812                 return 1;
813
814         if (!lnet_is_peer_deadline_passed(lpni, now))
815                 return true;
816
817         return lnet_is_peer_ni_alive(lpni);
818 }
819
820 /**
821  * \param msg The message to be sent.
822  * \param do_send True if lnet_ni_send() should be called in this function.
823  *        lnet_send() is going to lnet_net_unlock immediately after this, so
824  *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
825  *
826  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
827  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
828  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
829  * \retval -ECANCELED If the MD of the message has been unlinked.
830  */
831 static int
832 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
833 {
834         struct lnet_peer_ni     *lp = msg->msg_txpeer;
835         struct lnet_ni          *ni = msg->msg_txni;
836         int                     cpt = msg->msg_tx_cpt;
837         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
838
839         /* non-lnet_send() callers have checked before */
840         LASSERT(!do_send || msg->msg_tx_delayed);
841         LASSERT(!msg->msg_receiving);
842         LASSERT(msg->msg_tx_committed);
843         /* can't get here if we're sending to the loopback interface */
844         LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
845
846         /* NB 'lp' is always the next hop */
847         if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
848             lnet_peer_alive_locked(ni, lp, msg) == 0) {
849                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
850                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
851                         msg->msg_len;
852                 lnet_net_unlock(cpt);
853                 if (msg->msg_txpeer)
854                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
855                                         msg->msg_type,
856                                         LNET_STATS_TYPE_DROP);
857                 if (msg->msg_txni)
858                         lnet_incr_stats(&msg->msg_txni->ni_stats,
859                                         msg->msg_type,
860                                         LNET_STATS_TYPE_DROP);
861
862                 CNETERR("Dropping message for %s: peer not alive\n",
863                         libcfs_id2str(msg->msg_target));
864                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
865                 if (do_send)
866                         lnet_finalize(msg, -EHOSTUNREACH);
867
868                 lnet_net_lock(cpt);
869                 return -EHOSTUNREACH;
870         }
871
872         if (msg->msg_md != NULL &&
873             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
874                 lnet_net_unlock(cpt);
875
876                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
877                         "called on the MD/ME.\n",
878                         libcfs_id2str(msg->msg_target));
879                 if (do_send) {
880                         msg->msg_no_resend = true;
881                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
882                                msg, libcfs_id2str(msg->msg_target));
883                         lnet_finalize(msg, -ECANCELED);
884                 }
885
886                 lnet_net_lock(cpt);
887                 return -ECANCELED;
888         }
889
890         if (!msg->msg_peertxcredit) {
891                 spin_lock(&lp->lpni_lock);
892                 LASSERT((lp->lpni_txcredits < 0) ==
893                         !list_empty(&lp->lpni_txq));
894
895                 msg->msg_peertxcredit = 1;
896                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
897                 lp->lpni_txcredits--;
898
899                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
900                         lp->lpni_mintxcredits = lp->lpni_txcredits;
901
902                 if (lp->lpni_txcredits < 0) {
903                         msg->msg_tx_delayed = 1;
904                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
905                         spin_unlock(&lp->lpni_lock);
906                         return LNET_CREDIT_WAIT;
907                 }
908                 spin_unlock(&lp->lpni_lock);
909         }
910
911         if (!msg->msg_txcredit) {
912                 LASSERT((tq->tq_credits < 0) ==
913                         !list_empty(&tq->tq_delayed));
914
915                 msg->msg_txcredit = 1;
916                 tq->tq_credits--;
917                 atomic_dec(&ni->ni_tx_credits);
918
919                 if (tq->tq_credits < tq->tq_credits_min)
920                         tq->tq_credits_min = tq->tq_credits;
921
922                 if (tq->tq_credits < 0) {
923                         msg->msg_tx_delayed = 1;
924                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
925                         return LNET_CREDIT_WAIT;
926                 }
927         }
928
929         /* unset the tx_delay flag as we're going to send it now */
930         msg->msg_tx_delayed = 0;
931
932         if (do_send) {
933                 lnet_net_unlock(cpt);
934                 lnet_ni_send(ni, msg);
935                 lnet_net_lock(cpt);
936         }
937         return LNET_CREDIT_OK;
938 }
939
940
941 static struct lnet_rtrbufpool *
942 lnet_msg2bufpool(struct lnet_msg *msg)
943 {
944         struct lnet_rtrbufpool  *rbp;
945         int                     cpt;
946
947         LASSERT(msg->msg_rx_committed);
948
949         cpt = msg->msg_rx_cpt;
950         rbp = &the_lnet.ln_rtrpools[cpt][0];
951
952         LASSERT(msg->msg_len <= LNET_MTU);
953         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
954                 rbp++;
955                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
956         }
957
958         return rbp;
959 }
960
961 static int
962 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
963 {
964         /* lnet_parse is going to lnet_net_unlock immediately after this, so it
965          * sets do_recv FALSE and I don't do the unlock/send/lock bit.
966          * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
967          * received or OK to receive */
968         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
969         struct lnet_peer *lp;
970         struct lnet_rtrbufpool *rbp;
971         struct lnet_rtrbuf *rb;
972
973         LASSERT(msg->msg_kiov == NULL);
974         LASSERT(msg->msg_niov == 0);
975         LASSERT(msg->msg_routing);
976         LASSERT(msg->msg_receiving);
977         LASSERT(!msg->msg_sending);
978         LASSERT(lpni->lpni_peer_net);
979         LASSERT(lpni->lpni_peer_net->lpn_peer);
980
981         lp = lpni->lpni_peer_net->lpn_peer;
982
983         /* non-lnet_parse callers only receive delayed messages */
984         LASSERT(!do_recv || msg->msg_rx_delayed);
985
986         if (!msg->msg_peerrtrcredit) {
987                 /* lpni_lock protects the credit manipulation */
988                 spin_lock(&lpni->lpni_lock);
989
990                 msg->msg_peerrtrcredit = 1;
991                 lpni->lpni_rtrcredits--;
992                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
993                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
994
995                 if (lpni->lpni_rtrcredits < 0) {
996                         spin_unlock(&lpni->lpni_lock);
997                         /* must have checked eager_recv before here */
998                         LASSERT(msg->msg_rx_ready_delay);
999                         msg->msg_rx_delayed = 1;
1000                         /* lp_lock protects the lp_rtrq */
1001                         spin_lock(&lp->lp_lock);
1002                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1003                         spin_unlock(&lp->lp_lock);
1004                         return LNET_CREDIT_WAIT;
1005                 }
1006                 spin_unlock(&lpni->lpni_lock);
1007         }
1008
1009         rbp = lnet_msg2bufpool(msg);
1010
1011         if (!msg->msg_rtrcredit) {
1012                 msg->msg_rtrcredit = 1;
1013                 rbp->rbp_credits--;
1014                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1015                         rbp->rbp_mincredits = rbp->rbp_credits;
1016
1017                 if (rbp->rbp_credits < 0) {
1018                         /* must have checked eager_recv before here */
1019                         LASSERT(msg->msg_rx_ready_delay);
1020                         msg->msg_rx_delayed = 1;
1021                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1022                         return LNET_CREDIT_WAIT;
1023                 }
1024         }
1025
1026         LASSERT(!list_empty(&rbp->rbp_bufs));
1027         rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
1028         list_del(&rb->rb_list);
1029
1030         msg->msg_niov = rbp->rbp_npages;
1031         msg->msg_kiov = &rb->rb_kiov[0];
1032
1033         /* unset the msg-rx_delayed flag since we're receiving the message */
1034         msg->msg_rx_delayed = 0;
1035
1036         if (do_recv) {
1037                 int cpt = msg->msg_rx_cpt;
1038
1039                 lnet_net_unlock(cpt);
1040                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1041                              0, msg->msg_len, msg->msg_len);
1042                 lnet_net_lock(cpt);
1043         }
1044         return LNET_CREDIT_OK;
1045 }
1046
1047 void
1048 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1049 {
1050         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1051         struct lnet_ni          *txni = msg->msg_txni;
1052         struct lnet_msg         *msg2;
1053
1054         if (msg->msg_txcredit) {
1055                 struct lnet_ni       *ni = msg->msg_txni;
1056                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1057
1058                 /* give back NI txcredits */
1059                 msg->msg_txcredit = 0;
1060
1061                 LASSERT((tq->tq_credits < 0) ==
1062                         !list_empty(&tq->tq_delayed));
1063
1064                 tq->tq_credits++;
1065                 atomic_inc(&ni->ni_tx_credits);
1066                 if (tq->tq_credits <= 0) {
1067                         msg2 = list_entry(tq->tq_delayed.next,
1068                                           struct lnet_msg, msg_list);
1069                         list_del(&msg2->msg_list);
1070
1071                         LASSERT(msg2->msg_txni == ni);
1072                         LASSERT(msg2->msg_tx_delayed);
1073                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1074
1075                         (void) lnet_post_send_locked(msg2, 1);
1076                 }
1077         }
1078
1079         if (msg->msg_peertxcredit) {
1080                 /* give back peer txcredits */
1081                 msg->msg_peertxcredit = 0;
1082
1083                 spin_lock(&txpeer->lpni_lock);
1084                 LASSERT((txpeer->lpni_txcredits < 0) ==
1085                         !list_empty(&txpeer->lpni_txq));
1086
1087                 txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
1088                 LASSERT(txpeer->lpni_txqnob >= 0);
1089
1090                 txpeer->lpni_txcredits++;
1091                 if (txpeer->lpni_txcredits <= 0) {
1092                         int msg2_cpt;
1093
1094                         msg2 = list_entry(txpeer->lpni_txq.next,
1095                                               struct lnet_msg, msg_list);
1096                         list_del(&msg2->msg_list);
1097                         spin_unlock(&txpeer->lpni_lock);
1098
1099                         LASSERT(msg2->msg_txpeer == txpeer);
1100                         LASSERT(msg2->msg_tx_delayed);
1101
1102                         msg2_cpt = msg2->msg_tx_cpt;
1103
1104                         /*
1105                          * The msg_cpt can be different from the msg2_cpt
1106                          * so we need to make sure we lock the correct cpt
1107                          * for msg2.
1108                          * Once we call lnet_post_send_locked() it is no
1109                          * longer safe to access msg2, since it could've
1110                          * been freed by lnet_finalize(), but we still
1111                          * need to relock the correct cpt, so we cache the
1112                          * msg2_cpt for the purpose of the check that
1113                          * follows the call to lnet_pose_send_locked().
1114                          */
1115                         if (msg2_cpt != msg->msg_tx_cpt) {
1116                                 lnet_net_unlock(msg->msg_tx_cpt);
1117                                 lnet_net_lock(msg2_cpt);
1118                         }
1119                         (void) lnet_post_send_locked(msg2, 1);
1120                         if (msg2_cpt != msg->msg_tx_cpt) {
1121                                 lnet_net_unlock(msg2_cpt);
1122                                 lnet_net_lock(msg->msg_tx_cpt);
1123                         }
1124                 } else {
1125                         spin_unlock(&txpeer->lpni_lock);
1126                 }
1127         }
1128
1129         if (txni != NULL) {
1130                 msg->msg_txni = NULL;
1131                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1132         }
1133
1134         if (txpeer != NULL) {
1135                 msg->msg_txpeer = NULL;
1136                 lnet_peer_ni_decref_locked(txpeer);
1137         }
1138 }
1139
1140 void
1141 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1142 {
1143         struct lnet_msg *msg;
1144
1145         if (list_empty(&rbp->rbp_msgs))
1146                 return;
1147         msg = list_entry(rbp->rbp_msgs.next,
1148                          struct lnet_msg, msg_list);
1149         list_del(&msg->msg_list);
1150
1151         (void)lnet_post_routed_recv_locked(msg, 1);
1152 }
1153
1154 void
1155 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1156 {
1157         struct lnet_msg *msg;
1158         struct lnet_msg *tmp;
1159
1160         lnet_net_unlock(cpt);
1161
1162         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1163                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1164                              0, 0, 0, msg->msg_hdr.payload_length);
1165                 list_del_init(&msg->msg_list);
1166                 msg->msg_no_resend = true;
1167                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1168                 lnet_finalize(msg, -ECANCELED);
1169         }
1170
1171         lnet_net_lock(cpt);
1172 }
1173
1174 void
1175 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1176 {
1177         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1178         struct lnet_peer *lp;
1179         struct lnet_ni *rxni = msg->msg_rxni;
1180         struct lnet_msg *msg2;
1181
1182         if (msg->msg_rtrcredit) {
1183                 /* give back global router credits */
1184                 struct lnet_rtrbuf *rb;
1185                 struct lnet_rtrbufpool *rbp;
1186
1187                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1188                  * there until it gets one allocated, or aborts the wait
1189                  * itself */
1190                 LASSERT(msg->msg_kiov != NULL);
1191
1192                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1193                 rbp = rb->rb_pool;
1194
1195                 msg->msg_kiov = NULL;
1196                 msg->msg_rtrcredit = 0;
1197
1198                 LASSERT(rbp == lnet_msg2bufpool(msg));
1199
1200                 LASSERT((rbp->rbp_credits > 0) ==
1201                         !list_empty(&rbp->rbp_bufs));
1202
1203                 /* If routing is now turned off, we just drop this buffer and
1204                  * don't bother trying to return credits.  */
1205                 if (!the_lnet.ln_routing) {
1206                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1207                         goto routing_off;
1208                 }
1209
1210                 /* It is possible that a user has lowered the desired number of
1211                  * buffers in this pool.  Make sure we never put back
1212                  * more buffers than the stated number. */
1213                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1214                         /* Discard this buffer so we don't have too
1215                          * many. */
1216                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1217                         rbp->rbp_nbuffers--;
1218                 } else {
1219                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1220                         rbp->rbp_credits++;
1221                         if (rbp->rbp_credits <= 0)
1222                                 lnet_schedule_blocked_locked(rbp);
1223                 }
1224         }
1225
1226 routing_off:
1227         if (msg->msg_peerrtrcredit) {
1228                 LASSERT(rxpeerni);
1229                 LASSERT(rxpeerni->lpni_peer_net);
1230                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1231
1232                 /* give back peer router credits */
1233                 msg->msg_peerrtrcredit = 0;
1234
1235                 spin_lock(&rxpeerni->lpni_lock);
1236                 rxpeerni->lpni_rtrcredits++;
1237                 spin_unlock(&rxpeerni->lpni_lock);
1238
1239                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1240                 spin_lock(&lp->lp_lock);
1241
1242                 /* drop all messages which are queued to be routed on that
1243                  * peer. */
1244                 if (!the_lnet.ln_routing) {
1245                         LIST_HEAD(drop);
1246                         list_splice_init(&lp->lp_rtrq, &drop);
1247                         spin_unlock(&lp->lp_lock);
1248                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1249                 } else if (!list_empty(&lp->lp_rtrq)) {
1250                         int msg2_cpt;
1251
1252                         msg2 = list_entry(lp->lp_rtrq.next,
1253                                           struct lnet_msg, msg_list);
1254                         list_del(&msg2->msg_list);
1255                         msg2_cpt = msg2->msg_rx_cpt;
1256                         spin_unlock(&lp->lp_lock);
1257                         /*
1258                          * messages on the lp_rtrq can be from any NID in
1259                          * the peer, which means they might have different
1260                          * cpts. We need to make sure we lock the right
1261                          * one.
1262                          */
1263                         if (msg2_cpt != msg->msg_rx_cpt) {
1264                                 lnet_net_unlock(msg->msg_rx_cpt);
1265                                 lnet_net_lock(msg2_cpt);
1266                         }
1267                         (void) lnet_post_routed_recv_locked(msg2, 1);
1268                         if (msg2_cpt != msg->msg_rx_cpt) {
1269                                 lnet_net_unlock(msg2_cpt);
1270                                 lnet_net_lock(msg->msg_rx_cpt);
1271                         }
1272                 } else {
1273                         spin_unlock(&lp->lp_lock);
1274                 }
1275         }
1276         if (rxni != NULL) {
1277                 msg->msg_rxni = NULL;
1278                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1279         }
1280         if (rxpeerni != NULL) {
1281                 msg->msg_rxpeer = NULL;
1282                 lnet_peer_ni_decref_locked(rxpeerni);
1283         }
1284 }
1285
1286 static struct lnet_peer_ni *
1287 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1288                     struct lnet_peer *peer,
1289                     struct lnet_peer_ni *best_lpni,
1290                     struct lnet_peer_net *peer_net)
1291 {
1292         /*
1293          * Look at the peer NIs for the destination peer that connect
1294          * to the chosen net. If a peer_ni is preferred when using the
1295          * best_ni to communicate, we use that one. If there is no
1296          * preferred peer_ni, or there are multiple preferred peer_ni,
1297          * the available transmit credits are used. If the transmit
1298          * credits are equal, we round-robin over the peer_ni.
1299          */
1300         struct lnet_peer_ni *lpni = NULL;
1301         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1302                 INT_MIN;
1303         int best_lpni_healthv = (best_lpni) ?
1304                 atomic_read(&best_lpni->lpni_healthv) : 0;
1305         bool best_lpni_is_preferred = false;
1306         bool lpni_is_preferred;
1307         int lpni_healthv;
1308         __u32 lpni_sel_prio;
1309         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1310
1311         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1312                 /*
1313                  * if the best_ni we've chosen aleady has this lpni
1314                  * preferred, then let's use it
1315                  */
1316                 if (best_ni) {
1317                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni,
1318                                                                 best_ni->ni_nid);
1319                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1320                                libcfs_nid2str(best_ni->ni_nid),
1321                                lpni_is_preferred);
1322                 } else {
1323                         lpni_is_preferred = false;
1324                 }
1325
1326                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1327                 lpni_sel_prio = lpni->lpni_sel_priority;
1328
1329                 if (best_lpni)
1330                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1331                                 libcfs_nid2str(lpni->lpni_nid),
1332                                 libcfs_nid2str(best_lpni->lpni_nid),
1333                                 lpni_healthv, best_lpni_healthv,
1334                                 lpni_sel_prio, best_sel_prio,
1335                                 lpni->lpni_txcredits, best_lpni_credits,
1336                                 lpni->lpni_seq, best_lpni->lpni_seq);
1337                 else
1338                         goto select_lpni;
1339
1340                 /* pick the healthiest peer ni */
1341                 if (lpni_healthv < best_lpni_healthv)
1342                         continue;
1343                 else if (lpni_healthv > best_lpni_healthv) {
1344                         if (best_lpni_is_preferred)
1345                                 best_lpni_is_preferred = false;
1346                         goto select_lpni;
1347                 }
1348
1349                 if (lpni_sel_prio > best_sel_prio)
1350                         continue;
1351                 else if (lpni_sel_prio < best_sel_prio) {
1352                         if (best_lpni_is_preferred)
1353                                 best_lpni_is_preferred = false;
1354                         goto select_lpni;
1355                 }
1356
1357                 /* if this is a preferred peer use it */
1358                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1359                         best_lpni_is_preferred = true;
1360                         goto select_lpni;
1361                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1362                         /* this is not the preferred peer so let's ignore
1363                          * it.
1364                          */
1365                         continue;
1366                 }
1367
1368                 if (lpni->lpni_txcredits < best_lpni_credits)
1369                         /* We already have a peer that has more credits
1370                          * available than this one. No need to consider
1371                          * this peer further.
1372                          */
1373                         continue;
1374                 else if (lpni->lpni_txcredits > best_lpni_credits)
1375                         goto select_lpni;
1376
1377                 /* The best peer found so far and the current peer
1378                  * have the same number of available credits let's
1379                  * make sure to select between them using Round Robin
1380                  */
1381                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1382                         continue;
1383 select_lpni:
1384                 best_lpni_is_preferred = lpni_is_preferred;
1385                 best_lpni_healthv = lpni_healthv;
1386                 best_sel_prio = lpni_sel_prio;
1387                 best_lpni = lpni;
1388                 best_lpni_credits = lpni->lpni_txcredits;
1389         }
1390
1391         /* if we still can't find a peer ni then we can't reach it */
1392         if (!best_lpni) {
1393                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1394                         LNET_NIDNET(dst_nid);
1395                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1396                                 libcfs_net2str(net_id));
1397                 return NULL;
1398         }
1399
1400         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1401                libcfs_nid2str(best_lpni->lpni_nid));
1402
1403         return best_lpni;
1404 }
1405
1406 /*
1407  * Prerequisite: the best_ni should already be set in the sd
1408  * Find the best lpni.
1409  * If the net id is provided then restrict lpni selection on
1410  * that particular net.
1411  * Otherwise find any reachable lpni. When dealing with an MR
1412  * gateway and it has multiple lpnis which we can use
1413  * we want to select the best one from the list of reachable
1414  * ones.
1415  */
1416 static inline struct lnet_peer_ni *
1417 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1418                     struct lnet_peer *peer, __u32 net_id)
1419 {
1420         struct lnet_peer_net *peer_net;
1421
1422         /* find the best_lpni on any local network */
1423         if (net_id == LNET_NET_ANY) {
1424                 struct lnet_peer_ni *best_lpni = NULL;
1425                 struct lnet_peer_net *lpn;
1426                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1427                         /* no net specified find any reachable peer ni */
1428                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1429                                 continue;
1430                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1431                                                         best_lpni, lpn);
1432                 }
1433
1434                 return best_lpni;
1435         }
1436         /* restrict on the specified net */
1437         peer_net = lnet_peer_get_net_locked(peer, net_id);
1438         if (peer_net)
1439                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1440
1441         return NULL;
1442 }
1443
1444 static int
1445 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1446 {
1447         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1448                 return 1;
1449
1450         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1451                 return -1;
1452
1453         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1454                 return 1;
1455
1456         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1457                 return -1;
1458
1459         return 0;
1460 }
1461
1462 /* Compare route priorities and hop counts */
1463 static int
1464 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1465 {
1466         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1467         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1468
1469         if (r1->lr_priority < r2->lr_priority)
1470                 return 1;
1471
1472         if (r1->lr_priority > r2->lr_priority)
1473                 return -1;
1474
1475         if (r1_hops < r2_hops)
1476                 return 1;
1477
1478         if (r1_hops > r2_hops)
1479                 return -1;
1480
1481         return 0;
1482 }
1483
1484 static struct lnet_route *
1485 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1486                        struct lnet_peer_ni *remote_lpni,
1487                        struct lnet_route **prev_route,
1488                        struct lnet_peer_ni **gwni)
1489 {
1490         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1491         struct lnet_route *best_route;
1492         struct lnet_route *last_route;
1493         struct lnet_route *route;
1494         int rc;
1495         bool best_rte_is_preferred = false;
1496         lnet_nid_t gw_pnid;
1497
1498         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1499                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1500
1501         best_route = last_route = NULL;
1502         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1503                 if (!lnet_is_route_alive(route))
1504                         continue;
1505                 gw_pnid = route->lr_gateway->lp_primary_nid;
1506
1507                 /* no protection on below fields, but it's harmless */
1508                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1509                         last_route = route;
1510
1511                 /* if the best route found is in the preferred list then
1512                  * tag it as preferred and use it later on. But if we
1513                  * didn't find any routes which are on the preferred list
1514                  * then just use the best route possible.
1515                  */
1516                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1517
1518                 if (!best_route || (rc && !best_rte_is_preferred)) {
1519                         /* Restrict the selection of the router NI on the
1520                          * src_net provided. If the src_net is LNET_NID_ANY,
1521                          * then select the best interface available.
1522                          */
1523                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1524                                                    route->lr_gateway,
1525                                                    src_net);
1526                         if (!lpni) {
1527                                 CDEBUG(D_NET,
1528                                        "Gateway %s does not have a peer NI on net %s\n",
1529                                        libcfs_nid2str(gw_pnid),
1530                                        libcfs_net2str(src_net));
1531                                 continue;
1532                         }
1533                 }
1534
1535                 if (rc && !best_rte_is_preferred) {
1536                         /* This is the first preferred route we found,
1537                          * so it beats any route found previously
1538                          */
1539                         best_route = route;
1540                         if (!last_route)
1541                                 last_route = route;
1542                         best_gw_ni = lpni;
1543                         best_rte_is_preferred = true;
1544                         CDEBUG(D_NET, "preferred gw = %s\n",
1545                                libcfs_nid2str(gw_pnid));
1546                         continue;
1547                 } else if ((!rc) && best_rte_is_preferred)
1548                         /* The best route we found so far is in the preferred
1549                          * list, so it beats any non-preferred route
1550                          */
1551                         continue;
1552
1553                 if (!best_route) {
1554                         best_route = last_route = route;
1555                         best_gw_ni = lpni;
1556                         continue;
1557                 }
1558
1559                 rc = lnet_compare_routes(route, best_route);
1560                 if (rc == -1)
1561                         continue;
1562
1563                 /* Restrict the selection of the router NI on the
1564                  * src_net provided. If the src_net is LNET_NID_ANY,
1565                  * then select the best interface available.
1566                  */
1567                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1568                                            route->lr_gateway,
1569                                            src_net);
1570                 if (!lpni) {
1571                         CDEBUG(D_NET,
1572                                "Gateway %s does not have a peer NI on net %s\n",
1573                                libcfs_nid2str(gw_pnid),
1574                                libcfs_net2str(src_net));
1575                         continue;
1576                 }
1577
1578                 if (rc == 1) {
1579                         best_route = route;
1580                         best_gw_ni = lpni;
1581                         continue;
1582                 }
1583
1584                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1585                 if (rc == -1)
1586                         continue;
1587
1588                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1589                         best_route = route;
1590                         best_gw_ni = lpni;
1591                         continue;
1592                 }
1593         }
1594
1595         *prev_route = last_route;
1596         *gwni = best_gw_ni;
1597
1598         return best_route;
1599 }
1600
1601 static struct lnet_ni *
1602 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1603                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1604                  int md_cpt)
1605 {
1606         struct lnet_ni *ni = NULL;
1607         unsigned int shortest_distance;
1608         int best_credits;
1609         int best_healthv;
1610         __u32 best_sel_prio;
1611
1612         /*
1613          * If there is no peer_ni that we can send to on this network,
1614          * then there is no point in looking for a new best_ni here.
1615         */
1616         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1617                 return best_ni;
1618
1619         if (best_ni == NULL) {
1620                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1621                 shortest_distance = UINT_MAX;
1622                 best_credits = INT_MIN;
1623                 best_healthv = 0;
1624         } else {
1625                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1626                                                      best_ni->ni_dev_cpt);
1627                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1628                 best_healthv = atomic_read(&best_ni->ni_healthv);
1629                 best_sel_prio = best_ni->ni_sel_priority;
1630         }
1631
1632         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1633                 unsigned int distance;
1634                 int ni_credits;
1635                 int ni_healthv;
1636                 int ni_fatal;
1637                 __u32 ni_sel_prio;
1638
1639                 ni_credits = atomic_read(&ni->ni_tx_credits);
1640                 ni_healthv = atomic_read(&ni->ni_healthv);
1641                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1642                 ni_sel_prio = ni->ni_sel_priority;
1643
1644                 /*
1645                  * calculate the distance from the CPT on which
1646                  * the message memory is allocated to the CPT of
1647                  * the NI's physical device
1648                  */
1649                 distance = cfs_cpt_distance(lnet_cpt_table(),
1650                                             md_cpt,
1651                                             ni->ni_dev_cpt);
1652
1653                 /*
1654                  * All distances smaller than the NUMA range
1655                  * are treated equally.
1656                  */
1657                 if (distance < lnet_numa_range)
1658                         distance = lnet_numa_range;
1659
1660                 /*
1661                  * Select on health, shorter distance, available
1662                  * credits, then round-robin.
1663                  */
1664                 if (ni_fatal)
1665                         continue;
1666
1667                 if (best_ni)
1668                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u]\n",
1669                                libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1670                                ni->ni_seq, ni_sel_prio,
1671                                (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1672                                : "not selected", best_credits, shortest_distance,
1673                                (best_ni) ? best_ni->ni_seq : 0,
1674                                best_sel_prio);
1675                 else
1676                         goto select_ni;
1677
1678                 if (ni_healthv < best_healthv)
1679                         continue;
1680                 else if (ni_healthv > best_healthv)
1681                         goto select_ni;
1682
1683                 if (ni_sel_prio > best_sel_prio)
1684                         continue;
1685                 else if (ni_sel_prio < best_sel_prio)
1686                         goto select_ni;
1687
1688                 if (distance > shortest_distance)
1689                         continue;
1690                 else if (distance < shortest_distance)
1691                         goto select_ni;
1692
1693                 if (ni_credits < best_credits)
1694                         continue;
1695                 else if (ni_credits > best_credits)
1696                         goto select_ni;
1697
1698                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1699                         continue;
1700
1701 select_ni:
1702                 best_sel_prio = ni_sel_prio;
1703                 shortest_distance = distance;
1704                 best_healthv = ni_healthv;
1705                 best_ni = ni;
1706                 best_credits = ni_credits;
1707         }
1708
1709         CDEBUG(D_NET, "selected best_ni %s\n",
1710                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1711
1712         return best_ni;
1713 }
1714
1715 /*
1716  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1717  * because such traffic is required to perform discovery. We therefore
1718  * exclude all GET and PUT on that portal. We also exclude all ACK and
1719  * REPLY traffic, but that is because the portal is not tracked in the
1720  * message structure for these message types. We could restrict this
1721  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1722  */
1723 static bool
1724 lnet_msg_discovery(struct lnet_msg *msg)
1725 {
1726         if (msg->msg_type == LNET_MSG_PUT) {
1727                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1728                         return true;
1729         } else if (msg->msg_type == LNET_MSG_GET) {
1730                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1731                         return true;
1732         }
1733         return false;
1734 }
1735
1736 #define SRC_SPEC        0x0001
1737 #define SRC_ANY         0x0002
1738 #define LOCAL_DST       0x0004
1739 #define REMOTE_DST      0x0008
1740 #define MR_DST          0x0010
1741 #define NMR_DST         0x0020
1742 #define SND_RESP        0x0040
1743
1744 /* The following to defines are used for return codes */
1745 #define REPEAT_SEND     0x1000
1746 #define PASS_THROUGH    0x2000
1747
1748 /* The different cases lnet_select pathway needs to handle */
1749 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1750 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1751 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1752 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1753 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1754 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1755 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1756 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
1757
1758 static int
1759 lnet_handle_lo_send(struct lnet_send_data *sd)
1760 {
1761         struct lnet_msg *msg = sd->sd_msg;
1762         int cpt = sd->sd_cpt;
1763
1764         /* No send credit hassles with LOLND */
1765         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1766         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1767         if (!msg->msg_routing)
1768                 msg->msg_hdr.src_nid =
1769                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1770         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1771         lnet_msg_commit(msg, cpt);
1772         msg->msg_txni = the_lnet.ln_loni;
1773
1774         return LNET_CREDIT_OK;
1775 }
1776
1777 static int
1778 lnet_handle_send(struct lnet_send_data *sd)
1779 {
1780         struct lnet_ni *best_ni = sd->sd_best_ni;
1781         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1782         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1783         struct lnet_msg *msg = sd->sd_msg;
1784         int cpt2;
1785         __u32 send_case = sd->sd_send_case;
1786         int rc;
1787         __u32 routing = send_case & REMOTE_DST;
1788          struct lnet_rsp_tracker *rspt;
1789
1790         /*
1791          * Increment sequence number of the selected peer so that we
1792          * pick the next one in Round Robin.
1793          */
1794         best_lpni->lpni_seq++;
1795
1796         /*
1797          * grab a reference on the peer_ni so it sticks around even if
1798          * we need to drop and relock the lnet_net_lock below.
1799          */
1800         lnet_peer_ni_addref_locked(best_lpni);
1801
1802         /*
1803          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1804          * message. This ensures that we get a CPT that is correct for
1805          * the NI when the NI has been restricted to a subset of all CPTs.
1806          * If the selected CPT differs from the one currently locked, we
1807          * must unlock and relock the lnet_net_lock(), and then check whether
1808          * the configuration has changed. We don't have a hold on the best_ni
1809          * yet, and it may have vanished.
1810          */
1811         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1812         if (sd->sd_cpt != cpt2) {
1813                 __u32 seq = lnet_get_dlc_seq_locked();
1814                 lnet_net_unlock(sd->sd_cpt);
1815                 sd->sd_cpt = cpt2;
1816                 lnet_net_lock(sd->sd_cpt);
1817                 if (seq != lnet_get_dlc_seq_locked()) {
1818                         lnet_peer_ni_decref_locked(best_lpni);
1819                         return REPEAT_SEND;
1820                 }
1821         }
1822
1823         /*
1824          * store the best_lpni in the message right away to avoid having
1825          * to do the same operation under different conditions
1826          */
1827         msg->msg_txpeer = best_lpni;
1828         msg->msg_txni = best_ni;
1829
1830         /*
1831          * grab a reference for the best_ni since now it's in use in this
1832          * send. The reference will be dropped in lnet_finalize()
1833          */
1834         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1835
1836         /*
1837          * Always set the target.nid to the best peer picked. Either the
1838          * NID will be one of the peer NIDs selected, or the same NID as
1839          * what was originally set in the target or it will be the NID of
1840          * a router if this message should be routed
1841          */
1842         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1843
1844         /*
1845          * lnet_msg_commit assigns the correct cpt to the message, which
1846          * is used to decrement the correct refcount on the ni when it's
1847          * time to return the credits
1848          */
1849         lnet_msg_commit(msg, sd->sd_cpt);
1850
1851         /*
1852          * If we are routing the message then we keep the src_nid that was
1853          * set by the originator. If we are not routing then we are the
1854          * originator and set it here.
1855          */
1856         if (!msg->msg_routing)
1857                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1858
1859         if (routing) {
1860                 msg->msg_target_is_router = 1;
1861                 msg->msg_target.pid = LNET_PID_LUSTRE;
1862                 /*
1863                  * since we're routing we want to ensure that the
1864                  * msg_hdr.dest_nid is set to the final destination. When
1865                  * the router receives this message it knows how to route
1866                  * it.
1867                  *
1868                  * final_dst_lpni is set at the beginning of the
1869                  * lnet_select_pathway() function and is never changed.
1870                  * It's safe to use it here.
1871                  */
1872                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1873         } else {
1874                 /*
1875                  * if we're not routing set the dest_nid to the best peer
1876                  * ni NID that we picked earlier in the algorithm.
1877                  */
1878                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1879         }
1880
1881         /*
1882          * if we have response tracker block update it with the next hop
1883          * nid
1884          */
1885         if (msg->msg_md) {
1886                 rspt = msg->msg_md->md_rspt_ptr;
1887                 if (rspt) {
1888                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1889                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1890                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1891                 }
1892         }
1893
1894         rc = lnet_post_send_locked(msg, 0);
1895
1896         if (!rc)
1897                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1898                        libcfs_nid2str(msg->msg_hdr.src_nid),
1899                        libcfs_nid2str(msg->msg_txni->ni_nid),
1900                        libcfs_nid2str(sd->sd_src_nid),
1901                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1902                        libcfs_nid2str(sd->sd_dst_nid),
1903                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1904                        libcfs_nid2str(sd->sd_rtr_nid),
1905                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1906
1907         return rc;
1908 }
1909
1910 static inline void
1911 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1912                          struct lnet_msg *msg)
1913 {
1914         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1915             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1916                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1917                        libcfs_nid2str(lni->ni_nid),
1918                        libcfs_nid2str(lpni->lpni_nid));
1919                 lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
1920         }
1921 }
1922
1923 /*
1924  * Source Specified
1925  * Local Destination
1926  * non-mr peer
1927  *
1928  * use the source and destination NIDs as the pathway
1929  */
1930 static int
1931 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1932 {
1933         /* the destination lpni is set before we get here. */
1934
1935         /* find local NI */
1936         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1937         if (!sd->sd_best_ni) {
1938                 CERROR("Can't send to %s: src %s is not a "
1939                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1940                                 libcfs_nid2str(sd->sd_src_nid));
1941                 return -EINVAL;
1942         }
1943
1944         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
1945
1946         return lnet_handle_send(sd);
1947 }
1948
1949 /*
1950  * Source Specified
1951  * Local Destination
1952  * MR Peer
1953  *
1954  * Don't run the selection algorithm on the peer NIs. By specifying the
1955  * local NID, we're also saying that we should always use the destination NID
1956  * provided. This handles the case where we should be using the same
1957  * destination NID for the all the messages which belong to the same RPC
1958  * request.
1959  */
1960 static int
1961 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
1962 {
1963         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1964         if (!sd->sd_best_ni) {
1965                 CERROR("Can't send to %s: src %s is not a "
1966                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1967                                 libcfs_nid2str(sd->sd_src_nid));
1968                 return -EINVAL;
1969         }
1970
1971         if (sd->sd_best_lpni &&
1972             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
1973                 return lnet_handle_lo_send(sd);
1974         else if (sd->sd_best_lpni)
1975                 return lnet_handle_send(sd);
1976
1977         CERROR("can't send to %s. no NI on %s\n",
1978                libcfs_nid2str(sd->sd_dst_nid),
1979                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
1980
1981         return -EHOSTUNREACH;
1982 }
1983
1984 struct lnet_ni *
1985 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
1986                               struct lnet_peer *peer,
1987                               struct lnet_peer_net *peer_net,
1988                               int cpt,
1989                               bool incr_seq)
1990 {
1991         struct lnet_net *local_net;
1992         struct lnet_ni *best_ni;
1993
1994         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
1995         if (!local_net)
1996                 return NULL;
1997
1998         /*
1999          * Iterate through the NIs in this local Net and select
2000          * the NI to send from. The selection is determined by
2001          * these 3 criterion in the following priority:
2002          *      1. NUMA
2003          *      2. NI available credits
2004          *      3. Round Robin
2005          */
2006         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2007                                    peer, peer_net, cpt);
2008
2009         if (incr_seq && best_ni)
2010                 best_ni->ni_seq++;
2011
2012         return best_ni;
2013 }
2014
2015 static int
2016 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2017                              int cpt)
2018 {
2019         struct lnet_peer *peer;
2020         int rc;
2021
2022         lnet_peer_ni_addref_locked(lpni);
2023
2024         peer = lpni->lpni_peer_net->lpn_peer;
2025
2026         if (lnet_peer_gw_discovery(peer)) {
2027                 lnet_peer_ni_decref_locked(lpni);
2028                 return 0;
2029         }
2030
2031         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2032                 lnet_peer_ni_decref_locked(lpni);
2033                 return 0;
2034         }
2035
2036         rc = lnet_discover_peer_locked(lpni, cpt, false);
2037         if (rc) {
2038                 lnet_peer_ni_decref_locked(lpni);
2039                 return rc;
2040         }
2041         /* The peer may have changed. */
2042         peer = lpni->lpni_peer_net->lpn_peer;
2043         spin_lock(&peer->lp_lock);
2044         if (lnet_peer_is_uptodate_locked(peer)) {
2045                 spin_unlock(&peer->lp_lock);
2046                 lnet_peer_ni_decref_locked(lpni);
2047                 return 0;
2048         }
2049         /* queue message and return */
2050         msg->msg_sending = 0;
2051         msg->msg_txpeer = NULL;
2052         list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
2053         spin_unlock(&peer->lp_lock);
2054
2055         lnet_peer_ni_decref_locked(lpni);
2056
2057         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2058                msg, libcfs_nid2str(peer->lp_primary_nid));
2059
2060         return LNET_DC_WAIT;
2061 }
2062
2063 static int
2064 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2065                              lnet_nid_t dst_nid,
2066                              struct lnet_peer_ni **gw_lpni,
2067                              struct lnet_peer **gw_peer)
2068 {
2069         int rc;
2070         __u32 local_lnet;
2071         struct lnet_peer *gw;
2072         struct lnet_peer *lp;
2073         struct lnet_peer_net *lpn;
2074         struct lnet_peer_net *best_lpn = NULL;
2075         struct lnet_remotenet *rnet, *best_rnet = NULL;
2076         struct lnet_route *best_route = NULL;
2077         struct lnet_route *last_route = NULL;
2078         struct lnet_peer_ni *lpni = NULL;
2079         struct lnet_peer_ni *gwni = NULL;
2080         bool route_found = false;
2081         lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
2082                 (sd->sd_best_ni != NULL) ? sd->sd_best_ni->ni_nid :
2083                 LNET_NID_ANY;
2084         int best_lpn_healthv = 0;
2085         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2086
2087         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2088                libcfs_nid2str(src_nid));
2089
2090         /* If a router nid was specified then we are replying to a GET or
2091          * sending an ACK. In this case we use the gateway associated with the
2092          * specified router nid.
2093          */
2094         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2095                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2096                 if (gwni) {
2097                         gw = gwni->lpni_peer_net->lpn_peer;
2098                         lnet_peer_ni_decref_locked(gwni);
2099                         if (gw->lp_rtr_refcount) {
2100                                 local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
2101                                 route_found = true;
2102                         }
2103                 } else {
2104                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2105                                libcfs_nid2str(sd->sd_rtr_nid));
2106                 }
2107         }
2108
2109         if (!route_found) {
2110                 if (sd->sd_msg->msg_routing) {
2111                         /* If I'm routing this message then I need to find the
2112                          * next hop based on the destination NID
2113                          */
2114                         best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid));
2115                         if (!best_rnet) {
2116                                 CERROR("Unable to route message to %s - Route table may be misconfigured\n",
2117                                        libcfs_nid2str(sd->sd_dst_nid));
2118                                 return -EHOSTUNREACH;
2119                         }
2120                 } else {
2121                         /* we've already looked up the initial lpni using
2122                          * dst_nid
2123                          */
2124                         lpni = sd->sd_best_lpni;
2125                         /* the peer tree must be in existence */
2126                         LASSERT(lpni && lpni->lpni_peer_net &&
2127                                 lpni->lpni_peer_net->lpn_peer);
2128                         lp = lpni->lpni_peer_net->lpn_peer;
2129
2130                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2131                                 /* is this remote network reachable?  */
2132                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2133                                 if (!rnet)
2134                                         continue;
2135
2136                                 if (!best_lpn) {
2137                                         best_lpn = lpn;
2138                                         best_rnet = rnet;
2139                                 }
2140
2141                                 /* select the preferred peer net */
2142                                 if (best_lpn_healthv > lpn->lpn_healthv)
2143                                         continue;
2144                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2145                                         goto use_lpn;
2146
2147                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2148                                         continue;
2149                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2150                                         goto use_lpn;
2151
2152                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2153                                         continue;
2154 use_lpn:
2155                                 best_lpn_healthv = lpn->lpn_healthv;
2156                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2157                                 best_lpn = lpn;
2158                                 best_rnet = rnet;
2159                         }
2160
2161                         if (!best_lpn) {
2162                                 CERROR("peer %s has no available nets\n",
2163                                        libcfs_nid2str(sd->sd_dst_nid));
2164                                 return -EHOSTUNREACH;
2165                         }
2166
2167                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2168                                                                sd->sd_dst_nid,
2169                                                                lp,
2170                                                                best_lpn->lpn_net_id);
2171                         if (!sd->sd_best_lpni) {
2172                                 CERROR("peer %s is unreachable\n",
2173                                        libcfs_nid2str(sd->sd_dst_nid));
2174                                 return -EHOSTUNREACH;
2175                         }
2176
2177                         /* We're attempting to round robin over the remote peer
2178                          * NI's so update the final destination we selected
2179                          */
2180                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2181
2182                         /* Increment the sequence number of the remote lpni so
2183                          * we can round robin over the different interfaces of
2184                          * the remote lpni
2185                          */
2186                         sd->sd_best_lpni->lpni_seq++;
2187                 }
2188
2189                 /*
2190                  * find the best route. Restrict the selection on the net of the
2191                  * local NI if we've already picked the local NI to send from.
2192                  * Otherwise, let's pick any route we can find and then find
2193                  * a local NI we can reach the route's gateway on. Any route we
2194                  * select will be reachable by virtue of the restriction we have
2195                  * when adding a route.
2196                  */
2197                 best_route = lnet_find_route_locked(best_rnet,
2198                                                     LNET_NIDNET(src_nid),
2199                                                     sd->sd_best_lpni,
2200                                                     &last_route, &gwni);
2201
2202                 if (!best_route) {
2203                         CERROR("no route to %s from %s\n",
2204                                libcfs_nid2str(dst_nid),
2205                                libcfs_nid2str(src_nid));
2206                         return -EHOSTUNREACH;
2207                 }
2208
2209                 if (!gwni) {
2210                         CERROR("Internal Error. Route expected to %s from %s\n",
2211                                libcfs_nid2str(dst_nid),
2212                                libcfs_nid2str(src_nid));
2213                         return -EFAULT;
2214                 }
2215
2216                 gw = best_route->lr_gateway;
2217                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2218                 local_lnet = best_route->lr_lnet;
2219         }
2220
2221         /*
2222          * Discover this gateway if it hasn't already been discovered.
2223          * This means we might delay the message until discovery has
2224          * completed
2225          */
2226         sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
2227         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2228         if (rc)
2229                 return rc;
2230
2231         if (!sd->sd_best_ni)
2232                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
2233                                         lnet_peer_get_net_locked(gw,
2234                                                                  local_lnet),
2235                                         sd->sd_md_cpt,
2236                                         true);
2237
2238         if (!sd->sd_best_ni) {
2239                 CERROR("Internal Error. Expected local ni on %s but non found :%s\n",
2240                        libcfs_net2str(local_lnet),
2241                        libcfs_nid2str(sd->sd_src_nid));
2242                 return -EFAULT;
2243         }
2244
2245         *gw_lpni = gwni;
2246         *gw_peer = gw;
2247
2248         /*
2249          * increment the sequence numbers since now we're sure we're
2250          * going to use this path
2251          */
2252         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2253                 LASSERT(best_route && last_route);
2254                 best_route->lr_seq = last_route->lr_seq + 1;
2255                 if (best_lpn)
2256                         best_lpn->lpn_seq++;
2257         }
2258
2259         return 0;
2260 }
2261
2262 /*
2263  * Handle two cases:
2264  *
2265  * Case 1:
2266  *  Source specified
2267  *  Remote destination
2268  *  Non-MR destination
2269  *
2270  * Case 2:
2271  *  Source specified
2272  *  Remote destination
2273  *  MR destination
2274  *
2275  * The handling of these two cases is similar. Even though the destination
2276  * can be MR or non-MR, we'll deal directly with the router.
2277  */
2278 static int
2279 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2280 {
2281         int rc;
2282         struct lnet_peer_ni *gw_lpni = NULL;
2283         struct lnet_peer *gw_peer = NULL;
2284
2285         /* find local NI */
2286         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2287         if (!sd->sd_best_ni) {
2288                 CERROR("Can't send to %s: src %s is not a "
2289                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2290                                 libcfs_nid2str(sd->sd_src_nid));
2291                 return -EINVAL;
2292         }
2293
2294         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2295                                      &gw_peer);
2296         if (rc)
2297                 return rc;
2298
2299         if (sd->sd_send_case & NMR_DST)
2300                 /*
2301                  * since the final destination is non-MR let's set its preferred
2302                  * NID before we send
2303                  */
2304                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2305                                          sd->sd_msg);
2306
2307         /*
2308          * We're going to send to the gw found so let's set its
2309          * info
2310          */
2311         sd->sd_peer = gw_peer;
2312         sd->sd_best_lpni = gw_lpni;
2313
2314         return lnet_handle_send(sd);
2315 }
2316
2317 struct lnet_ni *
2318 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2319                                bool discovery)
2320 {
2321         struct lnet_peer_net *peer_net = NULL;
2322         struct lnet_ni *best_ni = NULL;
2323         int lpn_healthv = 0;
2324
2325         /*
2326          * The peer can have multiple interfaces, some of them can be on
2327          * the local network and others on a routed network. We should
2328          * prefer the local network. However if the local network is not
2329          * available then we need to try the routed network
2330          */
2331
2332         /* go through all the peer nets and find the best_ni */
2333         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
2334                 /*
2335                  * The peer's list of nets can contain non-local nets. We
2336                  * want to only examine the local ones.
2337                  */
2338                 if (!lnet_get_net_locked(peer_net->lpn_net_id))
2339                         continue;
2340
2341                 /* always select the lpn with the best health */
2342                 if (lpn_healthv <= peer_net->lpn_healthv)
2343                         lpn_healthv = peer_net->lpn_healthv;
2344                 else
2345                         continue;
2346
2347                 best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer, peer_net,
2348                                                         md_cpt, false);
2349
2350                 /*
2351                  * if this is a discovery message and lp_disc_net_id is
2352                  * specified then use that net to send the discovery on.
2353                  */
2354                 if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
2355                     discovery)
2356                         break;
2357         }
2358
2359         if (best_ni)
2360                 /* increment sequence number so we can round robin */
2361                 best_ni->ni_seq++;
2362
2363         return best_ni;
2364 }
2365
2366 static struct lnet_ni *
2367 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2368 {
2369         struct lnet_ni *best_ni = NULL;
2370         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2371         struct lnet_peer_ni *lpni_entry;
2372
2373         /*
2374          * We must use a consistent source address when sending to a
2375          * non-MR peer. However, a non-MR peer can have multiple NIDs
2376          * on multiple networks, and we may even need to talk to this
2377          * peer on multiple networks -- certain types of
2378          * load-balancing configuration do this.
2379          *
2380          * So we need to pick the NI the peer prefers for this
2381          * particular network.
2382          */
2383         LASSERT(peer_net);
2384         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2385                             lpni_peer_nis) {
2386                 if (lpni_entry->lpni_pref_nnids == 0)
2387                         continue;
2388                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2389                 best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt);
2390                 break;
2391         }
2392
2393         return best_ni;
2394 }
2395
2396 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2397 static int
2398 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2399 {
2400         struct lnet_ni *best_ni = NULL;
2401         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2402
2403         /*
2404          * We must use a consistent source address when sending to a
2405          * non-MR peer. However, a non-MR peer can have multiple NIDs
2406          * on multiple networks, and we may even need to talk to this
2407          * peer on multiple networks -- certain types of
2408          * load-balancing configuration do this.
2409          *
2410          * So we need to pick the NI the peer prefers for this
2411          * particular network.
2412          */
2413
2414         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2415                                                        sd->sd_cpt);
2416
2417         /* if best_ni is still not set just pick one */
2418         if (!best_ni) {
2419                 best_ni =
2420                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2421                                                 sd->sd_best_lpni->lpni_peer_net,
2422                                                 sd->sd_md_cpt, true);
2423                 /* If there is no best_ni we don't have a route */
2424                 if (!best_ni) {
2425                         CERROR("no path to %s from net %s\n",
2426                                 libcfs_nid2str(best_lpni->lpni_nid),
2427                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2428                         return -EHOSTUNREACH;
2429                 }
2430         }
2431
2432         sd->sd_best_ni = best_ni;
2433
2434         /* Set preferred NI if necessary. */
2435         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2436
2437         return 0;
2438 }
2439
2440
2441 /*
2442  * Source not specified
2443  * Local destination
2444  * Non-MR Peer
2445  *
2446  * always use the same source NID for NMR peers
2447  * If we've talked to that peer before then we already have a preferred
2448  * source NI associated with it. Otherwise, we select a preferred local NI
2449  * and store it in the peer
2450  */
2451 static int
2452 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2453 {
2454         int rc = 0;
2455
2456         /* sd->sd_best_lpni is already set to the final destination */
2457
2458         /*
2459          * At this point we should've created the peer ni and peer. If we
2460          * can't find it, then something went wrong. Instead of assert
2461          * output a relevant message and fail the send
2462          */
2463         if (!sd->sd_best_lpni) {
2464                 CERROR("Internal fault. Unable to send msg %s to %s. "
2465                        "NID not known\n",
2466                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2467                        libcfs_nid2str(sd->sd_dst_nid));
2468                 return -EFAULT;
2469         }
2470
2471         if (sd->sd_msg->msg_routing) {
2472                 /* If I'm forwarding this message then I can choose any NI
2473                  * on the destination peer net
2474                  */
2475                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2476                                                                sd->sd_peer,
2477                                                                sd->sd_best_lpni->lpni_peer_net,
2478                                                                sd->sd_md_cpt,
2479                                                                true);
2480                 if (!sd->sd_best_ni) {
2481                         CERROR("Unable to forward message to %s. No local NI available\n",
2482                                libcfs_nid2str(sd->sd_dst_nid));
2483                         rc = -EHOSTUNREACH;
2484                 }
2485         } else
2486                 rc = lnet_select_preferred_best_ni(sd);
2487
2488         if (!rc)
2489                 rc = lnet_handle_send(sd);
2490
2491         return rc;
2492 }
2493
2494 static int
2495 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2496 {
2497         /*
2498          * NOTE we've already handled the remote peer case. So we only
2499          * need to worry about the local case here.
2500          *
2501          * if we're sending a response, ACK or reply, we need to send it
2502          * to the destination NID given to us. At this point we already
2503          * have the peer_ni we're suppose to send to, so just find the
2504          * best_ni on the peer net and use that. Since we're sending to an
2505          * MR peer then we can just run the selection algorithm on our
2506          * local NIs and pick the best one.
2507          */
2508         if (sd->sd_send_case & SND_RESP) {
2509                 sd->sd_best_ni =
2510                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2511                                                 sd->sd_best_lpni->lpni_peer_net,
2512                                                 sd->sd_md_cpt, true);
2513
2514                 if (!sd->sd_best_ni) {
2515                         /*
2516                          * We're not going to deal with not able to send
2517                          * a response to the provided final destination
2518                          */
2519                         CERROR("Can't send response to %s. "
2520                                "No local NI available\n",
2521                                 libcfs_nid2str(sd->sd_dst_nid));
2522                         return -EHOSTUNREACH;
2523                 }
2524
2525                 return lnet_handle_send(sd);
2526         }
2527
2528         /*
2529          * If we get here that means we're sending a fresh request, PUT or
2530          * GET, so we need to run our standard selection algorithm.
2531          * First find the best local interface that's on any of the peer's
2532          * networks.
2533          */
2534         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2535                                         sd->sd_md_cpt,
2536                                         lnet_msg_discovery(sd->sd_msg));
2537         if (sd->sd_best_ni) {
2538                 sd->sd_best_lpni =
2539                   lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid,
2540                                       sd->sd_peer,
2541                                       sd->sd_best_ni->ni_net->net_id);
2542
2543                 /*
2544                  * if we're successful in selecting a peer_ni on the local
2545                  * network, then send to it. Otherwise fall through and
2546                  * try and see if we can reach it over another routed
2547                  * network
2548                  */
2549                 if (sd->sd_best_lpni &&
2550                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2551                         /*
2552                          * in case we initially started with a routed
2553                          * destination, let's reset to local
2554                          */
2555                         sd->sd_send_case &= ~REMOTE_DST;
2556                         sd->sd_send_case |= LOCAL_DST;
2557                         return lnet_handle_lo_send(sd);
2558                 } else if (sd->sd_best_lpni) {
2559                         /*
2560                          * in case we initially started with a routed
2561                          * destination, let's reset to local
2562                          */
2563                         sd->sd_send_case &= ~REMOTE_DST;
2564                         sd->sd_send_case |= LOCAL_DST;
2565                         return lnet_handle_send(sd);
2566                 }
2567
2568                 CERROR("Internal Error. Expected to have a best_lpni: "
2569                        "%s -> %s\n",
2570                        libcfs_nid2str(sd->sd_src_nid),
2571                        libcfs_nid2str(sd->sd_dst_nid));
2572
2573                 return -EFAULT;
2574         }
2575
2576         /*
2577          * Peer doesn't have a local network. Let's see if there is
2578          * a remote network we can reach it on.
2579          */
2580         return PASS_THROUGH;
2581 }
2582
2583 /*
2584  * Case 1:
2585  *      Source NID not specified
2586  *      Local destination
2587  *      MR peer
2588  *
2589  * Case 2:
2590  *      Source NID not speified
2591  *      Remote destination
2592  *      MR peer
2593  *
2594  * In both of these cases if we're sending a response, ACK or REPLY, then
2595  * we need to send to the destination NID provided.
2596  *
2597  * In the remote case let's deal with MR routers.
2598  *
2599  */
2600
2601 static int
2602 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2603 {
2604         int rc = 0;
2605         struct lnet_peer *gw_peer = NULL;
2606         struct lnet_peer_ni *gw_lpni = NULL;
2607
2608         /*
2609          * handle sending a response to a remote peer here so we don't
2610          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2611          */
2612         if (sd->sd_send_case & REMOTE_DST &&
2613             sd->sd_send_case & SND_RESP) {
2614                 struct lnet_peer_ni *gw;
2615                 struct lnet_peer *gw_peer;
2616
2617                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2618                                                   &gw_peer);
2619                 if (rc < 0) {
2620                         CERROR("Can't send response to %s. "
2621                                "No route available\n",
2622                                 libcfs_nid2str(sd->sd_dst_nid));
2623                         return -EHOSTUNREACH;
2624                 } else if (rc > 0) {
2625                         return rc;
2626                 }
2627
2628                 sd->sd_best_lpni = gw;
2629                 sd->sd_peer = gw_peer;
2630
2631                 return lnet_handle_send(sd);
2632         }
2633
2634         /*
2635          * Even though the NID for the peer might not be on a local network,
2636          * since the peer is MR there could be other interfaces on the
2637          * local network. In that case we'd still like to prefer the local
2638          * network over the routed network. If we're unable to do that
2639          * then we select the best router among the different routed networks,
2640          * and if the router is MR then we can deal with it as such.
2641          */
2642         rc = lnet_handle_any_mr_dsta(sd);
2643         if (rc != PASS_THROUGH)
2644                 return rc;
2645
2646         /*
2647          * Now that we must route to the destination, we must consider the
2648          * MR case, where the destination has multiple interfaces, some of
2649          * which we can route to and others we do not. For this reason we
2650          * need to select the destination which we can route to and if
2651          * there are multiple, we need to round robin.
2652          */
2653         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2654                                           &gw_peer);
2655         if (rc)
2656                 return rc;
2657
2658         sd->sd_send_case &= ~LOCAL_DST;
2659         sd->sd_send_case |= REMOTE_DST;
2660
2661         sd->sd_peer = gw_peer;
2662         sd->sd_best_lpni = gw_lpni;
2663
2664         return lnet_handle_send(sd);
2665 }
2666
2667 /*
2668  * Source not specified
2669  * Remote destination
2670  * Non-MR peer
2671  *
2672  * Must send to the specified peer NID using the same source NID that
2673  * we've used before. If it's the first time to talk to that peer then
2674  * find the source NI and assign it as preferred to that peer
2675  */
2676 static int
2677 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2678 {
2679         int rc;
2680         struct lnet_peer_ni *gw_lpni = NULL;
2681         struct lnet_peer *gw_peer = NULL;
2682
2683         /*
2684          * Let's see if we have a preferred NI to talk to this NMR peer
2685          */
2686         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2687                                                               sd->sd_cpt);
2688
2689         /*
2690          * find the router and that'll find the best NI if we didn't find
2691          * it already.
2692          */
2693         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2694                                           &gw_peer);
2695         if (rc)
2696                 return rc;
2697
2698         /*
2699          * set the best_ni we've chosen as the preferred one for
2700          * this peer
2701          */
2702         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2703
2704         /* we'll be sending to the gw */
2705         sd->sd_best_lpni = gw_lpni;
2706         sd->sd_peer = gw_peer;
2707
2708         return lnet_handle_send(sd);
2709 }
2710
2711 static int
2712 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2713 {
2714         /*
2715          * turn off the SND_RESP bit.
2716          * It will be checked in the case handling
2717          */
2718         __u32 send_case = sd->sd_send_case &= ~SND_RESP ;
2719
2720         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2721                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2722                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2723                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2724                 libcfs_nid2str(sd->sd_dst_nid),
2725                 (send_case & LOCAL_DST) ? "local" : "routed");
2726
2727         switch (send_case) {
2728         /*
2729          * For all cases where the source is specified, we should always
2730          * use the destination NID, whether it's an MR destination or not,
2731          * since we're continuing a series of related messages for the
2732          * same RPC
2733          */
2734         case SRC_SPEC_LOCAL_NMR_DST:
2735                 return lnet_handle_spec_local_nmr_dst(sd);
2736         case SRC_SPEC_LOCAL_MR_DST:
2737                 return lnet_handle_spec_local_mr_dst(sd);
2738         case SRC_SPEC_ROUTER_NMR_DST:
2739         case SRC_SPEC_ROUTER_MR_DST:
2740                 return lnet_handle_spec_router_dst(sd);
2741         case SRC_ANY_LOCAL_NMR_DST:
2742                 return lnet_handle_any_local_nmr_dst(sd);
2743         case SRC_ANY_LOCAL_MR_DST:
2744         case SRC_ANY_ROUTER_MR_DST:
2745                 return lnet_handle_any_mr_dst(sd);
2746         case SRC_ANY_ROUTER_NMR_DST:
2747                 return lnet_handle_any_router_nmr_dst(sd);
2748         default:
2749                 CERROR("Unknown send case\n");
2750                 return -1;
2751         }
2752 }
2753
2754 static int
2755 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2756                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2757 {
2758         struct lnet_peer_ni *lpni;
2759         struct lnet_peer *peer;
2760         struct lnet_send_data send_data;
2761         int cpt, rc;
2762         int md_cpt;
2763         __u32 send_case = 0;
2764         bool final_hop;
2765         bool mr_forwarding_allowed;
2766
2767         memset(&send_data, 0, sizeof(send_data));
2768
2769         /*
2770          * get an initial CPT to use for locking. The idea here is not to
2771          * serialize the calls to select_pathway, so that as many
2772          * operations can run concurrently as possible. To do that we use
2773          * the CPT where this call is being executed. Later on when we
2774          * determine the CPT to use in lnet_message_commit, we switch the
2775          * lock and check if there was any configuration change.  If none,
2776          * then we proceed, if there is, then we restart the operation.
2777          */
2778         cpt = lnet_net_lock_current();
2779
2780         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2781         if (md_cpt == CFS_CPT_ANY)
2782                 md_cpt = cpt;
2783
2784 again:
2785
2786         /*
2787          * If we're being asked to send to the loopback interface, there
2788          * is no need to go through any selection. We can just shortcut
2789          * the entire process and send over lolnd
2790          */
2791         send_data.sd_msg = msg;
2792         send_data.sd_cpt = cpt;
2793         if (dst_nid == LNET_NID_LO_0) {
2794                 rc = lnet_handle_lo_send(&send_data);
2795                 lnet_net_unlock(cpt);
2796                 return rc;
2797         }
2798
2799         /*
2800          * find an existing peer_ni, or create one and mark it as having been
2801          * created due to network traffic. This call will create the
2802          * peer->peer_net->peer_ni tree.
2803          */
2804         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2805         if (IS_ERR(lpni)) {
2806                 lnet_net_unlock(cpt);
2807                 return PTR_ERR(lpni);
2808         }
2809
2810         /*
2811          * Cache the original src_nid and rtr_nid. If we need to resend the
2812          * message then we'll need to know whether the src_nid was originally
2813          * specified for this message. If it was originally specified,
2814          * then we need to keep using the same src_nid since it's
2815          * continuing the same sequence of messages. Similarly, rtr_nid will
2816          * affect our choice of next hop.
2817          */
2818         msg->msg_src_nid_param = src_nid;
2819         msg->msg_rtr_nid_param = rtr_nid;
2820
2821         /*
2822          * If necessary, perform discovery on the peer that owns this peer_ni.
2823          * Note, this can result in the ownership of this peer_ni changing
2824          * to another peer object.
2825          */
2826         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2827         if (rc) {
2828                 lnet_peer_ni_decref_locked(lpni);
2829                 lnet_net_unlock(cpt);
2830                 return rc;
2831         }
2832         lnet_peer_ni_decref_locked(lpni);
2833
2834         peer = lpni->lpni_peer_net->lpn_peer;
2835
2836         /*
2837          * Identify the different send cases
2838          */
2839         if (src_nid == LNET_NID_ANY)
2840                 send_case |= SRC_ANY;
2841         else
2842                 send_case |= SRC_SPEC;
2843
2844         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2845                 send_case |= LOCAL_DST;
2846         else
2847                 send_case |= REMOTE_DST;
2848
2849         final_hop = false;
2850         if (msg->msg_routing && (send_case & LOCAL_DST))
2851                 final_hop = true;
2852
2853         /* Determine whether to allow MR forwarding for this message.
2854          * NB: MR forwarding is allowed if the message originator and the
2855          * destination are both MR capable, and the destination lpni that was
2856          * originally chosen by the originator is unhealthy or down.
2857          * We check the MR capability of the destination further below
2858          */
2859         mr_forwarding_allowed = false;
2860         if (final_hop) {
2861                 struct lnet_peer *src_lp;
2862                 struct lnet_peer_ni *src_lpni;
2863
2864                 src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid,
2865                                                   LNET_NID_ANY, cpt);
2866                 /* We don't fail the send if we hit any errors here. We'll just
2867                  * try to send it via non-multi-rail criteria
2868                  */
2869                 if (!IS_ERR(src_lpni)) {
2870                         /* Drop ref taken by lnet_nid2peerni_locked() */
2871                         lnet_peer_ni_decref_locked(src_lpni);
2872                         src_lp = lpni->lpni_peer_net->lpn_peer;
2873                         if (lnet_peer_is_multi_rail(src_lp) &&
2874                             !lnet_is_peer_ni_alive(lpni))
2875                                 mr_forwarding_allowed = true;
2876
2877                 }
2878                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
2879                        mr_forwarding_allowed ? "allowed" : "not allowed");
2880         }
2881
2882         /*
2883          * Deal with the peer as NMR in the following cases:
2884          * 1. the peer is NMR
2885          * 2. We're trying to recover a specific peer NI
2886          * 3. I'm a router sending to the final destination and MR forwarding is
2887          *    not allowed for this message (as determined above).
2888          *    In this case the source of the message would've
2889          *    already selected the final destination so my job
2890          *    is to honor the selection.
2891          */
2892         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
2893             (final_hop && !mr_forwarding_allowed))
2894                 send_case |= NMR_DST;
2895         else
2896                 send_case |= MR_DST;
2897
2898         if (lnet_msg_is_response(msg))
2899                 send_case |= SND_RESP;
2900
2901         /* assign parameters to the send_data */
2902         send_data.sd_rtr_nid = rtr_nid;
2903         send_data.sd_src_nid = src_nid;
2904         send_data.sd_dst_nid = dst_nid;
2905         send_data.sd_best_lpni = lpni;
2906         /*
2907          * keep a pointer to the final destination in case we're going to
2908          * route, so we'll need to access it later
2909          */
2910         send_data.sd_final_dst_lpni = lpni;
2911         send_data.sd_peer = peer;
2912         send_data.sd_md_cpt = md_cpt;
2913         send_data.sd_send_case = send_case;
2914
2915         rc = lnet_handle_send_case_locked(&send_data);
2916
2917         /*
2918          * Update the local cpt since send_data.sd_cpt might've been
2919          * updated as a result of calling lnet_handle_send_case_locked().
2920          */
2921         cpt = send_data.sd_cpt;
2922
2923         if (rc == REPEAT_SEND)
2924                 goto again;
2925
2926         lnet_net_unlock(cpt);
2927
2928         return rc;
2929 }
2930
2931 int
2932 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
2933 {
2934         lnet_nid_t              dst_nid = msg->msg_target.nid;
2935         int                     rc;
2936
2937         /*
2938          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
2939          * but we might want to use pre-determined router for ACK/REPLY
2940          * in the future
2941          */
2942         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
2943         LASSERT(msg->msg_txpeer == NULL);
2944         LASSERT(msg->msg_txni == NULL);
2945         LASSERT(!msg->msg_sending);
2946         LASSERT(!msg->msg_target_is_router);
2947         LASSERT(!msg->msg_receiving);
2948
2949         msg->msg_sending = 1;
2950
2951         LASSERT(!msg->msg_tx_committed);
2952
2953         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
2954         if (rc < 0) {
2955                 if (rc == -EHOSTUNREACH)
2956                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
2957                 else
2958                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
2959                 return rc;
2960         }
2961
2962         if (rc == LNET_CREDIT_OK)
2963                 lnet_ni_send(msg->msg_txni, msg);
2964
2965         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
2966         return 0;
2967 }
2968
2969 enum lnet_mt_event_type {
2970         MT_TYPE_LOCAL_NI = 0,
2971         MT_TYPE_PEER_NI
2972 };
2973
2974 struct lnet_mt_event_info {
2975         enum lnet_mt_event_type mt_type;
2976         lnet_nid_t mt_nid;
2977 };
2978
2979 /* called with res_lock held */
2980 void
2981 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
2982 {
2983         struct lnet_rsp_tracker *rspt;
2984
2985         /*
2986          * msg has a refcount on the MD so the MD is not going away.
2987          * The rspt queue for the cpt is protected by
2988          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
2989          */
2990         if (!md->md_rspt_ptr)
2991                 return;
2992
2993         rspt = md->md_rspt_ptr;
2994
2995         /* debug code */
2996         LASSERT(rspt->rspt_cpt == cpt);
2997
2998         md->md_rspt_ptr = NULL;
2999
3000         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3001                 /*
3002                  * The monitor thread has invalidated this handle because the
3003                  * response timed out, but it failed to lookup the MD. That
3004                  * means this response tracker is on the zombie list. We can
3005                  * safely remove it under the resource lock (held by caller) and
3006                  * free the response tracker block.
3007                  */
3008                 list_del(&rspt->rspt_on_list);
3009                 lnet_rspt_free(rspt, cpt);
3010         } else {
3011                 /*
3012                  * invalidate the handle to indicate that a response has been
3013                  * received, which will then lead the monitor thread to clean up
3014                  * the rspt block.
3015                  */
3016                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3017         }
3018 }
3019
3020 void
3021 lnet_clean_zombie_rstqs(void)
3022 {
3023         struct lnet_rsp_tracker *rspt, *tmp;
3024         int i;
3025
3026         cfs_cpt_for_each(i, lnet_cpt_table()) {
3027                 list_for_each_entry_safe(rspt, tmp,
3028                                          the_lnet.ln_mt_zombie_rstqs[i],
3029                                          rspt_on_list) {
3030                         list_del(&rspt->rspt_on_list);
3031                         lnet_rspt_free(rspt, i);
3032                 }
3033         }
3034
3035         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3036 }
3037
3038 static void
3039 lnet_finalize_expired_responses(void)
3040 {
3041         struct lnet_libmd *md;
3042         struct lnet_rsp_tracker *rspt, *tmp;
3043         ktime_t now;
3044         int i;
3045
3046         if (the_lnet.ln_mt_rstq == NULL)
3047                 return;
3048
3049         cfs_cpt_for_each(i, lnet_cpt_table()) {
3050                 LIST_HEAD(local_queue);
3051
3052                 lnet_net_lock(i);
3053                 if (!the_lnet.ln_mt_rstq[i]) {
3054                         lnet_net_unlock(i);
3055                         continue;
3056                 }
3057                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3058                 lnet_net_unlock(i);
3059
3060                 now = ktime_get();
3061
3062                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3063                         /*
3064                          * The rspt mdh will be invalidated when a response
3065                          * is received or whenever we want to discard the
3066                          * block the monitor thread will walk the queue
3067                          * and clean up any rsts with an invalid mdh.
3068                          * The monitor thread will walk the queue until
3069                          * the first unexpired rspt block. This means that
3070                          * some rspt blocks which received their
3071                          * corresponding responses will linger in the
3072                          * queue until they are cleaned up eventually.
3073                          */
3074                         lnet_res_lock(i);
3075                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3076                                 lnet_res_unlock(i);
3077                                 list_del(&rspt->rspt_on_list);
3078                                 lnet_rspt_free(rspt, i);
3079                                 continue;
3080                         }
3081
3082                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3083                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3084                                 struct lnet_peer_ni *lpni;
3085                                 lnet_nid_t nid;
3086
3087                                 md = lnet_handle2md(&rspt->rspt_mdh);
3088                                 if (!md) {
3089                                         /* MD has been queued for unlink, but
3090                                          * rspt hasn't been detached (Note we've
3091                                          * checked above that the rspt_mdh is
3092                                          * valid). Since we cannot lookup the MD
3093                                          * we're unable to detach the rspt
3094                                          * ourselves. Thus, move the rspt to the
3095                                          * zombie list where we'll wait for
3096                                          * either:
3097                                          *   1. The remaining operations on the
3098                                          *   MD to complete. In this case the
3099                                          *   final operation will result in
3100                                          *   lnet_msg_detach_md()->
3101                                          *   lnet_detach_rsp_tracker() where
3102                                          *   we will clean up this response
3103                                          *   tracker.
3104                                          *   2. LNet to shutdown. In this case
3105                                          *   we'll wait until after all LND Nets
3106                                          *   have shutdown and then we can
3107                                          *   safely free any remaining response
3108                                          *   tracker blocks on the zombie list.
3109                                          * Note: We need to hold the resource
3110                                          * lock when adding to the zombie list
3111                                          * because we may have concurrent access
3112                                          * with lnet_detach_rsp_tracker().
3113                                          */
3114                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3115                                         list_move(&rspt->rspt_on_list,
3116                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3117                                         lnet_res_unlock(i);
3118                                         continue;
3119                                 }
3120                                 LASSERT(md->md_rspt_ptr == rspt);
3121                                 md->md_rspt_ptr = NULL;
3122                                 lnet_res_unlock(i);
3123
3124                                 LNetMDUnlink(rspt->rspt_mdh);
3125
3126                                 nid = rspt->rspt_next_hop_nid;
3127
3128                                 list_del(&rspt->rspt_on_list);
3129                                 lnet_rspt_free(rspt, i);
3130
3131                                 /* If we're shutting down we just want to clean
3132                                  * up the rspt blocks
3133                                  */
3134                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3135                                         continue;
3136
3137                                 lnet_net_lock(i);
3138                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3139                                 lnet_net_unlock(i);
3140
3141                                 CDEBUG(D_NET,
3142                                        "Response timeout: md = %p: nid = %s\n",
3143                                        md, libcfs_nid2str(nid));
3144
3145                                 /*
3146                                  * If there is a timeout on the response
3147                                  * from the next hop decrement its health
3148                                  * value so that we don't use it
3149                                  */
3150                                 lnet_net_lock(0);
3151                                 lpni = lnet_find_peer_ni_locked(nid);
3152                                 if (lpni) {
3153                                         lnet_handle_remote_failure_locked(lpni);
3154                                         lnet_peer_ni_decref_locked(lpni);
3155                                 }
3156                                 lnet_net_unlock(0);
3157                         } else {
3158                                 lnet_res_unlock(i);
3159                                 break;
3160                         }
3161                 }
3162
3163                 if (!list_empty(&local_queue)) {
3164                         lnet_net_lock(i);
3165                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3166                         lnet_net_unlock(i);
3167                 }
3168         }
3169 }
3170
3171 static void
3172 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3173 {
3174         struct lnet_msg *msg;
3175
3176         while (!list_empty(resendq)) {
3177                 struct lnet_peer_ni *lpni;
3178
3179                 msg = list_entry(resendq->next, struct lnet_msg,
3180                                  msg_list);
3181
3182                 list_del_init(&msg->msg_list);
3183
3184                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3185                 if (!lpni) {
3186                         lnet_net_unlock(cpt);
3187                         CERROR("Expected that a peer is already created for %s\n",
3188                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3189                         msg->msg_no_resend = true;
3190                         lnet_finalize(msg, -EFAULT);
3191                         lnet_net_lock(cpt);
3192                 } else {
3193                         int rc;
3194
3195                         lnet_peer_ni_decref_locked(lpni);
3196
3197                         lnet_net_unlock(cpt);
3198                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3199                                libcfs_nid2str(msg->msg_src_nid_param),
3200                                libcfs_id2str(msg->msg_target),
3201                                lnet_msgtyp2str(msg->msg_type),
3202                                msg->msg_recovery,
3203                                msg->msg_retry_count);
3204                         rc = lnet_send(msg->msg_src_nid_param, msg,
3205                                        msg->msg_rtr_nid_param);
3206                         if (rc) {
3207                                 CERROR("Error sending %s to %s: %d\n",
3208                                        lnet_msgtyp2str(msg->msg_type),
3209                                        libcfs_id2str(msg->msg_target), rc);
3210                                 msg->msg_no_resend = true;
3211                                 lnet_finalize(msg, rc);
3212                         }
3213                         lnet_net_lock(cpt);
3214                         if (!rc)
3215                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3216                 }
3217         }
3218 }
3219
3220 static void
3221 lnet_resend_pending_msgs(void)
3222 {
3223         int i;
3224
3225         cfs_cpt_for_each(i, lnet_cpt_table()) {
3226                 lnet_net_lock(i);
3227                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3228                 lnet_net_unlock(i);
3229         }
3230 }
3231
3232 /* called with cpt and ni_lock held */
3233 static void
3234 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3235 {
3236         struct lnet_handle_md recovery_mdh;
3237
3238         LNetInvalidateMDHandle(&recovery_mdh);
3239
3240         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3241             force) {
3242                 recovery_mdh = ni->ni_ping_mdh;
3243                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3244         }
3245         lnet_ni_unlock(ni);
3246         lnet_net_unlock(cpt);
3247         if (!LNetMDHandleIsInvalid(recovery_mdh))
3248                 LNetMDUnlink(recovery_mdh);
3249         lnet_net_lock(cpt);
3250         lnet_ni_lock(ni);
3251 }
3252
3253 static void
3254 lnet_recover_local_nis(void)
3255 {
3256         struct lnet_mt_event_info *ev_info;
3257         LIST_HEAD(processed_list);
3258         LIST_HEAD(local_queue);
3259         struct lnet_handle_md mdh;
3260         struct lnet_ni *tmp;
3261         struct lnet_ni *ni;
3262         lnet_nid_t nid;
3263         int healthv;
3264         int rc;
3265
3266         /*
3267          * splice the recovery queue on a local queue. We will iterate
3268          * through the local queue and update it as needed. Once we're
3269          * done with the traversal, we'll splice the local queue back on
3270          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3271          * will be traversed in the next iteration.
3272          */
3273         lnet_net_lock(0);
3274         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3275                          &local_queue);
3276         lnet_net_unlock(0);
3277
3278         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3279                 /*
3280                  * if an NI is being deleted or it is now healthy, there
3281                  * is no need to keep it around in the recovery queue.
3282                  * The monitor thread is the only thread responsible for
3283                  * removing the NI from the recovery queue.
3284                  * Multiple threads can be adding NIs to the recovery
3285                  * queue.
3286                  */
3287                 healthv = atomic_read(&ni->ni_healthv);
3288
3289                 lnet_net_lock(0);
3290                 lnet_ni_lock(ni);
3291                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3292                     healthv == LNET_MAX_HEALTH_VALUE) {
3293                         list_del_init(&ni->ni_recovery);
3294                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3295                         lnet_ni_unlock(ni);
3296                         lnet_ni_decref_locked(ni, 0);
3297                         lnet_net_unlock(0);
3298                         continue;
3299                 }
3300
3301                 /*
3302                  * if the local NI failed recovery we must unlink the md.
3303                  * But we want to keep the local_ni on the recovery queue
3304                  * so we can continue the attempts to recover it.
3305                  */
3306                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3307                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3308                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3309                 }
3310
3311                 lnet_ni_unlock(ni);
3312                 lnet_net_unlock(0);
3313
3314
3315                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3316                        libcfs_nid2str(ni->ni_nid));
3317
3318                 lnet_ni_lock(ni);
3319                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3320                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3321                         lnet_ni_unlock(ni);
3322
3323                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3324                         if (!ev_info) {
3325                                 CERROR("out of memory. Can't recover %s\n",
3326                                        libcfs_nid2str(ni->ni_nid));
3327                                 lnet_ni_lock(ni);
3328                                 ni->ni_recovery_state &=
3329                                   ~LNET_NI_RECOVERY_PENDING;
3330                                 lnet_ni_unlock(ni);
3331                                 continue;
3332                         }
3333
3334                         mdh = ni->ni_ping_mdh;
3335                         /*
3336                          * Invalidate the ni mdh in case it's deleted.
3337                          * We'll unlink the mdh in this case below.
3338                          */
3339                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3340                         nid = ni->ni_nid;
3341
3342                         /*
3343                          * remove the NI from the local queue and drop the
3344                          * reference count to it while we're recovering
3345                          * it. The reason for that, is that the NI could
3346                          * be deleted, and the way the code is structured
3347                          * is if we don't drop the NI, then the deletion
3348                          * code will enter a loop waiting for the
3349                          * reference count to be removed while holding the
3350                          * ln_mutex_lock(). When we look up the peer to
3351                          * send to in lnet_select_pathway() we will try to
3352                          * lock the ln_mutex_lock() as well, leading to
3353                          * a deadlock. By dropping the refcount and
3354                          * removing it from the list, we allow for the NI
3355                          * to be removed, then we use the cached NID to
3356                          * look it up again. If it's gone, then we just
3357                          * continue examining the rest of the queue.
3358                          */
3359                         lnet_net_lock(0);
3360                         list_del_init(&ni->ni_recovery);
3361                         lnet_ni_decref_locked(ni, 0);
3362                         lnet_net_unlock(0);
3363
3364                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3365                         ev_info->mt_nid = nid;
3366                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3367                                             ev_info, the_lnet.ln_mt_handler,
3368                                             true);
3369                         /* lookup the nid again */
3370                         lnet_net_lock(0);
3371                         ni = lnet_nid2ni_locked(nid, 0);
3372                         if (!ni) {
3373                                 /*
3374                                  * the NI has been deleted when we dropped
3375                                  * the ref count
3376                                  */
3377                                 lnet_net_unlock(0);
3378                                 LNetMDUnlink(mdh);
3379                                 continue;
3380                         }
3381                         /*
3382                          * Same note as in lnet_recover_peer_nis(). When
3383                          * we're sending the ping, the NI is free to be
3384                          * deleted or manipulated. By this point it
3385                          * could've been added back on the recovery queue,
3386                          * and a refcount taken on it.
3387                          * So we can't just add it blindly again or we'll
3388                          * corrupt the queue. We must check under lock if
3389                          * it's not on any list and if not then add it
3390                          * to the processed list, which will eventually be
3391                          * spliced back on to the recovery queue.
3392                          */
3393                         ni->ni_ping_mdh = mdh;
3394                         if (list_empty(&ni->ni_recovery)) {
3395                                 list_add_tail(&ni->ni_recovery, &processed_list);
3396                                 lnet_ni_addref_locked(ni, 0);
3397                         }
3398                         lnet_net_unlock(0);
3399
3400                         lnet_ni_lock(ni);
3401                         if (rc)
3402                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3403                 }
3404                 lnet_ni_unlock(ni);
3405         }
3406
3407         /*
3408          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3409          * reexamined in the next iteration.
3410          */
3411         list_splice_init(&processed_list, &local_queue);
3412         lnet_net_lock(0);
3413         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3414         lnet_net_unlock(0);
3415 }
3416
3417 static int
3418 lnet_resendqs_create(void)
3419 {
3420         struct list_head **resendqs;
3421         resendqs = lnet_create_array_of_queues();
3422
3423         if (!resendqs)
3424                 return -ENOMEM;
3425
3426         lnet_net_lock(LNET_LOCK_EX);
3427         the_lnet.ln_mt_resendqs = resendqs;
3428         lnet_net_unlock(LNET_LOCK_EX);
3429
3430         return 0;
3431 }
3432
3433 static void
3434 lnet_clean_local_ni_recoveryq(void)
3435 {
3436         struct lnet_ni *ni;
3437
3438         /* This is only called when the monitor thread has stopped */
3439         lnet_net_lock(0);
3440
3441         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3442                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3443                                 struct lnet_ni, ni_recovery);
3444                 list_del_init(&ni->ni_recovery);
3445                 lnet_ni_lock(ni);
3446                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3447                 lnet_ni_unlock(ni);
3448                 lnet_ni_decref_locked(ni, 0);
3449         }
3450
3451         lnet_net_unlock(0);
3452 }
3453
3454 static void
3455 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3456                                      bool force)
3457 {
3458         struct lnet_handle_md recovery_mdh;
3459
3460         LNetInvalidateMDHandle(&recovery_mdh);
3461
3462         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3463                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3464                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3465         }
3466         spin_unlock(&lpni->lpni_lock);
3467         lnet_net_unlock(cpt);
3468         if (!LNetMDHandleIsInvalid(recovery_mdh))
3469                 LNetMDUnlink(recovery_mdh);
3470         lnet_net_lock(cpt);
3471         spin_lock(&lpni->lpni_lock);
3472 }
3473
3474 static void
3475 lnet_clean_peer_ni_recoveryq(void)
3476 {
3477         struct lnet_peer_ni *lpni, *tmp;
3478
3479         lnet_net_lock(LNET_LOCK_EX);
3480
3481         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3482                                  lpni_recovery) {
3483                 list_del_init(&lpni->lpni_recovery);
3484                 spin_lock(&lpni->lpni_lock);
3485                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3486                 spin_unlock(&lpni->lpni_lock);
3487                 lnet_peer_ni_decref_locked(lpni);
3488         }
3489
3490         lnet_net_unlock(LNET_LOCK_EX);
3491 }
3492
3493 static void
3494 lnet_clean_resendqs(void)
3495 {
3496         struct lnet_msg *msg, *tmp;
3497         LIST_HEAD(msgs);
3498         int i;
3499
3500         cfs_cpt_for_each(i, lnet_cpt_table()) {
3501                 lnet_net_lock(i);
3502                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3503                 lnet_net_unlock(i);
3504                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3505                         list_del_init(&msg->msg_list);
3506                         msg->msg_no_resend = true;
3507                         lnet_finalize(msg, -ESHUTDOWN);
3508                 }
3509         }
3510
3511         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3512 }
3513
3514 static void
3515 lnet_recover_peer_nis(void)
3516 {
3517         struct lnet_mt_event_info *ev_info;
3518         LIST_HEAD(processed_list);
3519         LIST_HEAD(local_queue);
3520         struct lnet_handle_md mdh;
3521         struct lnet_peer_ni *lpni;
3522         struct lnet_peer_ni *tmp;
3523         lnet_nid_t nid;
3524         int healthv;
3525         int rc;
3526
3527         /*
3528          * Always use cpt 0 for locking across all interactions with
3529          * ln_mt_peerNIRecovq
3530          */
3531         lnet_net_lock(0);
3532         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3533                          &local_queue);
3534         lnet_net_unlock(0);
3535
3536         list_for_each_entry_safe(lpni, tmp, &local_queue,
3537                                  lpni_recovery) {
3538                 /*
3539                  * The same protection strategy is used here as is in the
3540                  * local recovery case.
3541                  */
3542                 lnet_net_lock(0);
3543                 healthv = atomic_read(&lpni->lpni_healthv);
3544                 spin_lock(&lpni->lpni_lock);
3545                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3546                     healthv == LNET_MAX_HEALTH_VALUE) {
3547                         list_del_init(&lpni->lpni_recovery);
3548                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3549                         spin_unlock(&lpni->lpni_lock);
3550                         lnet_peer_ni_decref_locked(lpni);
3551                         lnet_net_unlock(0);
3552                         continue;
3553                 }
3554
3555                 /*
3556                  * If the peer NI has failed recovery we must unlink the
3557                  * md. But we want to keep the peer ni on the recovery
3558                  * queue so we can try to continue recovering it
3559                  */
3560                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3561                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3562                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3563                 }
3564
3565                 spin_unlock(&lpni->lpni_lock);
3566                 lnet_net_unlock(0);
3567
3568                 /*
3569                  * NOTE: we're racing with peer deletion from user space.
3570                  * It's possible that a peer is deleted after we check its
3571                  * state. In this case the recovery can create a new peer
3572                  */
3573                 spin_lock(&lpni->lpni_lock);
3574                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3575                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3576                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3577                         spin_unlock(&lpni->lpni_lock);
3578
3579                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3580                         if (!ev_info) {
3581                                 CERROR("out of memory. Can't recover %s\n",
3582                                        libcfs_nid2str(lpni->lpni_nid));
3583                                 spin_lock(&lpni->lpni_lock);
3584                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3585                                 spin_unlock(&lpni->lpni_lock);
3586                                 continue;
3587                         }
3588
3589                         /* look at the comments in lnet_recover_local_nis() */
3590                         mdh = lpni->lpni_recovery_ping_mdh;
3591                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3592                         nid = lpni->lpni_nid;
3593                         lnet_net_lock(0);
3594                         list_del_init(&lpni->lpni_recovery);
3595                         lnet_peer_ni_decref_locked(lpni);
3596                         lnet_net_unlock(0);
3597
3598                         ev_info->mt_type = MT_TYPE_PEER_NI;
3599                         ev_info->mt_nid = nid;
3600                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3601                                             ev_info, the_lnet.ln_mt_handler,
3602                                             true);
3603                         lnet_net_lock(0);
3604                         /*
3605                          * lnet_find_peer_ni_locked() grabs a refcount for
3606                          * us. No need to take it explicitly.
3607                          */
3608                         lpni = lnet_find_peer_ni_locked(nid);
3609                         if (!lpni) {
3610                                 lnet_net_unlock(0);
3611                                 LNetMDUnlink(mdh);
3612                                 continue;
3613                         }
3614
3615                         lpni->lpni_recovery_ping_mdh = mdh;
3616                         /*
3617                          * While we're unlocked the lpni could've been
3618                          * readded on the recovery queue. In this case we
3619                          * don't need to add it to the local queue, since
3620                          * it's already on there and the thread that added
3621                          * it would've incremented the refcount on the
3622                          * peer, which means we need to decref the refcount
3623                          * that was implicitly grabbed by find_peer_ni_locked.
3624                          * Otherwise, if the lpni is still not on
3625                          * the recovery queue, then we'll add it to the
3626                          * processed list.
3627                          */
3628                         if (list_empty(&lpni->lpni_recovery))
3629                                 list_add_tail(&lpni->lpni_recovery, &processed_list);
3630                         else
3631                                 lnet_peer_ni_decref_locked(lpni);
3632                         lnet_net_unlock(0);
3633
3634                         spin_lock(&lpni->lpni_lock);
3635                         if (rc)
3636                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3637                 }
3638                 spin_unlock(&lpni->lpni_lock);
3639         }
3640
3641         list_splice_init(&processed_list, &local_queue);
3642         lnet_net_lock(0);
3643         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3644         lnet_net_unlock(0);
3645 }
3646
3647 static int
3648 lnet_monitor_thread(void *arg)
3649 {
3650         time64_t recovery_timeout = 0;
3651         time64_t rsp_timeout = 0;
3652         int interval;
3653         time64_t now;
3654
3655         wait_for_completion(&the_lnet.ln_started);
3656         /*
3657          * The monitor thread takes care of the following:
3658          *  1. Checks the aliveness of routers
3659          *  2. Checks if there are messages on the resend queue to resend
3660          *     them.
3661          *  3. Check if there are any NIs on the local recovery queue and
3662          *     pings them
3663          *  4. Checks if there are any NIs on the remote recovery queue
3664          *     and pings them.
3665          */
3666         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3667                 now = ktime_get_real_seconds();
3668
3669                 if (lnet_router_checker_active())
3670                         lnet_check_routers();
3671
3672                 lnet_resend_pending_msgs();
3673
3674                 if (now >= rsp_timeout) {
3675                         lnet_finalize_expired_responses();
3676                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3677                 }
3678
3679                 if (now >= recovery_timeout) {
3680                         lnet_recover_local_nis();
3681                         lnet_recover_peer_nis();
3682                         recovery_timeout = now + lnet_recovery_interval;
3683                 }
3684
3685                 /*
3686                  * TODO do we need to check if we should sleep without
3687                  * timeout?  Technically, an active system will always
3688                  * have messages in flight so this check will always
3689                  * evaluate to false. And on an idle system do we care
3690                  * if we wake up every 1 second? Although, we've seen
3691                  * cases where we get a complaint that an idle thread
3692                  * is waking up unnecessarily.
3693                  *
3694                  * Take into account the current net_count when you wake
3695                  * up for alive router checking, since we need to check
3696                  * possibly as many networks as we have configured.
3697                  */
3698                 interval = min(lnet_recovery_interval,
3699                                min((unsigned int) alive_router_check_interval /
3700                                         lnet_current_net_count,
3701                                    lnet_transaction_timeout / 2));
3702                 wait_for_completion_interruptible_timeout(
3703                         &the_lnet.ln_mt_wait_complete,
3704                         cfs_time_seconds(interval));
3705                 /* Must re-init the completion before testing anything,
3706                  * including ln_mt_state.
3707                  */
3708                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3709         }
3710
3711         /* Shutting down */
3712         lnet_net_lock(LNET_LOCK_EX);
3713         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3714         lnet_net_unlock(LNET_LOCK_EX);
3715
3716         /* signal that the monitor thread is exiting */
3717         up(&the_lnet.ln_mt_signal);
3718
3719         return 0;
3720 }
3721
3722 /*
3723  * lnet_send_ping
3724  * Sends a ping.
3725  * Returns == 0 if success
3726  * Returns > 0 if LNetMDBind or prior fails
3727  * Returns < 0 if LNetGet fails
3728  */
3729 int
3730 lnet_send_ping(lnet_nid_t dest_nid,
3731                struct lnet_handle_md *mdh, int nnis,
3732                void *user_data, lnet_handler_t handler, bool recovery)
3733 {
3734         struct lnet_md md = { NULL };
3735         struct lnet_process_id id;
3736         struct lnet_ping_buffer *pbuf;
3737         int rc;
3738
3739         if (dest_nid == LNET_NID_ANY) {
3740                 rc = -EHOSTUNREACH;
3741                 goto fail_error;
3742         }
3743
3744         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3745         if (!pbuf) {
3746                 rc = ENOMEM;
3747                 goto fail_error;
3748         }
3749
3750         /* initialize md content */
3751         md.start     = &pbuf->pb_info;
3752         md.length    = LNET_PING_INFO_SIZE(nnis);
3753         md.threshold = 2; /* GET/REPLY */
3754         md.max_size  = 0;
3755         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3756         md.user_ptr  = user_data;
3757         md.handler   = handler;
3758
3759         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3760         if (rc) {
3761                 lnet_ping_buffer_decref(pbuf);
3762                 CERROR("Can't bind MD: %d\n", rc);
3763                 rc = -rc; /* change the rc to positive */
3764                 goto fail_error;
3765         }
3766         id.pid = LNET_PID_LUSTRE;
3767         id.nid = dest_nid;
3768
3769         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3770                      LNET_RESERVED_PORTAL,
3771                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3772
3773         if (rc)
3774                 goto fail_unlink_md;
3775
3776         return 0;
3777
3778 fail_unlink_md:
3779         LNetMDUnlink(*mdh);
3780         LNetInvalidateMDHandle(mdh);
3781 fail_error:
3782         return rc;
3783 }
3784
3785 static void
3786 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3787                            int status, bool send, bool unlink_event)
3788 {
3789         lnet_nid_t nid = ev_info->mt_nid;
3790
3791         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3792                 struct lnet_ni *ni;
3793
3794                 lnet_net_lock(0);
3795                 ni = lnet_nid2ni_locked(nid, 0);
3796                 if (!ni) {
3797                         lnet_net_unlock(0);
3798                         return;
3799                 }
3800                 lnet_ni_lock(ni);
3801                 if (!send || (send && status != 0))
3802                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3803                 if (status)
3804                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3805                 lnet_ni_unlock(ni);
3806                 lnet_net_unlock(0);
3807
3808                 if (status != 0) {
3809                         CERROR("local NI (%s) recovery failed with %d\n",
3810                                libcfs_nid2str(nid), status);
3811                         return;
3812                 }
3813                 /*
3814                  * need to increment healthv for the ni here, because in
3815                  * the lnet_finalize() path we don't have access to this
3816                  * NI. And in order to get access to it, we'll need to
3817                  * carry forward too much information.
3818                  * In the peer case, it'll naturally be incremented
3819                  */
3820                 if (!unlink_event)
3821                         lnet_inc_healthv(&ni->ni_healthv,
3822                                          lnet_health_sensitivity);
3823         } else {
3824                 struct lnet_peer_ni *lpni;
3825                 int cpt;
3826
3827                 cpt = lnet_net_lock_current();
3828                 lpni = lnet_find_peer_ni_locked(nid);
3829                 if (!lpni) {
3830                         lnet_net_unlock(cpt);
3831                         return;
3832                 }
3833                 spin_lock(&lpni->lpni_lock);
3834                 if (!send || (send && status != 0))
3835                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3836                 if (status)
3837                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3838                 spin_unlock(&lpni->lpni_lock);
3839                 lnet_peer_ni_decref_locked(lpni);
3840                 lnet_net_unlock(cpt);
3841
3842                 if (status != 0)
3843                         CERROR("peer NI (%s) recovery failed with %d\n",
3844                                libcfs_nid2str(nid), status);
3845         }
3846 }
3847
3848 void
3849 lnet_mt_event_handler(struct lnet_event *event)
3850 {
3851         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
3852         struct lnet_ping_buffer *pbuf;
3853
3854         /* TODO: remove assert */
3855         LASSERT(event->type == LNET_EVENT_REPLY ||
3856                 event->type == LNET_EVENT_SEND ||
3857                 event->type == LNET_EVENT_UNLINK);
3858
3859         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3860                event->status);
3861
3862         switch (event->type) {
3863         case LNET_EVENT_UNLINK:
3864                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3865                        libcfs_nid2str(ev_info->mt_nid));
3866                 /* fallthrough */
3867         case LNET_EVENT_REPLY:
3868                 lnet_handle_recovery_reply(ev_info, event->status, false,
3869                                            event->type == LNET_EVENT_UNLINK);
3870                 break;
3871         case LNET_EVENT_SEND:
3872                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3873                                libcfs_nid2str(ev_info->mt_nid),
3874                                (event->status) ? "unsuccessfully" :
3875                                "successfully", event->status);
3876                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
3877                 break;
3878         default:
3879                 CERROR("Unexpected event: %d\n", event->type);
3880                 break;
3881         }
3882         if (event->unlinked) {
3883                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3884                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3885                 lnet_ping_buffer_decref(pbuf);
3886         }
3887 }
3888
3889 static int
3890 lnet_rsp_tracker_create(void)
3891 {
3892         struct list_head **rstqs;
3893         rstqs = lnet_create_array_of_queues();
3894
3895         if (!rstqs)
3896                 return -ENOMEM;
3897
3898         the_lnet.ln_mt_rstq = rstqs;
3899
3900         return 0;
3901 }
3902
3903 static void
3904 lnet_rsp_tracker_clean(void)
3905 {
3906         lnet_finalize_expired_responses();
3907
3908         cfs_percpt_free(the_lnet.ln_mt_rstq);
3909         the_lnet.ln_mt_rstq = NULL;
3910 }
3911
3912 int lnet_monitor_thr_start(void)
3913 {
3914         int rc = 0;
3915         struct task_struct *task;
3916
3917         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
3918                 return -EALREADY;
3919
3920         rc = lnet_resendqs_create();
3921         if (rc)
3922                 return rc;
3923
3924         rc = lnet_rsp_tracker_create();
3925         if (rc)
3926                 goto clean_queues;
3927
3928         sema_init(&the_lnet.ln_mt_signal, 0);
3929
3930         lnet_net_lock(LNET_LOCK_EX);
3931         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
3932         lnet_net_unlock(LNET_LOCK_EX);
3933         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
3934         if (IS_ERR(task)) {
3935                 rc = PTR_ERR(task);
3936                 CERROR("Can't start monitor thread: %d\n", rc);
3937                 goto clean_thread;
3938         }
3939
3940         return 0;
3941
3942 clean_thread:
3943         lnet_net_lock(LNET_LOCK_EX);
3944         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3945         lnet_net_unlock(LNET_LOCK_EX);
3946         /* block until event callback signals exit */
3947         down(&the_lnet.ln_mt_signal);
3948         /* clean up */
3949         lnet_net_lock(LNET_LOCK_EX);
3950         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3951         lnet_net_unlock(LNET_LOCK_EX);
3952         lnet_rsp_tracker_clean();
3953         lnet_clean_local_ni_recoveryq();
3954         lnet_clean_peer_ni_recoveryq();
3955         lnet_clean_resendqs();
3956         the_lnet.ln_mt_handler = NULL;
3957         return rc;
3958 clean_queues:
3959         lnet_rsp_tracker_clean();
3960         lnet_clean_local_ni_recoveryq();
3961         lnet_clean_peer_ni_recoveryq();
3962         lnet_clean_resendqs();
3963         return rc;
3964 }
3965
3966 void lnet_monitor_thr_stop(void)
3967 {
3968         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3969                 return;
3970
3971         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
3972         lnet_net_lock(LNET_LOCK_EX);
3973         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3974         lnet_net_unlock(LNET_LOCK_EX);
3975
3976         /* tell the monitor thread that we're shutting down */
3977         complete(&the_lnet.ln_mt_wait_complete);
3978
3979         /* block until monitor thread signals that it's done */
3980         down(&the_lnet.ln_mt_signal);
3981         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
3982
3983         /* perform cleanup tasks */
3984         lnet_rsp_tracker_clean();
3985         lnet_clean_local_ni_recoveryq();
3986         lnet_clean_peer_ni_recoveryq();
3987         lnet_clean_resendqs();
3988 }
3989
3990 void
3991 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
3992                   __u32 msg_type)
3993 {
3994         lnet_net_lock(cpt);
3995         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
3996         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
3997         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
3998         lnet_net_unlock(cpt);
3999
4000         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4001 }
4002
4003 static void
4004 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4005 {
4006         struct lnet_hdr *hdr = &msg->msg_hdr;
4007
4008         if (msg->msg_wanted != 0)
4009                 lnet_setpayloadbuffer(msg);
4010
4011         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4012
4013         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4014          * it back into the ACK during lnet_finalize() */
4015         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4016                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4017
4018         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4019                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4020 }
4021
4022 static int
4023 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4024 {
4025         struct lnet_hdr         *hdr = &msg->msg_hdr;
4026         struct lnet_match_info  info;
4027         int                     rc;
4028         bool                    ready_delay;
4029
4030         /* Convert put fields to host byte order */
4031         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4032         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4033         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4034
4035         /* Primary peer NID. */
4036         info.mi_id.nid  = msg->msg_initiator;
4037         info.mi_id.pid  = hdr->src_pid;
4038         info.mi_opc     = LNET_MD_OP_PUT;
4039         info.mi_portal  = hdr->msg.put.ptl_index;
4040         info.mi_rlength = hdr->payload_length;
4041         info.mi_roffset = hdr->msg.put.offset;
4042         info.mi_mbits   = hdr->msg.put.match_bits;
4043         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4044
4045         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4046         ready_delay = msg->msg_rx_ready_delay;
4047
4048  again:
4049         rc = lnet_ptl_match_md(&info, msg);
4050         switch (rc) {
4051         default:
4052                 LBUG();
4053
4054         case LNET_MATCHMD_OK:
4055                 lnet_recv_put(ni, msg);
4056                 return 0;
4057
4058         case LNET_MATCHMD_NONE:
4059                 if (ready_delay)
4060                         /* no eager_recv or has already called it, should
4061                          * have been attached on delayed list */
4062                         return 0;
4063
4064                 rc = lnet_ni_eager_recv(ni, msg);
4065                 if (rc == 0) {
4066                         ready_delay = true;
4067                         goto again;
4068                 }
4069                 /* fall through */
4070
4071         case LNET_MATCHMD_DROP:
4072                 CNETERR("Dropping PUT from %s portal %d match %llu"
4073                         " offset %d length %d: %d\n",
4074                         libcfs_id2str(info.mi_id), info.mi_portal,
4075                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4076
4077                 return -ENOENT; /* -ve: OK but no match */
4078         }
4079 }
4080
4081 static int
4082 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4083 {
4084         struct lnet_match_info info;
4085         struct lnet_hdr *hdr = &msg->msg_hdr;
4086         struct lnet_process_id source_id;
4087         struct lnet_handle_wire reply_wmd;
4088         int rc;
4089
4090         /* Convert get fields to host byte order */
4091         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4092         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4093         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4094         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4095
4096         source_id.nid = hdr->src_nid;
4097         source_id.pid = hdr->src_pid;
4098         /* Primary peer NID */
4099         info.mi_id.nid  = msg->msg_initiator;
4100         info.mi_id.pid  = hdr->src_pid;
4101         info.mi_opc     = LNET_MD_OP_GET;
4102         info.mi_portal  = hdr->msg.get.ptl_index;
4103         info.mi_rlength = hdr->msg.get.sink_length;
4104         info.mi_roffset = hdr->msg.get.src_offset;
4105         info.mi_mbits   = hdr->msg.get.match_bits;
4106         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4107
4108         rc = lnet_ptl_match_md(&info, msg);
4109         if (rc == LNET_MATCHMD_DROP) {
4110                 CNETERR("Dropping GET from %s portal %d match %llu"
4111                         " offset %d length %d\n",
4112                         libcfs_id2str(info.mi_id), info.mi_portal,
4113                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4114                 return -ENOENT; /* -ve: OK but no match */
4115         }
4116
4117         LASSERT(rc == LNET_MATCHMD_OK);
4118
4119         lnet_build_msg_event(msg, LNET_EVENT_GET);
4120
4121         reply_wmd = hdr->msg.get.return_wmd;
4122
4123         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4124                        msg->msg_offset, msg->msg_wanted);
4125
4126         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4127
4128         if (rdma_get) {
4129                 /* The LND completes the REPLY from her recv procedure */
4130                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4131                              msg->msg_offset, msg->msg_len, msg->msg_len);
4132                 return 0;
4133         }
4134
4135         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4136         msg->msg_receiving = 0;
4137
4138         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
4139         if (rc < 0) {
4140                 /* didn't get as far as lnet_ni_send() */
4141                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4142                        libcfs_nid2str(ni->ni_nid),
4143                        libcfs_id2str(info.mi_id), rc);
4144
4145                 lnet_finalize(msg, rc);
4146         }
4147
4148         return 0;
4149 }
4150
4151 static int
4152 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4153 {
4154         void *private = msg->msg_private;
4155         struct lnet_hdr *hdr = &msg->msg_hdr;
4156         struct lnet_process_id src = {0};
4157         struct lnet_libmd *md;
4158         unsigned int rlength;
4159         unsigned int mlength;
4160         int cpt;
4161
4162         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4163         lnet_res_lock(cpt);
4164
4165         src.nid = hdr->src_nid;
4166         src.pid = hdr->src_pid;
4167
4168         /* NB handles only looked up by creator (no flips) */
4169         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4170         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4171                 CNETERR("%s: Dropping REPLY from %s for %s "
4172                         "MD %#llx.%#llx\n",
4173                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4174                         (md == NULL) ? "invalid" : "inactive",
4175                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4176                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4177                 if (md != NULL && md->md_me != NULL)
4178                         CERROR("REPLY MD also attached to portal %d\n",
4179                                md->md_me->me_portal);
4180
4181                 lnet_res_unlock(cpt);
4182                 return -ENOENT; /* -ve: OK but no match */
4183         }
4184
4185         LASSERT(md->md_offset == 0);
4186
4187         rlength = hdr->payload_length;
4188         mlength = min(rlength, md->md_length);
4189
4190         if (mlength < rlength &&
4191             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4192                 CNETERR("%s: Dropping REPLY from %s length %d "
4193                         "for MD %#llx would overflow (%d)\n",
4194                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4195                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4196                         mlength);
4197                 lnet_res_unlock(cpt);
4198                 return -ENOENT; /* -ve: OK but no match */
4199         }
4200
4201         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4202                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4203                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4204
4205         lnet_msg_attach_md(msg, md, 0, mlength);
4206
4207         if (mlength != 0)
4208                 lnet_setpayloadbuffer(msg);
4209
4210         lnet_res_unlock(cpt);
4211
4212         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4213
4214         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4215         return 0;
4216 }
4217
4218 static int
4219 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4220 {
4221         struct lnet_hdr *hdr = &msg->msg_hdr;
4222         struct lnet_process_id src = {0};
4223         struct lnet_libmd *md;
4224         int cpt;
4225
4226         src.nid = hdr->src_nid;
4227         src.pid = hdr->src_pid;
4228
4229         /* Convert ack fields to host byte order */
4230         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4231         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4232
4233         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4234         lnet_res_lock(cpt);
4235
4236         /* NB handles only looked up by creator (no flips) */
4237         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4238         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4239                 /* Don't moan; this is expected */
4240                 CDEBUG(D_NET,
4241                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4242                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4243                        (md == NULL) ? "invalid" : "inactive",
4244                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4245                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4246                 if (md != NULL && md->md_me != NULL)
4247                         CERROR("Source MD also attached to portal %d\n",
4248                                md->md_me->me_portal);
4249
4250                 lnet_res_unlock(cpt);
4251                 return -ENOENT;                  /* -ve! */
4252         }
4253
4254         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4255                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4256                hdr->msg.ack.dst_wmd.wh_object_cookie);
4257
4258         lnet_msg_attach_md(msg, md, 0, 0);
4259
4260         lnet_res_unlock(cpt);
4261
4262         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4263
4264         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4265         return 0;
4266 }
4267
4268 /**
4269  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4270  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because w/o buffer
4271  * \retval -ve                  error code
4272  */
4273 int
4274 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4275 {
4276         int     rc = 0;
4277
4278         if (!the_lnet.ln_routing)
4279                 return -ECANCELED;
4280
4281         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4282             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4283                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4284                         msg->msg_rx_ready_delay = 1;
4285                 } else {
4286                         lnet_net_unlock(msg->msg_rx_cpt);
4287                         rc = lnet_ni_eager_recv(ni, msg);
4288                         lnet_net_lock(msg->msg_rx_cpt);
4289                 }
4290         }
4291
4292         if (rc == 0)
4293                 rc = lnet_post_routed_recv_locked(msg, 0);
4294         return rc;
4295 }
4296
4297 int
4298 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4299 {
4300         int     rc;
4301
4302         switch (msg->msg_type) {
4303         case LNET_MSG_ACK:
4304                 rc = lnet_parse_ack(ni, msg);
4305                 break;
4306         case LNET_MSG_PUT:
4307                 rc = lnet_parse_put(ni, msg);
4308                 break;
4309         case LNET_MSG_GET:
4310                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4311                 break;
4312         case LNET_MSG_REPLY:
4313                 rc = lnet_parse_reply(ni, msg);
4314                 break;
4315         default: /* prevent an unused label if !kernel */
4316                 LASSERT(0);
4317                 return -EPROTO;
4318         }
4319
4320         LASSERT(rc == 0 || rc == -ENOENT);
4321         return rc;
4322 }
4323
4324 char *
4325 lnet_msgtyp2str (int type)
4326 {
4327         switch (type) {
4328         case LNET_MSG_ACK:
4329                 return ("ACK");
4330         case LNET_MSG_PUT:
4331                 return ("PUT");
4332         case LNET_MSG_GET:
4333                 return ("GET");
4334         case LNET_MSG_REPLY:
4335                 return ("REPLY");
4336         case LNET_MSG_HELLO:
4337                 return ("HELLO");
4338         default:
4339                 return ("<UNKNOWN>");
4340         }
4341 }
4342
4343 int
4344 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4345            void *private, int rdma_req)
4346 {
4347         struct lnet_peer_ni *lpni;
4348         struct lnet_msg *msg;
4349         __u32 payload_length;
4350         lnet_pid_t dest_pid;
4351         lnet_nid_t dest_nid;
4352         lnet_nid_t src_nid;
4353         bool push = false;
4354         int for_me;
4355         __u32 type;
4356         int rc = 0;
4357         int cpt;
4358
4359         LASSERT (!in_interrupt ());
4360
4361         type = le32_to_cpu(hdr->type);
4362         src_nid = le64_to_cpu(hdr->src_nid);
4363         dest_nid = le64_to_cpu(hdr->dest_nid);
4364         dest_pid = le32_to_cpu(hdr->dest_pid);
4365         payload_length = le32_to_cpu(hdr->payload_length);
4366
4367         for_me = (ni->ni_nid == dest_nid);
4368         cpt = lnet_cpt_of_nid(from_nid, ni);
4369
4370         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4371                 libcfs_nid2str(dest_nid),
4372                 libcfs_nid2str(ni->ni_nid),
4373                 libcfs_nid2str(src_nid),
4374                 lnet_msgtyp2str(type),
4375                 (for_me) ? "for me" : "routed");
4376
4377         switch (type) {
4378         case LNET_MSG_ACK:
4379         case LNET_MSG_GET:
4380                 if (payload_length > 0) {
4381                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4382                                libcfs_nid2str(from_nid),
4383                                libcfs_nid2str(src_nid),
4384                                lnet_msgtyp2str(type), payload_length);
4385                         return -EPROTO;
4386                 }
4387                 break;
4388
4389         case LNET_MSG_PUT:
4390         case LNET_MSG_REPLY:
4391                 if (payload_length >
4392                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4393                         CERROR("%s, src %s: bad %s payload %d "
4394                                "(%d max expected)\n",
4395                                libcfs_nid2str(from_nid),
4396                                libcfs_nid2str(src_nid),
4397                                lnet_msgtyp2str(type),
4398                                payload_length,
4399                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4400                         return -EPROTO;
4401                 }
4402                 break;
4403
4404         default:
4405                 CERROR("%s, src %s: Bad message type 0x%x\n",
4406                        libcfs_nid2str(from_nid),
4407                        libcfs_nid2str(src_nid), type);
4408                 return -EPROTO;
4409         }
4410
4411         if (the_lnet.ln_routing &&
4412             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4413                 lnet_ni_lock(ni);
4414                 spin_lock(&ni->ni_net->net_lock);
4415                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4416                 spin_unlock(&ni->ni_net->net_lock);
4417                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4418                 lnet_ni_unlock(ni);
4419         }
4420
4421         if (push)
4422                 lnet_push_update_to_peers(1);
4423
4424         /* Regard a bad destination NID as a protocol error.  Senders should
4425          * know what they're doing; if they don't they're misconfigured, buggy
4426          * or malicious so we chop them off at the knees :) */
4427
4428         if (!for_me) {
4429                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4430                         /* should have gone direct */
4431                         CERROR("%s, src %s: Bad dest nid %s "
4432                                "(should have been sent direct)\n",
4433                                 libcfs_nid2str(from_nid),
4434                                 libcfs_nid2str(src_nid),
4435                                 libcfs_nid2str(dest_nid));
4436                         return -EPROTO;
4437                 }
4438
4439                 if (lnet_islocalnid(dest_nid)) {
4440                         /* dest is another local NI; sender should have used
4441                          * this node's NID on its own network */
4442                         CERROR("%s, src %s: Bad dest nid %s "
4443                                "(it's my nid but on a different network)\n",
4444                                 libcfs_nid2str(from_nid),
4445                                 libcfs_nid2str(src_nid),
4446                                 libcfs_nid2str(dest_nid));
4447                         return -EPROTO;
4448                 }
4449
4450                 if (rdma_req && type == LNET_MSG_GET) {
4451                         CERROR("%s, src %s: Bad optimized GET for %s "
4452                                "(final destination must be me)\n",
4453                                 libcfs_nid2str(from_nid),
4454                                 libcfs_nid2str(src_nid),
4455                                 libcfs_nid2str(dest_nid));
4456                         return -EPROTO;
4457                 }
4458
4459                 if (!the_lnet.ln_routing) {
4460                         CERROR("%s, src %s: Dropping message for %s "
4461                                "(routing not enabled)\n",
4462                                 libcfs_nid2str(from_nid),
4463                                 libcfs_nid2str(src_nid),
4464                                 libcfs_nid2str(dest_nid));
4465                         goto drop;
4466                 }
4467         }
4468
4469         /* Message looks OK; we're not going to return an error, so we MUST
4470          * call back lnd_recv() come what may... */
4471
4472         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4473             fail_peer(src_nid, 0)) {                    /* shall we now? */
4474                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4475                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4476                        lnet_msgtyp2str(type));
4477                 goto drop;
4478         }
4479
4480         if (!list_empty(&the_lnet.ln_drop_rules) &&
4481             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4482                 CDEBUG(D_NET,
4483                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4484                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4485                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4486                 goto drop;
4487         }
4488
4489         if (lnet_drop_asym_route && for_me &&
4490             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4491                 struct lnet_net *net;
4492                 struct lnet_remotenet *rnet;
4493                 bool found = true;
4494
4495                 /* we are dealing with a routed message,
4496                  * so see if route to reach src_nid goes through from_nid
4497                  */
4498                 lnet_net_lock(cpt);
4499                 net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
4500                 if (!net) {
4501                         lnet_net_unlock(cpt);
4502                         CERROR("net %s not found\n",
4503                                libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
4504                         return -EPROTO;
4505                 }
4506
4507                 rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
4508                 if (rnet) {
4509                         struct lnet_peer *gw = NULL;
4510                         struct lnet_peer_ni *lpni = NULL;
4511                         struct lnet_route *route;
4512
4513                         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
4514                                 found = false;
4515                                 gw = route->lr_gateway;
4516                                 if (route->lr_lnet != net->net_id)
4517                                         continue;
4518                                 /*
4519                                  * if the nid is one of the gateway's NIDs
4520                                  * then this is a valid gateway
4521                                  */
4522                                 while ((lpni = lnet_get_next_peer_ni_locked(gw,
4523                                                 NULL, lpni)) != NULL) {
4524                                         if (lpni->lpni_nid == from_nid) {
4525                                                 found = true;
4526                                                 break;
4527                                         }
4528                                 }
4529                         }
4530                 }
4531                 lnet_net_unlock(cpt);
4532                 if (!found) {
4533                         /* we would not use from_nid to route a message to
4534                          * src_nid
4535                          * => asymmetric routing detected but forbidden
4536                          */
4537                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4538                                libcfs_nid2str(from_nid),
4539                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4540                         goto drop;
4541                 }
4542         }
4543
4544         msg = lnet_msg_alloc();
4545         if (msg == NULL) {
4546                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4547                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4548                        lnet_msgtyp2str(type));
4549                 goto drop;
4550         }
4551
4552         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4553          * pointers NULL etc */
4554
4555         msg->msg_type = type;
4556         msg->msg_private = private;
4557         msg->msg_receiving = 1;
4558         msg->msg_rdma_get = rdma_req;
4559         msg->msg_len = msg->msg_wanted = payload_length;
4560         msg->msg_offset = 0;
4561         msg->msg_hdr = *hdr;
4562         /* for building message event */
4563         msg->msg_from = from_nid;
4564         if (!for_me) {
4565                 msg->msg_target.pid     = dest_pid;
4566                 msg->msg_target.nid     = dest_nid;
4567                 msg->msg_routing        = 1;
4568
4569         } else {
4570                 /* convert common msg->hdr fields to host byteorder */
4571                 msg->msg_hdr.type       = type;
4572                 msg->msg_hdr.src_nid    = src_nid;
4573                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4574                 msg->msg_hdr.dest_nid   = dest_nid;
4575                 msg->msg_hdr.dest_pid   = dest_pid;
4576                 msg->msg_hdr.payload_length = payload_length;
4577         }
4578
4579         lnet_net_lock(cpt);
4580         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4581         if (IS_ERR(lpni)) {
4582                 lnet_net_unlock(cpt);
4583                 CERROR("%s, src %s: Dropping %s "
4584                        "(error %ld looking up sender)\n",
4585                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4586                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4587                 lnet_msg_free(msg);
4588                 if (rc == -ESHUTDOWN)
4589                         /* We are shutting down.  Don't do anything more */
4590                         return 0;
4591                 goto drop;
4592         }
4593
4594         if (the_lnet.ln_routing)
4595                 lpni->lpni_last_alive = ktime_get_seconds();
4596
4597         msg->msg_rxpeer = lpni;
4598         msg->msg_rxni = ni;
4599         lnet_ni_addref_locked(ni, cpt);
4600         /* Multi-Rail: Primary NID of source. */
4601         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4602
4603         /*
4604          * mark the status of this lpni as UP since we received a message
4605          * from it. The ping response reports back the ns_status which is
4606          * marked on the remote as up or down and we cache it here.
4607          */
4608         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4609
4610         lnet_msg_commit(msg, cpt);
4611
4612         /* message delay simulation */
4613         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4614                      lnet_delay_rule_match_locked(hdr, msg))) {
4615                 lnet_net_unlock(cpt);
4616                 return 0;
4617         }
4618
4619         if (!for_me) {
4620                 rc = lnet_parse_forward_locked(ni, msg);
4621                 lnet_net_unlock(cpt);
4622
4623                 if (rc < 0)
4624                         goto free_drop;
4625
4626                 if (rc == LNET_CREDIT_OK) {
4627                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4628                                      0, payload_length, payload_length);
4629                 }
4630                 return 0;
4631         }
4632
4633         lnet_net_unlock(cpt);
4634
4635         rc = lnet_parse_local(ni, msg);
4636         if (rc != 0)
4637                 goto free_drop;
4638         return 0;
4639
4640  free_drop:
4641         LASSERT(msg->msg_md == NULL);
4642         lnet_finalize(msg, rc);
4643
4644  drop:
4645         lnet_drop_message(ni, cpt, private, payload_length, type);
4646         return 0;
4647 }
4648 EXPORT_SYMBOL(lnet_parse);
4649
4650 void
4651 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4652 {
4653         while (!list_empty(head)) {
4654                 struct lnet_process_id id = {0};
4655                 struct lnet_msg *msg;
4656
4657                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4658                 list_del(&msg->msg_list);
4659
4660                 id.nid = msg->msg_hdr.src_nid;
4661                 id.pid = msg->msg_hdr.src_pid;
4662
4663                 LASSERT(msg->msg_md == NULL);
4664                 LASSERT(msg->msg_rx_delayed);
4665                 LASSERT(msg->msg_rxpeer != NULL);
4666                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4667
4668                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4669                       " offset %d length %d: %s\n",
4670                       libcfs_id2str(id),
4671                       msg->msg_hdr.msg.put.ptl_index,
4672                       msg->msg_hdr.msg.put.match_bits,
4673                       msg->msg_hdr.msg.put.offset,
4674                       msg->msg_hdr.payload_length, reason);
4675
4676                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4677                  * called lnet_drop_message(), so I just hang onto msg as well
4678                  * until that's done */
4679
4680                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4681                                   msg->msg_private, msg->msg_len,
4682                                   msg->msg_type);
4683
4684                 msg->msg_no_resend = true;
4685                 /*
4686                  * NB: message will not generate event because w/o attached MD,
4687                  * but we still should give error code so lnet_msg_decommit()
4688                  * can skip counters operations and other checks.
4689                  */
4690                 lnet_finalize(msg, -ENOENT);
4691         }
4692 }
4693
4694 void
4695 lnet_recv_delayed_msg_list(struct list_head *head)
4696 {
4697         while (!list_empty(head)) {
4698                 struct lnet_msg *msg;
4699                 struct lnet_process_id id;
4700
4701                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4702                 list_del(&msg->msg_list);
4703
4704                 /* md won't disappear under me, since each msg
4705                  * holds a ref on it */
4706
4707                 id.nid = msg->msg_hdr.src_nid;
4708                 id.pid = msg->msg_hdr.src_pid;
4709
4710                 LASSERT(msg->msg_rx_delayed);
4711                 LASSERT(msg->msg_md != NULL);
4712                 LASSERT(msg->msg_rxpeer != NULL);
4713                 LASSERT(msg->msg_rxni != NULL);
4714                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4715
4716                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4717                        "match %llu offset %d length %d.\n",
4718                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4719                         msg->msg_hdr.msg.put.match_bits,
4720                         msg->msg_hdr.msg.put.offset,
4721                         msg->msg_hdr.payload_length);
4722
4723                 lnet_recv_put(msg->msg_rxni, msg);
4724         }
4725 }
4726
4727 static void
4728 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4729                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4730 {
4731         s64 timeout_ns;
4732         struct lnet_rsp_tracker *local_rspt;
4733
4734         /*
4735          * MD has a refcount taken by message so it's not going away.
4736          * The MD however can be looked up. We need to secure the access
4737          * to the md_rspt_ptr by taking the res_lock.
4738          * The rspt can be accessed without protection up to when it gets
4739          * added to the list.
4740          */
4741
4742         lnet_res_lock(cpt);
4743         local_rspt = md->md_rspt_ptr;
4744         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4745         if (local_rspt != NULL) {
4746                 /*
4747                  * we already have an rspt attached to the md, so we'll
4748                  * update the deadline on that one.
4749                  */
4750                 lnet_rspt_free(rspt, cpt);
4751         } else {
4752                 /* new md */
4753                 rspt->rspt_mdh = mdh;
4754                 rspt->rspt_cpt = cpt;
4755                 /* store the rspt so we can access it when we get the REPLY */
4756                 md->md_rspt_ptr = rspt;
4757                 local_rspt = rspt;
4758         }
4759         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4760
4761         /*
4762          * add to the list of tracked responses. It's added to tail of the
4763          * list in order to expire all the older entries first.
4764          */
4765         lnet_net_lock(cpt);
4766         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4767         lnet_net_unlock(cpt);
4768         lnet_res_unlock(cpt);
4769 }
4770
4771 /**
4772  * Initiate an asynchronous PUT operation.
4773  *
4774  * There are several events associated with a PUT: completion of the send on
4775  * the initiator node (LNET_EVENT_SEND), and when the send completes
4776  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4777  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4778  * used at the target node to indicate the completion of incoming data
4779  * delivery.
4780  *
4781  * The local events will be logged in the EQ associated with the MD pointed to
4782  * by \a mdh handle. Using a MD without an associated EQ results in these
4783  * events being discarded. In this case, the caller must have another
4784  * mechanism (e.g., a higher level protocol) for determining when it is safe
4785  * to modify the memory region associated with the MD.
4786  *
4787  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4788  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4789  *
4790  * \param self Indicates the NID of a local interface through which to send
4791  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4792  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4793  * must be "free floating" (See LNetMDBind()).
4794  * \param ack Controls whether an acknowledgment is requested.
4795  * Acknowledgments are only sent when they are requested by the initiating
4796  * process and the target MD enables them.
4797  * \param target A process identifier for the target process.
4798  * \param portal The index in the \a target's portal table.
4799  * \param match_bits The match bits to use for MD selection at the target
4800  * process.
4801  * \param offset The offset into the target MD (only used when the target
4802  * MD has the LNET_MD_MANAGE_REMOTE option set).
4803  * \param hdr_data 64 bits of user data that can be included in the message
4804  * header. This data is written to an event queue entry at the target if an
4805  * EQ is present on the matching MD.
4806  *
4807  * \retval  0      Success, and only in this case events will be generated
4808  * and logged to EQ (if it exists).
4809  * \retval -EIO    Simulated failure.
4810  * \retval -ENOMEM Memory allocation failure.
4811  * \retval -ENOENT Invalid MD object.
4812  *
4813  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4814  */
4815 int
4816 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4817         struct lnet_process_id target, unsigned int portal,
4818         __u64 match_bits, unsigned int offset,
4819         __u64 hdr_data)
4820 {
4821         struct lnet_msg *msg;
4822         struct lnet_libmd *md;
4823         int cpt;
4824         int rc;
4825         struct lnet_rsp_tracker *rspt = NULL;
4826
4827         LASSERT(the_lnet.ln_refcount > 0);
4828
4829         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4830             fail_peer(target.nid, 1)) {                 /* shall we now? */
4831                 CERROR("Dropping PUT to %s: simulated failure\n",
4832                        libcfs_id2str(target));
4833                 return -EIO;
4834         }
4835
4836         msg = lnet_msg_alloc();
4837         if (msg == NULL) {
4838                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4839                        libcfs_id2str(target));
4840                 return -ENOMEM;
4841         }
4842         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
4843
4844         cpt = lnet_cpt_of_cookie(mdh.cookie);
4845
4846         if (ack == LNET_ACK_REQ) {
4847                 rspt = lnet_rspt_alloc(cpt);
4848                 if (!rspt) {
4849                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4850                                 libcfs_id2str(target));
4851                         return -ENOMEM;
4852                 }
4853                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4854         }
4855
4856         lnet_res_lock(cpt);
4857
4858         md = lnet_handle2md(&mdh);
4859         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4860                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4861                        match_bits, portal, libcfs_id2str(target),
4862                        md == NULL ? -1 : md->md_threshold);
4863                 if (md != NULL && md->md_me != NULL)
4864                         CERROR("Source MD also attached to portal %d\n",
4865                                md->md_me->me_portal);
4866                 lnet_res_unlock(cpt);
4867
4868                 if (rspt)
4869                         lnet_rspt_free(rspt, cpt);
4870
4871                 lnet_msg_free(msg);
4872                 return -ENOENT;
4873         }
4874
4875         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4876
4877         lnet_msg_attach_md(msg, md, 0, 0);
4878
4879         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4880
4881         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4882         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
4883         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
4884         msg->msg_hdr.msg.put.hdr_data = hdr_data;
4885
4886         /* NB handles only looked up by creator (no flips) */
4887         if (ack == LNET_ACK_REQ) {
4888                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4889                         the_lnet.ln_interface_cookie;
4890                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4891                         md->md_lh.lh_cookie;
4892         } else {
4893                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4894                         LNET_WIRE_HANDLE_COOKIE_NONE;
4895                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4896                         LNET_WIRE_HANDLE_COOKIE_NONE;
4897         }
4898
4899         lnet_res_unlock(cpt);
4900
4901         lnet_build_msg_event(msg, LNET_EVENT_SEND);
4902
4903         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
4904                                                    md->md_options))
4905                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
4906         else if (rspt)
4907                 lnet_rspt_free(rspt, cpt);
4908
4909         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
4910                                  CFS_FAIL_ONCE))
4911                 rc = -EIO;
4912         else
4913                 rc = lnet_send(self, msg, LNET_NID_ANY);
4914
4915         if (rc != 0) {
4916                 CNETERR("Error sending PUT to %s: %d\n",
4917                         libcfs_id2str(target), rc);
4918                 msg->msg_no_resend = true;
4919                 lnet_finalize(msg, rc);
4920         }
4921
4922         /* completion will be signalled by an event */
4923         return 0;
4924 }
4925 EXPORT_SYMBOL(LNetPut);
4926
4927 /*
4928  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
4929  * returns a msg for the LND to pass to lnet_finalize() when the sink
4930  * data has been received.
4931  *
4932  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
4933  * lnet_finalize() is called on it, so the LND must call this first
4934  */
4935 struct lnet_msg *
4936 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
4937 {
4938         struct lnet_msg *msg = lnet_msg_alloc();
4939         struct lnet_libmd *getmd = getmsg->msg_md;
4940         struct lnet_process_id peer_id = getmsg->msg_target;
4941         int cpt;
4942
4943         LASSERT(!getmsg->msg_target_is_router);
4944         LASSERT(!getmsg->msg_routing);
4945
4946         if (msg == NULL) {
4947                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
4948                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
4949                 goto drop;
4950         }
4951
4952         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
4953         lnet_res_lock(cpt);
4954
4955         LASSERT(getmd->md_refcount > 0);
4956
4957         if (getmd->md_threshold == 0) {
4958                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
4959                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
4960                         getmd);
4961                 lnet_res_unlock(cpt);
4962                 goto drop;
4963         }
4964
4965         LASSERT(getmd->md_offset == 0);
4966
4967         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
4968                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
4969
4970         /* setup information for lnet_build_msg_event */
4971         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
4972         msg->msg_from = peer_id.nid;
4973         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
4974         msg->msg_hdr.src_nid = peer_id.nid;
4975         msg->msg_hdr.payload_length = getmd->md_length;
4976         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
4977
4978         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
4979         lnet_res_unlock(cpt);
4980
4981         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4982
4983         lnet_net_lock(cpt);
4984         lnet_msg_commit(msg, cpt);
4985         lnet_net_unlock(cpt);
4986
4987         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4988
4989         return msg;
4990
4991  drop:
4992         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4993
4994         lnet_net_lock(cpt);
4995         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
4996         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4997         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
4998                 getmd->md_length;
4999         lnet_net_unlock(cpt);
5000
5001         if (msg != NULL)
5002                 lnet_msg_free(msg);
5003
5004         return NULL;
5005 }
5006 EXPORT_SYMBOL(lnet_create_reply_msg);
5007
5008 void
5009 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5010                        unsigned int len)
5011 {
5012         /* Set the REPLY length, now the RDMA that elides the REPLY message has
5013          * completed and I know it. */
5014         LASSERT(reply != NULL);
5015         LASSERT(reply->msg_type == LNET_MSG_GET);
5016         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5017
5018         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5019          * the end of my buffer, I might as well be dead. */
5020         LASSERT(len <= reply->msg_ev.mlength);
5021
5022         reply->msg_ev.mlength = len;
5023 }
5024 EXPORT_SYMBOL(lnet_set_reply_msg_len);
5025
5026 /**
5027  * Initiate an asynchronous GET operation.
5028  *
5029  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5030  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5031  * the target node in the REPLY has been written to local MD.
5032  *
5033  * On the target node, an LNET_EVENT_GET is logged when the GET request
5034  * arrives and is accepted into a MD.
5035  *
5036  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5037  * \param mdh A handle for the MD that describes the memory into which the
5038  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5039  *
5040  * \retval  0      Success, and only in this case events will be generated
5041  * and logged to EQ (if it exists) of the MD.
5042  * \retval -EIO    Simulated failure.
5043  * \retval -ENOMEM Memory allocation failure.
5044  * \retval -ENOENT Invalid MD object.
5045  */
5046 int
5047 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
5048         struct lnet_process_id target, unsigned int portal,
5049         __u64 match_bits, unsigned int offset, bool recovery)
5050 {
5051         struct lnet_msg *msg;
5052         struct lnet_libmd *md;
5053         struct lnet_rsp_tracker *rspt;
5054         int cpt;
5055         int rc;
5056
5057         LASSERT(the_lnet.ln_refcount > 0);
5058
5059         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5060             fail_peer(target.nid, 1))                   /* shall we now? */
5061         {
5062                 CERROR("Dropping GET to %s: simulated failure\n",
5063                        libcfs_id2str(target));
5064                 return -EIO;
5065         }
5066
5067         msg = lnet_msg_alloc();
5068         if (!msg) {
5069                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5070                        libcfs_id2str(target));
5071                 return -ENOMEM;
5072         }
5073
5074         cpt = lnet_cpt_of_cookie(mdh.cookie);
5075
5076         rspt = lnet_rspt_alloc(cpt);
5077         if (!rspt) {
5078                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5079                        libcfs_id2str(target));
5080                 return -ENOMEM;
5081         }
5082         INIT_LIST_HEAD(&rspt->rspt_on_list);
5083
5084         msg->msg_recovery = recovery;
5085
5086         lnet_res_lock(cpt);
5087
5088         md = lnet_handle2md(&mdh);
5089         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5090                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5091                        match_bits, portal, libcfs_id2str(target),
5092                        md == NULL ? -1 : md->md_threshold);
5093                 if (md != NULL && md->md_me != NULL)
5094                         CERROR("REPLY MD also attached to portal %d\n",
5095                                md->md_me->me_portal);
5096
5097                 lnet_res_unlock(cpt);
5098
5099                 lnet_msg_free(msg);
5100                 lnet_rspt_free(rspt, cpt);
5101                 return -ENOENT;
5102         }
5103
5104         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5105
5106         lnet_msg_attach_md(msg, md, 0, 0);
5107
5108         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5109
5110         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5111         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5112         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5113         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5114
5115         /* NB handles only looked up by creator (no flips) */
5116         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5117                 the_lnet.ln_interface_cookie;
5118         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5119                 md->md_lh.lh_cookie;
5120
5121         lnet_res_unlock(cpt);
5122
5123         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5124
5125         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5126                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5127         else
5128                 lnet_rspt_free(rspt, cpt);
5129
5130         rc = lnet_send(self, msg, LNET_NID_ANY);
5131         if (rc < 0) {
5132                 CNETERR("Error sending GET to %s: %d\n",
5133                         libcfs_id2str(target), rc);
5134                 msg->msg_no_resend = true;
5135                 lnet_finalize(msg, rc);
5136         }
5137
5138         /* completion will be signalled by an event */
5139         return 0;
5140 }
5141 EXPORT_SYMBOL(LNetGet);
5142
5143 /**
5144  * Calculate distance to node at \a dstnid.
5145  *
5146  * \param dstnid Target NID.
5147  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5148  * is saved here.
5149  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5150  * here.
5151  *
5152  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5153  * local_nid_dist_zero is set, which is the default.
5154  * \retval positives Distance to target NID, i.e. number of hops plus one.
5155  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5156  */
5157 int
5158 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5159 {
5160         struct list_head *e;
5161         struct lnet_ni *ni = NULL;
5162         struct lnet_remotenet *rnet;
5163         __u32 dstnet = LNET_NIDNET(dstnid);
5164         int hops;
5165         int cpt;
5166         __u32 order = 2;
5167         struct list_head *rn_list;
5168
5169         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5170          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5171          * keep order 0 free for 0@lo and order 1 free for a local NID
5172          * match */
5173
5174         LASSERT(the_lnet.ln_refcount > 0);
5175
5176         cpt = lnet_net_lock_current();
5177
5178         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5179                 if (ni->ni_nid == dstnid) {
5180                         if (srcnidp != NULL)
5181                                 *srcnidp = dstnid;
5182                         if (orderp != NULL) {
5183                                 if (dstnid == LNET_NID_LO_0)
5184                                         *orderp = 0;
5185                                 else
5186                                         *orderp = 1;
5187                         }
5188                         lnet_net_unlock(cpt);
5189
5190                         return local_nid_dist_zero ? 0 : 1;
5191                 }
5192
5193                 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
5194                         /* Check if ni was originally created in
5195                          * current net namespace.
5196                          * If not, assign order above 0xffff0000,
5197                          * to make this ni not a priority. */
5198                         if (current->nsproxy &&
5199                             !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
5200                                         order += 0xffff0000;
5201                         if (srcnidp != NULL)
5202                                 *srcnidp = ni->ni_nid;
5203                         if (orderp != NULL)
5204                                 *orderp = order;
5205                         lnet_net_unlock(cpt);
5206                         return 1;
5207                 }
5208
5209                 order++;
5210         }
5211
5212         rn_list = lnet_net2rnethash(dstnet);
5213         list_for_each(e, rn_list) {
5214                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5215
5216                 if (rnet->lrn_net == dstnet) {
5217                         struct lnet_route *route;
5218                         struct lnet_route *shortest = NULL;
5219                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5220                         __u32 route_hops;
5221
5222                         LASSERT(!list_empty(&rnet->lrn_routes));
5223
5224                         list_for_each_entry(route, &rnet->lrn_routes,
5225                                             lr_list) {
5226                                 route_hops = route->lr_hops;
5227                                 if (route_hops == LNET_UNDEFINED_HOPS)
5228                                         route_hops = 1;
5229                                 if (shortest == NULL ||
5230                                     route_hops < shortest_hops) {
5231                                         shortest = route;
5232                                         shortest_hops = route_hops;
5233                                 }
5234                         }
5235
5236                         LASSERT(shortest != NULL);
5237                         hops = shortest_hops;
5238                         if (srcnidp != NULL) {
5239                                 struct lnet_net *net;
5240                                 net = lnet_get_net_locked(shortest->lr_lnet);
5241                                 LASSERT(net);
5242                                 ni = lnet_get_next_ni_locked(net, NULL);
5243                                 *srcnidp = ni->ni_nid;
5244                         }
5245                         if (orderp != NULL)
5246                                 *orderp = order;
5247                         lnet_net_unlock(cpt);
5248                         return hops + 1;
5249                 }
5250                 order++;
5251         }
5252
5253         lnet_net_unlock(cpt);
5254         return -EHOSTUNREACH;
5255 }
5256 EXPORT_SYMBOL(LNetDist);