/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        lnet_nid_t sd_dst_nid;
        lnet_nid_t sd_src_nid;
        lnet_nid_t sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);

        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);

        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

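/*
 * Test hook used to simulate message loss.  A non-zero 'threshold' arms
 * an entry that fails the next 'threshold' messages involving 'nid'
 * (LNET_MD_THRESH_INF makes it permanent); threshold == 0 removes
 * matching entries, or every entry when nid == LNET_NID_ANY.  For
 * example, lnet_fail_nid(nid, 3) drops the next three messages
 * exchanged with 'nid'.
 */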
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}

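/*
 * Check 'nid' against the test entries armed by lnet_fail_nid() above.
 * Returns 1 (and decrements the matching threshold) if the message
 * should be dropped, 0 otherwise.  Zombie entries (threshold == 0) are
 * only culled on the outgoing path, since incoming messages may be
 * processed at interrupt priority.
 */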
static int
fail_peer(lnet_nid_t nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);
        int fail = 0;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

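/*
 * Copy 'nob' bytes from the source kvec array, starting at byte offset
 * 'soffset', into the destination kvec array at byte offset 'doffset'.
 * Both vectors are walked fragment by fragment and are not modified;
 * the caller must ensure each vector covers at least 'nob' bytes past
 * its offset, or the LASSERTs below will fire.
 */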
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

int
lnet_extract_iov(int dst_niov, struct kvec *dst,
                 int src_niov, struct kvec *src,
                 unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return 0;                       /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->iov_len) {      /* skip initial frags */
                offset -= src->iov_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->iov_len - offset;
                dst->iov_base = ((char *)src->iov_base) + offset;

                if (len <= frag_len) {
                        dst->iov_len = len;
                        return niov;
                }

                dst->iov_len = frag_len;

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_iov);

unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->kiov_len;

        return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

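/*
 * Page-vector (kiov) analogue of lnet_copy_iov2iov(): copy 'nob' bytes
 * between two page vectors, mapping at most one source and one
 * destination page at a time with kmap().  Must not be called from
 * interrupt context since kmap() may sleep.
 */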
void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;
        char *daddr = NULL;
        char *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(ndiov > 0);
        while (doffset >= diov->kiov_len) {
                doffset -= diov->kiov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->kiov_len) {
                soffset -= siov->kiov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->kiov_len - doffset,
                                siov->kiov_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->kiov_page)) +
                                diov->kiov_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->kiov_page)) +
                                siov->kiov_offset + soffset;

                /* There is a vanishing risk of kmap deadlock when mapping
                 * two pages at once.  In practice at least one of the kiovs
                 * will be backed by already-mapped kernel pages, for which
                 * kmap/kunmap are no-ops. */

                memcpy(daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->kiov_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->kiov_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->kiov_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->kiov_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->kiov_page);
        if (saddr != NULL)
                kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                   unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int this_nob;
        char *addr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->kiov_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int this_nob;
        char *addr = NULL;

        if (nob == 0)
                return;

        LASSERT(!in_interrupt());

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->kiov_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
                  int src_niov, lnet_kiov_t *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return 0;                       /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->kiov_len) {      /* skip initial frags */
                offset -= src->kiov_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->kiov_len - offset;
                dst->kiov_page = src->kiov_page;
                dst->kiov_offset = src->kiov_offset + offset;

                if (len <= frag_len) {
                        dst->kiov_len = len;
                        LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
                        return niov;
                }

                dst->kiov_len = frag_len;
                LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);

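/*
 * Pass a received message to the LND for delivery into the buffers
 * described by 'msg'.  'mlen' is the number of bytes to deliver, 'rlen'
 * the total length on the wire; if the LND receive call fails, the
 * message is finalized here with the error.
 */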
void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        lnet_kiov_t *kiov = NULL;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        iov  = msg->msg_iov;
                        kiov = msg->msg_kiov;

                        LASSERT(niov > 0);
                        LASSERT((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, iov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        if ((md->md_options & LNET_MD_KIOV) != 0)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
}

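/*
 * Fill in the wire header of an outgoing message: type, destination
 * NID/PID, payload length, and the MD buffers when there is a payload.
 * The source NID is left unset here and filled in once the send
 * pathway has been selected.
 */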
void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}

static void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                   &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

static bool
lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
{
        time64_t deadline;

        deadline = lpni->lpni_last_alive +
                   lpni->lpni_net->net_tunables.lct_peer_timeout;

        /*
         * assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (deadline > now)
                return false;

        return true;
}

/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
{
        time64_t now = ktime_get_seconds();

        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;

        /*
         * If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 1;

        /* try and send recovery messages regardless */
        if (msg->msg_recovery)
                return 1;

        /* always send any responses */
        if (msg->msg_type == LNET_MSG_ACK ||
            msg->msg_type == LNET_MSG_REPLY)
                return 1;

        if (!lnet_is_peer_deadline_passed(lpni, now))
                return 1;

        return lnet_is_peer_ni_alive(lpni);
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni *lp = msg->msg_txpeer;
        struct lnet_ni *ni = msg->msg_txni;
        int cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);
        /* can't get here if we're sending to the loopback interface */
        LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp, msg) == 0) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);
                if (msg->msg_txni)
                        lnet_incr_stats(&msg->msg_txni->ni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                if (do_send)
                        lnet_finalize(msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_id2str(msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

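/*
 * Return the smallest router-buffer pool on the message's rx CPT whose
 * buffers are large enough to hold the message payload.
 */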
static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool *rbp;
        int cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);
                /* lp_lock protects the lp_rtrq */
                spin_lock(&lp->lp_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&lpni->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lp_lock);
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

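/*
 * Give back the NI and peer tx credits held by a committed message.
 * Whenever a returned credit leaves its queue still in deficit, the
 * first delayed message on that queue is posted immediately.
 */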
void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *txpeer = msg->msg_txpeer;
        struct lnet_ni *txni = msg->msg_txni;
        struct lnet_msg *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_entry(txpeer->lpni_txq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * msg's CPT can be different from msg2's CPT, so we
                         * need to make sure we lock the correct cpt for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

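/*
 * Called when a router buffer is returned to 'rbp': if a routed message
 * is blocked waiting for a buffer from this pool, receive it now.
 */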
void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_entry(rbp->rbp_msgs.next,
                         struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

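/*
 * Give back the router-buffer and peer router credits held by a routed
 * message.  A freed buffer either revives a blocked message or, when
 * routing has been disabled in the meantime, is destroyed and any
 * messages queued for routing on the peer are dropped.
 */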
void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits. */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                lp = rxpeerni->lpni_peer_net->lpn_peer;

                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeerni->lpni_lock);
                spin_lock(&lp->lp_lock);

                rxpeerni->lpni_rtrcredits++;

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);

                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&rxpeerni->lpni_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_entry(lp->lp_rtrq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&rxpeerni->lpni_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&rxpeerni->lpni_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

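/*
 * Compare two gateway peer NIs: the one with the smaller tx queue
 * backlog wins; on a tie, the one with more tx credits wins.  Returns
 * 1 if p1 is better, -1 if p2 is better, 0 if they are equivalent.
 */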
static int
lnet_compare_gw_lpnis(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
{
        if (p1->lpni_txqnob < p2->lpni_txqnob)
                return 1;

        if (p1->lpni_txqnob > p2->lpni_txqnob)
                return -1;

        if (p1->lpni_txcredits > p2->lpni_txcredits)
                return 1;

        if (p1->lpni_txcredits < p2->lpni_txcredits)
                return -1;

        return 0;
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        struct lnet_peer_ni *best_lpni = NULL;
        int best_lpni_credits = INT_MIN;
        bool preferred = false;
        bool ni_is_pref;
        int best_lpni_healthv = 0;
        int lpni_healthv;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        ni_is_pref = lnet_peer_is_pref_nid_locked(lpni,
                                                                  best_ni->ni_nid);
                        CDEBUG(D_NET, "%s ni_is_pref = %d\n",
                               libcfs_nid2str(best_ni->ni_nid), ni_is_pref);
                } else {
                        ni_is_pref = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);

                if (best_lpni)
                        CDEBUG(D_NET, "%s c:[%d, %d], s:[%d, %d]\n",
                               libcfs_nid2str(lpni->lpni_nid),
                               lpni->lpni_txcredits, best_lpni_credits,
                               lpni->lpni_seq, best_lpni->lpni_seq);

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv) {
                        continue;
                } else if (lpni_healthv > best_lpni_healthv) {
                        best_lpni_healthv = lpni_healthv;
                /* if this is a preferred peer use it */
                } else if (!preferred && ni_is_pref) {
                        preferred = true;
                } else if (preferred && !ni_is_pref) {
                        /*
                         * this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                } else if (lpni->lpni_txcredits < best_lpni_credits) {
                        /*
                         * We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                } else if (lpni->lpni_txcredits == best_lpni_credits) {
                        /*
                         * The best peer found so far and the current peer
                         * have the same number of available credits, so
                         * select between them using round-robin.
                         */
                        if (best_lpni) {
                                if (best_lpni->lpni_seq <= lpni->lpni_seq)
                                        continue;
                        }
                }

                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);

                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                       libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nid2str(best_lpni->lpni_nid));

        return best_lpni;
}

/*
 * Prerequisite: the best_ni should already be set in the sd
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni_on_net(struct lnet_ni *lni, lnet_nid_t dst_nid,
                           struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /*
         * The gateway is Multi-Rail capable so now we must select the
         * proper peer_ni
         */
        peer_net = lnet_peer_get_net_locked(peer, net_id);

        if (!peer_net) {
                CERROR("gateway peer %s has no NI on net %s\n",
                       libcfs_nid2str(peer->lp_primary_nid),
                       libcfs_net2str(net_id));
                return NULL;
        }

        return lnet_select_peer_ni(lni, dst_nid, peer, peer_net);
}

/* Compare route priorities and hop counts */
static int
lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
{
        int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
        int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;

        if (r1->lr_priority < r2->lr_priority)
                return 1;

        if (r1->lr_priority > r2->lr_priority)
                return -1;

        if (r1_hops < r2_hops)
                return 1;

        if (r1_hops > r2_hops)
                return -1;

        return 0;
}

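/*
 * Pick the best route to the remote net 'rnet', considering only alive
 * routes and, when 'src_net' is specified, only gateway NIs on that
 * network.  Routes are ranked by priority and hop count, then by the
 * state of the gateways' peer NIs, with sequence numbers providing
 * round-robin among equivalent routes.  On return, '*gwni' is the
 * selected gateway peer NI and '*prev_route' the most recently used
 * route.
 */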
1472 static struct lnet_route *
1473 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1474                        struct lnet_route **prev_route,
1475                        struct lnet_peer_ni **gwni)
1476 {
1477         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1478         struct lnet_route *best_route;
1479         struct lnet_route *last_route;
1480         struct lnet_route *route;
1481         int rc;
1482         __u32 restrict_net;
1483         __u32 any_net = LNET_NIDNET(LNET_NID_ANY);
1484
1485         best_route = last_route = NULL;
1486         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1487                 if (!lnet_is_route_alive(route))
1488                         continue;
1489
1490                 /* If the src_net is specified then we need to find an lpni
1491                  * on that network
1492                  */
1493                 restrict_net = src_net == any_net ? route->lr_lnet : src_net;
1494                 if (!best_route) {
1495                         lpni = lnet_find_best_lpni_on_net(NULL, LNET_NID_ANY,
1496                                                           route->lr_gateway,
1497                                                           restrict_net);
1498                         if (lpni) {
1499                                 best_route = last_route = route;
1500                                 best_gw_ni = lpni;
1501                         } else {
1502                                 CERROR("Gateway %s does not have a peer NI on net %s\n",
1503                                        libcfs_nid2str(route->lr_gateway->lp_primary_nid),
1504                                        libcfs_net2str(restrict_net));
1505                         }
1506                         continue;
1507                 }
1508
1509                 /* no locking on the fields below, but it's harmless */
1510                 if (last_route->lr_seq - route->lr_seq < 0)
1511                         last_route = route;
1512
1513                 rc = lnet_compare_routes(route, best_route);
1514                 if (rc == -1)
1515                         continue;
1516
1517                 lpni = lnet_find_best_lpni_on_net(NULL, LNET_NID_ANY,
1518                                                   route->lr_gateway,
1519                                                   restrict_net);
1520                 if (!lpni) {
1521                         CERROR("Gateway %s does not have a peer NI on net %s\n",
1522                                libcfs_nid2str(route->lr_gateway->lp_primary_nid),
1523                                libcfs_net2str(restrict_net));
1524                         continue;
1525                 }
1526
1527                 if (rc == 1) {
1528                         best_route = route;
1529                         best_gw_ni = lpni;
1530                         continue;
1531                 }
1532
1533                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1534                 if (rc == -1)
1535                         continue;
1536
1537                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1538                         best_route = route;
1539                         best_gw_ni = lpni;
1540                         continue;
1541                 }
1542         }
1543
1544         *prev_route = last_route;
1545         *gwni = best_gw_ni;
1546
1547         return best_route;
1548 }
1549
1550 static struct lnet_ni *
1551 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1552                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1553                  int md_cpt)
1554 {
1555         struct lnet_ni *ni = NULL;
1556         unsigned int shortest_distance;
1557         int best_credits;
1558         int best_healthv;
1559
1560         /*
1561          * If there is no peer_ni that we can send to on this network,
1562          * then there is no point in looking for a new best_ni here.
1563          */
1564         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1565                 return best_ni;
1566
1567         if (best_ni == NULL) {
1568                 shortest_distance = UINT_MAX;
1569                 best_credits = INT_MIN;
1570                 best_healthv = 0;
1571         } else {
1572                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1573                                                      best_ni->ni_dev_cpt);
1574                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1575                 best_healthv = atomic_read(&best_ni->ni_healthv);
1576         }
1577
1578         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1579                 unsigned int distance;
1580                 int ni_credits;
1581                 int ni_healthv;
1582                 int ni_fatal;
1583
1584                 ni_credits = atomic_read(&ni->ni_tx_credits);
1585                 ni_healthv = atomic_read(&ni->ni_healthv);
1586                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1587
1588                 /*
1589                  * calculate the distance from the CPT on which
1590                  * the message memory is allocated to the CPT of
1591                  * the NI's physical device
1592                  */
1593                 distance = cfs_cpt_distance(lnet_cpt_table(),
1594                                             md_cpt,
1595                                             ni->ni_dev_cpt);
1596
1597                 CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d] with best_ni %s [c:%d, d:%d, s:%d]\n",
1598                        libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1599                        ni->ni_seq, (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1600                         : "not selected", best_credits, shortest_distance,
1601                         (best_ni) ? best_ni->ni_seq : 0);
1602
1603                 /*
1604                  * All distances smaller than the NUMA range
1605                  * are treated equally.
1606                  */
1607                 if (distance < lnet_numa_range)
1608                         distance = lnet_numa_range;
1609
1610                 /*
1611                  * Select on health, shorter distance, available
1612                  * credits, then round-robin.
1613                  */
1614                 if (ni_fatal) {
1615                         continue;
1616                 } else if (ni_healthv < best_healthv) {
1617                         continue;
1618                 } else if (ni_healthv > best_healthv) {
1619                         best_healthv = ni_healthv;
1620                         /*
1621                          * If we're going to prefer this ni because it's
1622                          * the healthiest, then we should set the
1623                          * shortest_distance in the algorithm in case
1624                          * there are multiple NIs with the same health but
1625                          * different distances.
1626                          */
1627                         if (distance < shortest_distance)
1628                                 shortest_distance = distance;
1629                 } else if (distance > shortest_distance) {
1630                         continue;
1631                 } else if (distance < shortest_distance) {
1632                         shortest_distance = distance;
1633                 } else if (ni_credits < best_credits) {
1634                         continue;
1635                 } else if (ni_credits == best_credits) {
1636                         if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1637                                 continue;
1638                 }
1639                 best_ni = ni;
1640                 best_credits = ni_credits;
1641         }
1642
1643         CDEBUG(D_NET, "selected best_ni %s\n",
1644                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1645
1646         return best_ni;
1647 }
1648
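/*
 * Illustrative sketch (not part of the build): the selection ladder
 * used by lnet_get_best_ni() above, reduced to plain integers.
 * Health dominates, then CPT distance (with anything inside the
 * NUMA range considered equal), then available tx credits, and
 * finally round robin on the sequence number. Returns true when the
 * candidate should replace the current best.
 */
#if 0
static bool ni_is_better(int healthv, int best_healthv,
                         unsigned int distance, unsigned int best_distance,
                         int credits, int best_credits,
                         int seq, int best_seq, unsigned int numa_range)
{
        /* distances inside the NUMA range are treated as equal */
        if (distance < numa_range)
                distance = numa_range;

        if (healthv != best_healthv)
                return healthv > best_healthv;
        if (distance != best_distance)
                return distance < best_distance;
        if (credits != best_credits)
                return credits > best_credits;
        return seq < best_seq;  /* round robin: least recently used */
}
#endif
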
1649 /*
1650  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1651  * because such traffic is required to perform discovery. We therefore
1652  * exclude all GET and PUT on that portal. We also exclude all ACK and
1653  * REPLY traffic, but that is because the portal is not tracked in the
1654  * message structure for these message types. We could restrict this
1655  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1656  */
1657 static bool
1658 lnet_msg_discovery(struct lnet_msg *msg)
1659 {
1660         if (msg->msg_type == LNET_MSG_PUT) {
1661                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1662                         return true;
1663         } else if (msg->msg_type == LNET_MSG_GET) {
1664                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1665                         return true;
1666         }
1667         return false;
1668 }
1669
1670 #define SRC_SPEC        0x0001
1671 #define SRC_ANY         0x0002
1672 #define LOCAL_DST       0x0004
1673 #define REMOTE_DST      0x0008
1674 #define MR_DST          0x0010
1675 #define NMR_DST         0x0020
1676 #define SND_RESP        0x0040
1677
1678 /* The following two defines are used for return codes */
1679 #define REPEAT_SEND     0x1000
1680 #define PASS_THROUGH    0x2000
1681
1682 /* The different cases lnet_select pathway needs to handle */
1683 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1684 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1685 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1686 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1687 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1688 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1689 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1690 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
1691
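/*
 * Illustrative sketch (not part of the build): how the bits above
 * compose into the named cases handled by
 * lnet_handle_send_case_locked(). A message takes exactly one bit
 * from each pair (SRC_SPEC/SRC_ANY, LOCAL_DST/REMOTE_DST,
 * MR_DST/NMR_DST), with SND_RESP OR'd in for ACK and REPLY traffic.
 */
#if 0
static void send_case_example(void)
{
        __u32 send_case = 0;

        /* e.g. no source specified, destination on a local net, MR peer */
        send_case |= SRC_ANY;
        send_case |= LOCAL_DST;
        send_case |= MR_DST;
        LASSERT(send_case == SRC_ANY_LOCAL_MR_DST);

        /* SND_RESP is masked off again before the switch on the cases */
        send_case |= SND_RESP;
        LASSERT((send_case & ~SND_RESP) == SRC_ANY_LOCAL_MR_DST);
}
#endif
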
1692 static int
1693 lnet_handle_lo_send(struct lnet_send_data *sd)
1694 {
1695         struct lnet_msg *msg = sd->sd_msg;
1696         int cpt = sd->sd_cpt;
1697
1698         /* No send credit hassles with LOLND */
1699         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1700         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1701         if (!msg->msg_routing)
1702                 msg->msg_hdr.src_nid =
1703                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1704         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1705         lnet_msg_commit(msg, cpt);
1706         msg->msg_txni = the_lnet.ln_loni;
1707
1708         return LNET_CREDIT_OK;
1709 }
1710
1711 static int
1712 lnet_handle_send(struct lnet_send_data *sd)
1713 {
1714         struct lnet_ni *best_ni = sd->sd_best_ni;
1715         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1716         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1717         struct lnet_msg *msg = sd->sd_msg;
1718         int cpt2;
1719         __u32 send_case = sd->sd_send_case;
1720         int rc;
1721         __u32 routing = send_case & REMOTE_DST;
1722         struct lnet_rsp_tracker *rspt;
1723
1724         /*
1725          * Increment sequence number of the selected peer so that we
1726          * pick the next one in Round Robin.
1727          */
1728         best_lpni->lpni_seq++;
1729
1730         /*
1731          * grab a reference on the peer_ni so it sticks around even if
1732          * we need to drop and relock the lnet_net_lock below.
1733          */
1734         lnet_peer_ni_addref_locked(best_lpni);
1735
1736         /*
1737          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1738          * message. This ensures that we get a CPT that is correct for
1739          * the NI when the NI has been restricted to a subset of all CPTs.
1740          * If the selected CPT differs from the one currently locked, we
1741          * must unlock and relock the lnet_net_lock(), and then check whether
1742          * the configuration has changed. We don't have a hold on the best_ni
1743          * yet, and it may have vanished.
1744          */
1745         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1746         if (sd->sd_cpt != cpt2) {
1747                 __u32 seq = lnet_get_dlc_seq_locked();
1748                 lnet_net_unlock(sd->sd_cpt);
1749                 sd->sd_cpt = cpt2;
1750                 lnet_net_lock(sd->sd_cpt);
1751                 if (seq != lnet_get_dlc_seq_locked()) {
1752                         lnet_peer_ni_decref_locked(best_lpni);
1753                         return REPEAT_SEND;
1754                 }
1755         }
1756
1757         /*
1758          * store the best_lpni in the message right away to avoid having
1759          * to do the same operation under different conditions
1760          */
1761         msg->msg_txpeer = best_lpni;
1762         msg->msg_txni = best_ni;
1763
1764         /*
1765          * grab a reference for the best_ni since now it's in use in this
1766          * send. The reference will be dropped in lnet_finalize()
1767          */
1768         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1769
1770         /*
1771          * Always set the target.nid to the best peer picked. Either the
1772          * NID will be one of the peer NIDs selected, or the same NID as
1773          * what was originally set in the target or it will be the NID of
1774          * a router if this message should be routed
1775          */
1776         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1777
1778         /*
1779          * lnet_msg_commit assigns the correct cpt to the message, which
1780          * is used to decrement the correct refcount on the ni when it's
1781          * time to return the credits
1782          */
1783         lnet_msg_commit(msg, sd->sd_cpt);
1784
1785         /*
1786          * If we are routing the message then we keep the src_nid that was
1787          * set by the originator. If we are not routing then we are the
1788          * originator and set it here.
1789          */
1790         if (!msg->msg_routing)
1791                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1792
1793         if (routing) {
1794                 msg->msg_target_is_router = 1;
1795                 msg->msg_target.pid = LNET_PID_LUSTRE;
1796                 /*
1797                  * since we're routing we want to ensure that the
1798                  * msg_hdr.dest_nid is set to the final destination. When
1799                  * the router receives this message it knows how to route
1800                  * it.
1801                  *
1802                  * final_dst_lpni is set at the beginning of the
1803                  * lnet_select_pathway() function and is never changed.
1804                  * It's safe to use it here.
1805                  */
1806                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1807         } else {
1808                 /*
1809                  * if we're not routing set the dest_nid to the best peer
1810                  * ni NID that we picked earlier in the algorithm.
1811                  */
1812                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1813         }
1814
1815         /*
1816          * if we have response tracker block update it with the next hop
1817          * nid
1818          */
1819         if (msg->msg_md) {
1820                 rspt = msg->msg_md->md_rspt_ptr;
1821                 if (rspt) {
1822                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1823                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1824                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1825                 }
1826         }
1827
1828         rc = lnet_post_send_locked(msg, 0);
1829
1830         if (!rc)
1831                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1832                        libcfs_nid2str(msg->msg_hdr.src_nid),
1833                        libcfs_nid2str(msg->msg_txni->ni_nid),
1834                        libcfs_nid2str(sd->sd_src_nid),
1835                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1836                        libcfs_nid2str(sd->sd_dst_nid),
1837                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1838                        libcfs_nid2str(sd->sd_rtr_nid),
1839                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1840
1841         return rc;
1842 }
1843
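/*
 * Illustrative sketch (not part of the build): the REPEAT_SEND
 * contract between lnet_handle_send() above and lnet_select_pathway()
 * below. When the commit CPT differs from the CPT we locked, the net
 * lock is dropped and retaken; if the configuration sequence number
 * changed in that window the cached NI/peer pointers can no longer
 * be trusted, so the entire selection is restarted.
 */
#if 0
again:
        rc = lnet_handle_send_case_locked(&send_data);
        cpt = send_data.sd_cpt;         /* may have moved to cpt2 */
        if (rc == REPEAT_SEND)
                goto again;             /* config changed: reselect */
#endif
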
1844 static inline void
1845 lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
1846 {
1847         if (sd->sd_send_case & NMR_DST &&
1848             sd->sd_msg->msg_type != LNET_MSG_REPLY &&
1849             sd->sd_msg->msg_type != LNET_MSG_ACK &&
1850             sd->sd_best_lpni->lpni_pref_nnids == 0) {
1851                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1852                        libcfs_nid2str(sd->sd_best_ni->ni_nid),
1853                        libcfs_nid2str(sd->sd_best_lpni->lpni_nid));
1854                 lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni,
1855                                                  sd->sd_best_ni->ni_nid);
1856         }
1857 }
1858
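/*
 * Illustrative sketch (not part of the build): the conditions under
 * which the helper above records a preferred source NID. Only fresh
 * traffic (not ACK or REPLY) to a non-MR destination that has no
 * preference recorded yet triggers the update; from then on the
 * stored NID keeps our source address consistent for that peer.
 */
#if 0
static bool should_set_nmr_pref(__u32 send_case, enum lnet_msg_type type,
                                __u32 pref_nnids)
{
        return (send_case & NMR_DST) != 0 &&
               type != LNET_MSG_REPLY && type != LNET_MSG_ACK &&
               pref_nnids == 0;
}
#endif
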
1859 /*
1860  * Source Specified
1861  * Local Destination
1862  * non-mr peer
1863  *
1864  * use the source and destination NIDs as the pathway
1865  */
1866 static int
1867 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1868 {
1869         /* the destination lpni is set before we get here. */
1870
1871         /* find local NI */
1872         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1873         if (!sd->sd_best_ni) {
1874                 CERROR("Can't send to %s: src %s is not a local nid\n",
1875                        libcfs_nid2str(sd->sd_dst_nid),
1876                        libcfs_nid2str(sd->sd_src_nid));
1877                 return -EINVAL;
1878         }
1879
1880         /*
1881          * the preferred NID will only be set for NMR peers
1882          */
1883         lnet_set_non_mr_pref_nid(sd);
1884
1885         return lnet_handle_send(sd);
1886 }
1887
1888 /*
1889  * Source Specified
1890  * Local Destination
1891  * MR Peer
1892  *
1893  * Don't run the selection algorithm on the peer NIs. By specifying the
1894  * local NID, we're also saying that we should always use the destination NID
1895  * provided. This handles the case where we should be using the same
1896  * destination NID for all the messages which belong to the same RPC
1897  * request.
1898  */
1899 static int
1900 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
1901 {
1902         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1903         if (!sd->sd_best_ni) {
1904                 CERROR("Can't send to %s: src %s is not a local nid\n",
1905                        libcfs_nid2str(sd->sd_dst_nid),
1906                        libcfs_nid2str(sd->sd_src_nid));
1907                 return -EINVAL;
1908         }
1909
1910         if (sd->sd_best_lpni &&
1911             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
1912                 return lnet_handle_lo_send(sd);
1913         else if (sd->sd_best_lpni)
1914                 return lnet_handle_send(sd);
1915
1916         CERROR("can't send to %s. no NI on %s\n",
1917                libcfs_nid2str(sd->sd_dst_nid),
1918                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
1919
1920         return -EHOSTUNREACH;
1921 }
1922
1923 struct lnet_ni *
1924 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
1925                               struct lnet_peer *peer,
1926                               struct lnet_peer_net *peer_net,
1927                               int cpt,
1928                               bool incr_seq)
1929 {
1930         struct lnet_net *local_net;
1931         struct lnet_ni *best_ni;
1932
1933         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
1934         if (!local_net)
1935                 return NULL;
1936
1937         /*
1938          * Iterate through the NIs in this local Net and select
1939          * the NI to send from. The selection is determined by
1940          * these criteria, in decreasing priority:
1941          *      1. NI health
1942          *      2. NUMA distance
1943          *      3. NI available credits, then Round Robin
1944          */
1945         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
1946                                    peer, peer_net, cpt);
1947
1948         if (incr_seq && best_ni)
1949                 best_ni->ni_seq++;
1950
1951         return best_ni;
1952 }
1953
1954 static int
1955 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
1956                              struct lnet_msg *msg, lnet_nid_t rtr_nid,
1957                              int cpt)
1958 {
1959         struct lnet_peer *peer;
1960         lnet_nid_t primary_nid;
1961         int rc;
1962
1963         lnet_peer_ni_addref_locked(lpni);
1964
1965         peer = lpni->lpni_peer_net->lpn_peer;
1966
1967         if (lnet_peer_gw_discovery(peer)) {
1968                 lnet_peer_ni_decref_locked(lpni);
1969                 return 0;
1970         }
1971
1972         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
1973                 lnet_peer_ni_decref_locked(lpni);
1974                 return 0;
1975         }
1976
1977         rc = lnet_discover_peer_locked(lpni, cpt, false);
1978         if (rc) {
1979                 lnet_peer_ni_decref_locked(lpni);
1980                 return rc;
1981         }
1982         /* The peer may have changed. */
1983         peer = lpni->lpni_peer_net->lpn_peer;
1984         spin_lock(&peer->lp_lock);
1985         if (lnet_peer_is_uptodate_locked(peer)) {
1986                 spin_unlock(&peer->lp_lock);
1987                 lnet_peer_ni_decref_locked(lpni);
1988                 return 0;
1989         }
1990         /* queue message and return */
1991         msg->msg_rtr_nid_param = rtr_nid;
1992         msg->msg_sending = 0;
1993         msg->msg_txpeer = NULL;
1994         list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
1995         primary_nid = peer->lp_primary_nid;
1996         spin_unlock(&peer->lp_lock);
1997
1998         lnet_peer_ni_decref_locked(lpni);
1999
2000         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2001                 msg, libcfs_nid2str(primary_nid));
2002
2003         return LNET_DC_WAIT;
2004 }
2005
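/*
 * Illustrative sketch (not part of the build): how callers interpret
 * lnet_initiate_peer_discovery(). Zero means the peer is up to date,
 * already being discovered as a gateway, or exempt from discovery,
 * so the send proceeds; LNET_DC_WAIT means the message was parked on
 * the peer's lp_dc_pendq to be resent once discovery completes; a
 * negative value fails the send.
 */
#if 0
        rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
        if (rc < 0)
                return rc;              /* discovery could not be started */
        if (rc == LNET_DC_WAIT)
                return rc;              /* msg queued on lp_dc_pendq */
        /* rc == 0: peer usable, continue with path selection */
#endif
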
2006 static int
2007 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2008                              lnet_nid_t dst_nid,
2009                              struct lnet_peer_ni **gw_lpni,
2010                              struct lnet_peer **gw_peer)
2011 {
2012         int rc;
2013         __u32 local_lnet;
2014         struct lnet_peer *gw;
2015         struct lnet_peer *lp;
2016         struct lnet_peer_net *lpn;
2017         struct lnet_peer_net *best_lpn = NULL;
2018         struct lnet_remotenet *rnet, *best_rnet = NULL;
2019         struct lnet_route *best_route = NULL;
2020         struct lnet_route *last_route = NULL;
2021         struct lnet_peer_ni *lpni = NULL;
2022         struct lnet_peer_ni *gwni = NULL;
2023         lnet_nid_t src_nid = sd->sd_src_nid;
2024
2025         /* If a router nid was specified then we are replying to a GET or
2026          * sending an ACK. In this case we use the gateway associated with the
2027          * specified router nid.
2028          */
2029         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2030                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2031                 if (!gwni) {
2032                         CERROR("No peer NI for gateway %s\n",
2033                                libcfs_nid2str(sd->sd_rtr_nid));
2034                         return -EHOSTUNREACH;
2035                 }
2036                 gw = gwni->lpni_peer_net->lpn_peer;
2037                 lnet_peer_ni_decref_locked(gwni);
2038                 local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
2039         } else {
2040                 /* we've already looked up the initial lpni using dst_nid */
2041                 lpni = sd->sd_best_lpni;
2042                 /* the peer tree must be in existence */
2043                 LASSERT(lpni && lpni->lpni_peer_net &&
2044                         lpni->lpni_peer_net->lpn_peer);
2045                 lp = lpni->lpni_peer_net->lpn_peer;
2046
2047                 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2048                         /* is this remote network reachable?  */
2049                         rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2050                         if (!rnet)
2051                                 continue;
2052
2053                         if (!best_lpn) {
2054                                 best_lpn = lpn;
2055                                 best_rnet = rnet;
2056                         }
2057
2058                         if (best_lpn->lpn_seq <= lpn->lpn_seq)
2059                                 continue;
2060
2061                         best_lpn = lpn;
2062                         best_rnet = rnet;
2063                 }
2064
2065                 if (!best_lpn) {
2066                         CERROR("peer %s has no available nets\n",
2067                                libcfs_nid2str(sd->sd_dst_nid));
2068                         return -EHOSTUNREACH;
2069                 }
2070
2071                 sd->sd_best_lpni = lnet_find_best_lpni_on_net(sd->sd_best_ni,
2072                                                               sd->sd_dst_nid,
2073                                                               lp,
2074                                                               best_lpn->lpn_net_id);
2075                 if (!sd->sd_best_lpni) {
2076                         CERROR("peer %s down\n",
2077                                libcfs_nid2str(sd->sd_dst_nid));
2078                         return -EHOSTUNREACH;
2079                 }
2080
2081                 best_route = lnet_find_route_locked(best_rnet,
2082                                                     LNET_NIDNET(src_nid),
2083                                                     &last_route, &gwni);
2084                 if (!best_route) {
2085                         CERROR("no route to %s from %s\n",
2086                                libcfs_nid2str(dst_nid),
2087                                libcfs_nid2str(src_nid));
2088                         return -EHOSTUNREACH;
2089                 }
2090
2091                 if (!gwni) {
2092                         CERROR("Internal Error. Route expected to %s from %s\n",
2093                                libcfs_nid2str(dst_nid),
2094                                libcfs_nid2str(src_nid));
2095                         return -EFAULT;
2096                 }
2097
2098                 gw = best_route->lr_gateway;
2099                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2100                 local_lnet = best_route->lr_lnet;
2101
2102         }
2103
2104         /*
2105          * Discover this gateway if it hasn't already been discovered.
2106          * This means we might delay the message until discovery has
2107          * completed
2108          */
2109         sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
2110         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_rtr_nid,
2111                                           sd->sd_cpt);
2112         if (rc)
2113                 return rc;
2114
2115         if (!sd->sd_best_ni)
2116                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
2117                                         lnet_peer_get_net_locked(gw,
2118                                                                  local_lnet),
2119                                         sd->sd_md_cpt,
2120                                         true);
2121
2122         if (!sd->sd_best_ni) {
2123                 CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2124                        libcfs_net2str(local_lnet),
2125                        libcfs_nid2str(sd->sd_src_nid));
2126                 return -EFAULT;
2127         }
2128
2129         *gw_lpni = gwni;
2130         *gw_peer = gw;
2131
2132         /*
2133          * increment the sequence numbers since now we're sure we're
2134          * going to use this path
2135          */
2136         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2137                 LASSERT(best_route && last_route);
2138                 best_route->lr_seq = last_route->lr_seq + 1;
2139                 best_lpn->lpn_seq++;
2140         }
2141
2142         return 0;
2143 }
2144
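/*
 * Illustrative sketch (not part of the build): the two gateway
 * selection modes implemented above. With an explicit router NID
 * (replying to a GET or sending an ACK) the gateway peer is looked
 * up directly; otherwise the best reachable remote peer net, the
 * best route to it, and the best gateway NI are selected, and their
 * sequence numbers are bumped so the next send round robins.
 */
#if 0
        if (sd->sd_rtr_nid != LNET_NID_ANY) {
                /* response path: reuse the router the request used */
                gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
        } else {
                /* fresh send: peer net -> route -> gateway NI */
                best_route = lnet_find_route_locked(best_rnet,
                                                    LNET_NIDNET(src_nid),
                                                    &last_route, &gwni);
        }
#endif
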
2145 /*
2146  * Handle two cases:
2147  *
2148  * Case 1:
2149  *  Source specified
2150  *  Remote destination
2151  *  Non-MR destination
2152  *
2153  * Case 2:
2154  *  Source specified
2155  *  Remote destination
2156  *  MR destination
2157  *
2158  * The handling of these two cases is similar. Even though the destination
2159  * can be MR or non-MR, we'll deal directly with the router.
2160  */
2161 static int
2162 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2163 {
2164         int rc;
2165         struct lnet_peer_ni *gw_lpni = NULL;
2166         struct lnet_peer *gw_peer = NULL;
2167
2168         /* find local NI */
2169         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2170         if (!sd->sd_best_ni) {
2171                 CERROR("Can't send to %s: src %s is not a local nid\n",
2172                        libcfs_nid2str(sd->sd_dst_nid),
2173                        libcfs_nid2str(sd->sd_src_nid));
2174                 return -EINVAL;
2175         }
2176
2177         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2178                                           &gw_peer);
2179         if (rc)
2180                 return rc;
2181
2182         if (sd->sd_send_case & NMR_DST)
2183                 /*
2184                  * since the final destination is non-MR let's set its preferred
2185                  * NID before we send
2186                  */
2187                 lnet_set_non_mr_pref_nid(sd);
2188
2189         /*
2190          * We're going to send to the gw found so let's set its
2191          * info
2192          */
2193         sd->sd_peer = gw_peer;
2194         sd->sd_best_lpni = gw_lpni;
2195
2196         return lnet_handle_send(sd);
2197 }
2198
2199 struct lnet_ni *
2200 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2201                                bool discovery)
2202 {
2203         struct lnet_peer_net *peer_net = NULL;
2204         struct lnet_ni *best_ni = NULL;
2205
2206         /*
2207          * The peer can have multiple interfaces, some of them can be on
2208          * the local network and others on a routed network. We should
2209          * prefer the local network. However if the local network is not
2210          * available then we need to try the routed network
2211          */
2212
2213         /* go through all the peer nets and find the best_ni */
2214         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
2215                 /*
2216                  * The peer's list of nets can contain non-local nets. We
2217                  * want to only examine the local ones.
2218                  */
2219                 if (!lnet_get_net_locked(peer_net->lpn_net_id))
2220                         continue;
2221                 best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
2222                                                    peer_net, md_cpt, false);
2223
2224                 /*
2225                  * if this is a discovery message and lp_disc_net_id is
2226                  * specified then use that net to send the discovery on.
2227                  */
2228                 if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
2229                     discovery)
2230                         break;
2231         }
2232
2233         if (best_ni)
2234                 /* increment sequence number so we can round robin */
2235                 best_ni->ni_seq++;
2236
2237         return best_ni;
2238 }
2239
2240 static struct lnet_ni *
2241 lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd)
2242 {
2243         struct lnet_ni *best_ni = NULL;
2244         struct lnet_peer_net *peer_net;
2245         struct lnet_peer *peer = sd->sd_peer;
2246         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2247         struct lnet_peer_ni *lpni;
2248         int cpt = sd->sd_cpt;
2249
2250         /*
2251          * We must use a consistent source address when sending to a
2252          * non-MR peer. However, a non-MR peer can have multiple NIDs
2253          * on multiple networks, and we may even need to talk to this
2254          * peer on multiple networks -- certain types of
2255          * load-balancing configuration do this.
2256          *
2257          * So we need to pick the NI the peer prefers for this
2258          * particular network.
2259          */
2260
2261         /* Get the target peer_ni */
2262         peer_net = lnet_peer_get_net_locked(peer,
2263                         LNET_NIDNET(best_lpni->lpni_nid));
2264         LASSERT(peer_net != NULL);
2265         list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
2266                                 lpni_peer_nis) {
2267                 if (lpni->lpni_pref_nnids == 0)
2268                         continue;
2269                 LASSERT(lpni->lpni_pref_nnids == 1);
2270                 best_ni = lnet_nid2ni_locked(lpni->lpni_pref.nid,
2271                                              cpt);
2272                 break;
2273         }
2274
2275         return best_ni;
2276 }
2277
2278 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2279 static int
2280 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2281 {
2282         struct lnet_ni *best_ni = NULL;
2283         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2284
2285         /*
2286          * We must use a consistent source address when sending to a
2287          * non-MR peer. However, a non-MR peer can have multiple NIDs
2288          * on multiple networks, and we may even need to talk to this
2289          * peer on multiple networks -- certain types of
2290          * load-balancing configuration do this.
2291          *
2292          * So we need to pick the NI the peer prefers for this
2293          * particular network.
2294          */
2295
2296         best_ni = lnet_find_existing_preferred_best_ni(sd);
2297
2298         /* if best_ni is still not set just pick one */
2299         if (!best_ni) {
2300                 best_ni =
2301                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2302                                                 sd->sd_best_lpni->lpni_peer_net,
2303                                                 sd->sd_md_cpt, true);
2304                 /* If there is no best_ni we don't have a route */
2305                 if (!best_ni) {
2306                         CERROR("no path to %s from net %s\n",
2307                                 libcfs_nid2str(best_lpni->lpni_nid),
2308                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2309                         return -EHOSTUNREACH;
2310                 }
2311         }
2312
2313         sd->sd_best_ni = best_ni;
2314
2315         /* Set preferred NI if necessary. */
2316         lnet_set_non_mr_pref_nid(sd);
2317
2318         return 0;
2319 }
2320
2322 /*
2323  * Source not specified
2324  * Local destination
2325  * Non-MR Peer
2326  *
2327  * always use the same source NID for NMR peers
2328  * If we've talked to that peer before then we already have a preferred
2329  * source NI associated with it. Otherwise, we select a preferred local NI
2330  * and store it in the peer
2331  */
2332 static int
2333 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2334 {
2335         int rc;
2336
2337         /* sd->sd_best_lpni is already set to the final destination */
2338
2339         /*
2340          * At this point we should've created the peer ni and peer. If we
2341          * can't find it, then something went wrong. Instead of asserting,
2342          * output a relevant message and fail the send.
2343          */
2344         if (!sd->sd_best_lpni) {
2345                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2346                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2347                        libcfs_nid2str(sd->sd_dst_nid));
2349                 return -EFAULT;
2350         }
2351
2352         rc = lnet_select_preferred_best_ni(sd);
2353         if (!rc)
2354                 rc = lnet_handle_send(sd);
2355
2356         return rc;
2357 }
2358
2359 static int
2360 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2361 {
2362         /*
2363          * NOTE we've already handled the remote peer case. So we only
2364          * need to worry about the local case here.
2365          *
2366          * if we're sending a response, ACK or reply, we need to send it
2367          * to the destination NID given to us. At this point we already
2368          * have the peer_ni we're supposed to send to, so just find the
2369          * best_ni on the peer net and use that. Since we're sending to an
2370          * MR peer then we can just run the selection algorithm on our
2371          * local NIs and pick the best one.
2372          */
2373         if (sd->sd_send_case & SND_RESP) {
2374                 sd->sd_best_ni =
2375                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2376                                                 sd->sd_best_lpni->lpni_peer_net,
2377                                                 sd->sd_md_cpt, true);
2378
2379                 if (!sd->sd_best_ni) {
2380                         /*
2381                          * We're not going to deal with not able to send
2382                          * a response to the provided final destination
2383                          */
2384                 CERROR("Can't send response to %s. No local NI available\n",
2385                        libcfs_nid2str(sd->sd_dst_nid));
2387                         return -EHOSTUNREACH;
2388                 }
2389
2390                 return lnet_handle_send(sd);
2391         }
2392
2393         /*
2394          * If we get here that means we're sending a fresh request, PUT or
2395          * GET, so we need to run our standard selection algorithm.
2396          * First find the best local interface that's on any of the peer's
2397          * networks.
2398          */
2399         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2400                                         sd->sd_md_cpt,
2401                                         lnet_msg_discovery(sd->sd_msg));
2402         if (sd->sd_best_ni) {
2403                 sd->sd_best_lpni =
2404                   lnet_find_best_lpni_on_net(sd->sd_best_ni, sd->sd_dst_nid,
2405                                              sd->sd_peer,
2406                                              sd->sd_best_ni->ni_net->net_id);
2407
2408                 /*
2409                  * if we're successful in selecting a peer_ni on the local
2410                  * network, then send to it. Otherwise fall through and
2411                  * try and see if we can reach it over another routed
2412                  * network
2413                  */
2414                 if (sd->sd_best_lpni &&
2415                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2416                         /*
2417                          * in case we initially started with a routed
2418                          * destination, let's reset to local
2419                          */
2420                         sd->sd_send_case &= ~REMOTE_DST;
2421                         sd->sd_send_case |= LOCAL_DST;
2422                         return lnet_handle_lo_send(sd);
2423                 } else if (sd->sd_best_lpni) {
2424                         /*
2425                          * in case we initially started with a routed
2426                          * destination, let's reset to local
2427                          */
2428                         sd->sd_send_case &= ~REMOTE_DST;
2429                         sd->sd_send_case |= LOCAL_DST;
2430                         return lnet_handle_send(sd);
2431                 }
2432
2433                 CERROR("Internal Error. Expected to have a best_lpni: %s -> %s\n",
2434                        libcfs_nid2str(sd->sd_src_nid),
2435                        libcfs_nid2str(sd->sd_dst_nid));
2437
2438                 return -EFAULT;
2439         }
2440
2441         /*
2442          * Peer doesn't have a local network. Let's see if there is
2443          * a remote network we can reach it on.
2444          */
2445         return PASS_THROUGH;
2446 }
2447
2448 /*
2449  * Case 1:
2450  *      Source NID not specified
2451  *      Local destination
2452  *      MR peer
2453  *
2454  * Case 2:
2455  *      Source NID not specified
2456  *      Remote destination
2457  *      MR peer
2458  *
2459  * In both of these cases if we're sending a response, ACK or REPLY, then
2460  * we need to send to the destination NID provided.
2461  *
2462  * In the remote case let's deal with MR routers.
2463  *
2464  */
2465
2466 static int
2467 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2468 {
2469         int rc = 0;
2470         struct lnet_peer *gw_peer = NULL;
2471         struct lnet_peer_ni *gw_lpni = NULL;
2472
2473         /*
2474          * handle sending a response to a remote peer here so we don't
2475          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2476          */
2477         if (sd->sd_send_case & REMOTE_DST &&
2478             sd->sd_send_case & SND_RESP) {
2479                 struct lnet_peer_ni *gw;
2480                 struct lnet_peer *gw_peer;
2481
2482                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2483                                                   &gw_peer);
2484                 if (rc < 0) {
2485                         CERROR("Can't send response to %s. No route available\n",
2486                                libcfs_nid2str(sd->sd_dst_nid));
2488                         return -EHOSTUNREACH;
2489                 } else if (rc > 0) {
2490                         return rc;
2491                 }
2492
2493                 sd->sd_best_lpni = gw;
2494                 sd->sd_peer = gw_peer;
2495
2496                 return lnet_handle_send(sd);
2497         }
2498
2499         /*
2500          * Even though the NID for the peer might not be on a local network,
2501          * since the peer is MR there could be other interfaces on the
2502          * local network. In that case we'd still like to prefer the local
2503          * network over the routed network. If we're unable to do that
2504          * then we select the best router among the different routed networks,
2505          * and if the router is MR then we can deal with it as such.
2506          */
2507         rc = lnet_handle_any_mr_dsta(sd);
2508         if (rc != PASS_THROUGH)
2509                 return rc;
2510
2511         /*
2512          * Now that we must route to the destination, we must consider the
2513          * MR case, where the destination has multiple interfaces, some of
2514          * which we can route to and others we do not. For this reason we
2515          * need to select the destination which we can route to and if
2516          * there are multiple, we need to round robin.
2517          */
2518         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2519                                           &gw_peer);
2520         if (rc)
2521                 return rc;
2522
2523         sd->sd_send_case &= ~LOCAL_DST;
2524         sd->sd_send_case |= REMOTE_DST;
2525
2526         sd->sd_peer = gw_peer;
2527         sd->sd_best_lpni = gw_lpni;
2528
2529         return lnet_handle_send(sd);
2530 }
2531
2532 /*
2533  * Source not specified
2534  * Remote destination
2535  * Non-MR peer
2536  *
2537  * Must send to the specified peer NID using the same source NID that
2538  * we've used before. If it's the first time to talk to that peer then
2539  * find the source NI and assign it as preferred to that peer
2540  */
2541 static int
2542 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2543 {
2544         int rc;
2545         struct lnet_peer_ni *gw_lpni = NULL;
2546         struct lnet_peer *gw_peer = NULL;
2547
2548         /*
2549          * Let's set if we have a preferred NI to talk to this NMR peer
2550          */
2551         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd);
2552
2553         /*
2554          * find the router and that'll find the best NI if we didn't find
2555          * it already.
2556          */
2557         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2558                                           &gw_peer);
2559         if (rc)
2560                 return rc;
2561
2562         /*
2563          * set the best_ni we've chosen as the preferred one for
2564          * this peer
2565          */
2566         lnet_set_non_mr_pref_nid(sd);
2567
2568         /* we'll be sending to the gw */
2569         sd->sd_best_lpni = gw_lpni;
2570         sd->sd_peer = gw_peer;
2571
2572         return lnet_handle_send(sd);
2573 }
2574
2575 static int
2576 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2577 {
2578         /*
2579          * turn off the SND_RESP bit.
2580          * It will be checked in the case handling
2581          */
2582         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2583
2584         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2585                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2586                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2587                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2588                 libcfs_nid2str(sd->sd_dst_nid),
2589                 (send_case & LOCAL_DST) ? "local" : "routed");
2590
2591         switch (send_case) {
2592         /*
2593          * For all cases where the source is specified, we should always
2594          * use the destination NID, whether it's an MR destination or not,
2595          * since we're continuing a series of related messages for the
2596          * same RPC
2597          */
2598         case SRC_SPEC_LOCAL_NMR_DST:
2599                 return lnet_handle_spec_local_nmr_dst(sd);
2600         case SRC_SPEC_LOCAL_MR_DST:
2601                 return lnet_handle_spec_local_mr_dst(sd);
2602         case SRC_SPEC_ROUTER_NMR_DST:
2603         case SRC_SPEC_ROUTER_MR_DST:
2604                 return lnet_handle_spec_router_dst(sd);
2605         case SRC_ANY_LOCAL_NMR_DST:
2606                 return lnet_handle_any_local_nmr_dst(sd);
2607         case SRC_ANY_LOCAL_MR_DST:
2608         case SRC_ANY_ROUTER_MR_DST:
2609                 return lnet_handle_any_mr_dst(sd);
2610         case SRC_ANY_ROUTER_NMR_DST:
2611                 return lnet_handle_any_router_nmr_dst(sd);
2612         default:
2613                 CERROR("Unknown send case\n");
2614                 return -1;
2615         }
2616 }
2617
2618 static int
2619 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2620                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2621 {
2622         struct lnet_peer_ni     *lpni;
2623         struct lnet_peer        *peer;
2624         struct lnet_send_data   send_data;
2625         int                     cpt, rc;
2626         int                     md_cpt;
2627         __u32                   send_case = 0;
2628
2629         memset(&send_data, 0, sizeof(send_data));
2630
2631         /*
2632          * get an initial CPT to use for locking. The idea here is not to
2633          * serialize the calls to select_pathway, so that as many
2634          * operations can run concurrently as possible. To do that we use
2635          * the CPT where this call is being executed. Later on when we
2636          * determine the CPT to use in lnet_message_commit, we switch the
2637          * lock and check if there was any configuration change.  If none,
2638          * then we proceed, if there is, then we restart the operation.
2639          */
2640         cpt = lnet_net_lock_current();
2641
2642         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2643         if (md_cpt == CFS_CPT_ANY)
2644                 md_cpt = cpt;
2645
2646 again:
2647
2648         /*
2649          * If we're being asked to send to the loopback interface, there
2650          * is no need to go through any selection. We can just shortcut
2651          * the entire process and send over lolnd
2652          */
2653         send_data.sd_msg = msg;
2654         send_data.sd_cpt = cpt;
2655         if (LNET_NETTYP(LNET_NIDNET(dst_nid)) == LOLND) {
2656                 rc = lnet_handle_lo_send(&send_data);
2657                 lnet_net_unlock(cpt);
2658                 return rc;
2659         }
2660
2661         /*
2662          * find an existing peer_ni, or create one and mark it as having been
2663          * created due to network traffic. This call will create the
2664          * peer->peer_net->peer_ni tree.
2665          */
2666         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2667         if (IS_ERR(lpni)) {
2668                 lnet_net_unlock(cpt);
2669                 return PTR_ERR(lpni);
2670         }
2671
2672         /*
2673          * Cache the original src_nid. If we need to resend the message
2674          * then we'll need to know whether the src_nid was originally
2675          * specified for this message. If it was originally specified,
2676          * then we need to keep using the same src_nid since it's
2677          * continuing the same sequence of messages.
2678          */
2679         msg->msg_src_nid_param = src_nid;
2680
2681         /*
2682          * If necessary, perform discovery on the peer that owns this peer_ni.
2683          * Note, this can result in the ownership of this peer_ni changing
2684          * to another peer object.
2685          */
2686         rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
2687         if (rc) {
2688                 lnet_peer_ni_decref_locked(lpni);
2689                 lnet_net_unlock(cpt);
2690                 return rc;
2691         }
2692         lnet_peer_ni_decref_locked(lpni);
2693
2694         peer = lpni->lpni_peer_net->lpn_peer;
2695
2696         /*
2697          * Identify the different send cases
2698          */
2699         if (src_nid == LNET_NID_ANY)
2700                 send_case |= SRC_ANY;
2701         else
2702                 send_case |= SRC_SPEC;
2703
2704         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2705                 send_case |= LOCAL_DST;
2706         else
2707                 send_case |= REMOTE_DST;
2708
2709         /*
2710          * if this is a non-MR peer or if we're recovering a peer ni then
2711          * let's consider this an NMR case so we can hit the destination
2712          * NID.
2713          */
2714         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery)
2715                 send_case |= NMR_DST;
2716         else
2717                 send_case |= MR_DST;
2718
2719         if (msg->msg_type == LNET_MSG_REPLY ||
2720             msg->msg_type == LNET_MSG_ACK)
2721                 send_case |= SND_RESP;
2722
2723         /* assign parameters to the send_data */
2724         send_data.sd_rtr_nid = rtr_nid;
2725         send_data.sd_src_nid = src_nid;
2726         send_data.sd_dst_nid = dst_nid;
2727         send_data.sd_best_lpni = lpni;
2728         /*
2729          * keep a pointer to the final destination in case we're going to
2730          * route, so we'll need to access it later
2731          */
2732         send_data.sd_final_dst_lpni = lpni;
2733         send_data.sd_peer = peer;
2734         send_data.sd_md_cpt = md_cpt;
2735         send_data.sd_send_case = send_case;
2736
2737         rc = lnet_handle_send_case_locked(&send_data);
2738
2739         /*
2740          * Update the local cpt since send_data.sd_cpt might've been
2741          * updated as a result of calling lnet_handle_send_case_locked().
2742          */
2743         cpt = send_data.sd_cpt;
2744
2745         if (rc == REPEAT_SEND)
2746                 goto again;
2747
2748         lnet_net_unlock(cpt);
2749
2750         return rc;
2751 }
2752
2753 int
2754 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
2755 {
2756         lnet_nid_t              dst_nid = msg->msg_target.nid;
2757         int                     rc;
2758
2759         /*
2760          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
2761          * but we might want to use pre-determined router for ACK/REPLY
2762          * in the future
2763          */
2764         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
2765         LASSERT(msg->msg_txpeer == NULL);
2766         LASSERT(msg->msg_txni == NULL);
2767         LASSERT(!msg->msg_sending);
2768         LASSERT(!msg->msg_target_is_router);
2769         LASSERT(!msg->msg_receiving);
2770
2771         msg->msg_sending = 1;
2772
2773         LASSERT(!msg->msg_tx_committed);
2774
2775         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
2776         if (rc < 0) {
2777                 if (rc == -EHOSTUNREACH)
2778                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
2779                 else
2780                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
2781                 return rc;
2782         }
2783
2784         if (rc == LNET_CREDIT_OK)
2785                 lnet_ni_send(msg->msg_txni, msg);
2786
2787         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
2788         return 0;
2789 }
2790
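/*
 * Illustrative sketch (not part of the build): what a caller such as
 * LNetPut() or LNetGet() can conclude from lnet_send(). A negative
 * return means the message was never handed to an LND and
 * msg_health_status has already been set; zero covers every accepted
 * outcome: handed to the LND (LNET_CREDIT_OK), queued waiting for tx
 * credits (LNET_CREDIT_WAIT), or parked awaiting peer discovery
 * (LNET_DC_WAIT).
 */
#if 0
        rc = lnet_send(self, msg, LNET_NID_ANY);
        if (rc < 0) {
                /* never committed to an LND; health status already set */
                CERROR("send failed: %d\n", rc);
        }
        /* rc == 0: message accepted; completion is signalled later */
#endif
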
2791 enum lnet_mt_event_type {
2792         MT_TYPE_LOCAL_NI = 0,
2793         MT_TYPE_PEER_NI
2794 };
2795
2796 struct lnet_mt_event_info {
2797         enum lnet_mt_event_type mt_type;
2798         lnet_nid_t mt_nid;
2799 };
2800
2801 /* called with res_lock held */
2802 void
2803 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
2804 {
2805         struct lnet_rsp_tracker *rspt;
2806
2807         /*
2808          * msg has a refcount on the MD so the MD is not going away.
2809          * The rspt queue for the cpt is protected by
2810          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
2811          */
2812         if (!md->md_rspt_ptr)
2813                 return;
2814
2815         rspt = md->md_rspt_ptr;
2816
2817         /* debug code */
2818         LASSERT(rspt->rspt_cpt == cpt);
2819
2820         md->md_rspt_ptr = NULL;
2821
2822         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
2823                 /*
2824                  * The monitor thread has invalidated this handle because the
2825                  * response timed out, but it failed to lookup the MD. That
2826                  * means this response tracker is on the zombie list. We can
2827                  * safely remove it under the resource lock (held by caller) and
2828                  * free the response tracker block.
2829                  */
2830                 list_del(&rspt->rspt_on_list);
2831                 lnet_rspt_free(rspt, cpt);
2832         } else {
2833                 /*
2834                  * invalidate the handle to indicate that a response has been
2835                  * received, which will then lead the monitor thread to clean up
2836                  * the rspt block.
2837                  */
2838                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
2839         }
2840 }
2841
2842 void
2843 lnet_clean_zombie_rstqs(void)
2844 {
2845         struct lnet_rsp_tracker *rspt, *tmp;
2846         int i;
2847
2848         cfs_cpt_for_each(i, lnet_cpt_table()) {
2849                 list_for_each_entry_safe(rspt, tmp,
2850                                          the_lnet.ln_mt_zombie_rstqs[i],
2851                                          rspt_on_list) {
2852                         list_del(&rspt->rspt_on_list);
2853                         lnet_rspt_free(rspt, i);
2854                 }
2855         }
2856
2857         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
2858 }
2859
2860 static void
2861 lnet_finalize_expired_responses(void)
2862 {
2863         struct lnet_libmd *md;
2864         struct lnet_rsp_tracker *rspt, *tmp;
2865         ktime_t now;
2866         int i;
2867
2868         if (the_lnet.ln_mt_rstq == NULL)
2869                 return;
2870
2871         cfs_cpt_for_each(i, lnet_cpt_table()) {
2872                 LIST_HEAD(local_queue);
2873
2874                 lnet_net_lock(i);
2875                 if (!the_lnet.ln_mt_rstq[i]) {
2876                         lnet_net_unlock(i);
2877                         continue;
2878                 }
2879                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
2880                 lnet_net_unlock(i);
2881
2882                 now = ktime_get();
2883
2884                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
2885                         /*
2886                          * The rspt mdh will be invalidated when a response
2887                          * is received or whenever we want to discard the
2888                          * block. The monitor thread will walk the queue
2889                          * and clean up any rspts with an invalid mdh.
2890                          * The monitor thread will walk the queue until
2891                          * the first unexpired rspt block. This means that
2892                          * some rspt blocks which received their
2893                          * corresponding responses will linger in the
2894                          * queue until they are cleaned up eventually.
2895                          */
2896                         lnet_res_lock(i);
2897                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
2898                                 lnet_res_unlock(i);
2899                                 list_del(&rspt->rspt_on_list);
2900                                 lnet_rspt_free(rspt, i);
2901                                 continue;
2902                         }
2903
2904                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
2905                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
2906                                 struct lnet_peer_ni *lpni;
2907                                 lnet_nid_t nid;
2908
2909                                 md = lnet_handle2md(&rspt->rspt_mdh);
2910                                 if (!md) {
2911                                         /* MD has been queued for unlink, but
2912                                          * rspt hasn't been detached (Note we've
2913                                          * checked above that the rspt_mdh is
2914                                          * valid). Since we cannot lookup the MD
2915                                          * we're unable to detach the rspt
2916                                          * ourselves. Thus, move the rspt to the
2917                                          * zombie list where we'll wait for
2918                                          * either:
2919                                          *   1. The remaining operations on the
2920                                          *   MD to complete. In this case the
2921                                          *   final operation will result in
2922                                          *   lnet_msg_detach_md()->
2923                                          *   lnet_detach_rsp_tracker() where
2924                                          *   we will clean up this response
2925                                          *   tracker.
2926                                          *   2. LNet to shutdown. In this case
2927                                          *   we'll wait until after all LND Nets
2928                                          *   have shutdown and then we can
2929                                          *   safely free any remaining response
2930                                          *   tracker blocks on the zombie list.
2931                                          * Note: We need to hold the resource
2932                                          * lock when adding to the zombie list
2933                                          * because we may have concurrent access
2934                                          * with lnet_detach_rsp_tracker().
2935                                          */
2936                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
2937                                         list_move(&rspt->rspt_on_list,
2938                                                   the_lnet.ln_mt_zombie_rstqs[i]);
2939                                         lnet_res_unlock(i);
2940                                         continue;
2941                                 }
2942                                 LASSERT(md->md_rspt_ptr == rspt);
2943                                 md->md_rspt_ptr = NULL;
2944                                 lnet_res_unlock(i);
2945
2946                                 LNetMDUnlink(rspt->rspt_mdh);
2947
2948                                 nid = rspt->rspt_next_hop_nid;
2949
2950                                 list_del(&rspt->rspt_on_list);
2951                                 lnet_rspt_free(rspt, i);
2952
2953                                 /* If we're shutting down we just want to clean
2954                                  * up the rspt blocks
2955                                  */
2956                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
2957                                         continue;
2958
2959                                 lnet_net_lock(i);
2960                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
2961                                 lnet_net_unlock(i);
2962
2963                                 CDEBUG(D_NET,
2964                                        "Response timeout: md = %p: nid = %s\n",
2965                                        md, libcfs_nid2str(nid));
2966
2967                                 /*
2968                                  * If there is a timeout on the response
2969                                  * from the next hop decrement its health
2970                                  * value so that we don't use it
2971                                  */
2972                                 lnet_net_lock(0);
2973                                 lpni = lnet_find_peer_ni_locked(nid);
2974                                 if (lpni) {
2975                                         lnet_handle_remote_failure_locked(lpni);
2976                                         lnet_peer_ni_decref_locked(lpni);
2977                                 }
2978                                 lnet_net_unlock(0);
2979                         } else {
2980                                 lnet_res_unlock(i);
2981                                 break;
2982                         }
2983                 }
2984
2985                 if (!list_empty(&local_queue)) {
2986                         lnet_net_lock(i);
2987                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
2988                         lnet_net_unlock(i);
2989                 }
2990         }
2991 }
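
/*
 * Illustrative sketch (not part of the original file): expiry above is
 * a plain ktime comparison. Assuming rspt_deadline was stamped at send
 * time as "now + transaction timeout", a tracker has expired once
 * ktime_compare(now, deadline) >= 0. The helper name is hypothetical.
 */
#if 0
static bool rspt_expired_sketch(struct lnet_rsp_tracker *rspt)
{
        return ktime_compare(ktime_get(), rspt->rspt_deadline) >= 0;
}
#endif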
2992
2993 static void
2994 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
2995 {
2996         struct lnet_msg *msg;
2997
2998         while (!list_empty(resendq)) {
2999                 struct lnet_peer_ni *lpni;
3000
3001                 msg = list_entry(resendq->next, struct lnet_msg,
3002                                  msg_list);
3003
3004                 list_del_init(&msg->msg_list);
3005
3006                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3007                 if (!lpni) {
3008                         lnet_net_unlock(cpt);
3009                         CERROR("Expected that a peer is already created for %s\n",
3010                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3011                         msg->msg_no_resend = true;
3012                         lnet_finalize(msg, -EFAULT);
3013                         lnet_net_lock(cpt);
3014                 } else {
3015                         struct lnet_peer *peer;
3016                         int rc;
3017                         lnet_nid_t src_nid = LNET_NID_ANY;
3018
3019                         /*
3020                          * if this message is not being routed and the
3021                          * peer is non-MR then we must use the same
3022                          * src_nid that was used in the original send.
3023                          * Otherwise if we're routing the message (i.e.
3024                          * we're a router) then we can use any of our
3025                          * local interfaces. It doesn't matter to the
3026                          * final destination.
3027                          */
3028                         peer = lpni->lpni_peer_net->lpn_peer;
3029                         if (!msg->msg_routing &&
3030                             !lnet_peer_is_multi_rail(peer))
3031                                 src_nid = le64_to_cpu(msg->msg_hdr.src_nid);
3032
3033                         /*
3034                          * If we originally specified a src NID, then we
3035                          * must attempt to reuse it in the resend as well.
3036                          */
3037                         if (msg->msg_src_nid_param != LNET_NID_ANY)
3038                                 src_nid = msg->msg_src_nid_param;
3039                         lnet_peer_ni_decref_locked(lpni);
3040
3041                         lnet_net_unlock(cpt);
3042                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3043                                libcfs_nid2str(src_nid),
3044                                libcfs_id2str(msg->msg_target),
3045                                lnet_msgtyp2str(msg->msg_type),
3046                                msg->msg_recovery,
3047                                msg->msg_retry_count);
3048                         rc = lnet_send(src_nid, msg, LNET_NID_ANY);
3049                         if (rc) {
3050                                 CERROR("Error sending %s to %s: %d\n",
3051                                        lnet_msgtyp2str(msg->msg_type),
3052                                        libcfs_id2str(msg->msg_target), rc);
3053                                 msg->msg_no_resend = true;
3054                                 lnet_finalize(msg, rc);
3055                         }
3056                         lnet_net_lock(cpt);
3057                         if (!rc)
3058                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3059                 }
3060         }
3061 }
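
/*
 * Illustrative sketch (not part of the original file): the resend loop
 * above follows the common "pop under lock, process unlocked, relock"
 * pattern, so that lnet_send() and lnet_finalize() never run with the
 * net lock held. A hypothetical, stripped-down rendering:
 */
#if 0
static void drain_queue_sketch(struct list_head *q, int cpt)
{
        struct lnet_msg *msg;

        while (!list_empty(q)) {
                msg = list_entry(q->next, struct lnet_msg, msg_list);
                list_del_init(&msg->msg_list);

                lnet_net_unlock(cpt);
                /* ... process msg without the lock held ... */
                lnet_net_lock(cpt);
        }
}
#endif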
3062
3063 static void
3064 lnet_resend_pending_msgs(void)
3065 {
3066         int i;
3067
3068         cfs_cpt_for_each(i, lnet_cpt_table()) {
3069                 lnet_net_lock(i);
3070                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3071                 lnet_net_unlock(i);
3072         }
3073 }
3074
3075 /* called with the cpt lock and ni_lock held */
3076 static void
3077 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3078 {
3079         struct lnet_handle_md recovery_mdh;
3080
3081         LNetInvalidateMDHandle(&recovery_mdh);
3082
3083         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3084             force) {
3085                 recovery_mdh = ni->ni_ping_mdh;
3086                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3087         }
3088         lnet_ni_unlock(ni);
3089         lnet_net_unlock(cpt);
3090         if (!LNetMDHandleIsInvalid(recovery_mdh))
3091                 LNetMDUnlink(recovery_mdh);
3092         lnet_net_lock(cpt);
3093         lnet_ni_lock(ni);
3094 }
3095
3096 static void
3097 lnet_recover_local_nis(void)
3098 {
3099         struct lnet_mt_event_info *ev_info;
3100         LIST_HEAD(processed_list);
3101         LIST_HEAD(local_queue);
3102         struct lnet_handle_md mdh;
3103         struct lnet_ni *tmp;
3104         struct lnet_ni *ni;
3105         lnet_nid_t nid;
3106         int healthv;
3107         int rc;
3108
3109         /*
3110          * splice the recovery queue on a local queue. We will iterate
3111          * through the local queue and update it as needed. Once we're
3112          * done with the traversal, we'll splice the local queue back on
3113          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3114          * will be traversed in the next iteration.
3115          */
3116         lnet_net_lock(0);
3117         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3118                          &local_queue);
3119         lnet_net_unlock(0);
3120
3121         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3122                 /*
3123                  * if an NI is being deleted or it is now healthy, there
3124                  * is no need to keep it around in the recovery queue.
3125                  * The monitor thread is the only thread responsible for
3126                  * removing the NI from the recovery queue.
3127                  * Multiple threads can be adding NIs to the recovery
3128                  * queue.
3129                  */
3130                 healthv = atomic_read(&ni->ni_healthv);
3131
3132                 lnet_net_lock(0);
3133                 lnet_ni_lock(ni);
3134                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3135                     healthv == LNET_MAX_HEALTH_VALUE) {
3136                         list_del_init(&ni->ni_recovery);
3137                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3138                         lnet_ni_unlock(ni);
3139                         lnet_ni_decref_locked(ni, 0);
3140                         lnet_net_unlock(0);
3141                         continue;
3142                 }
3143
3144                 /*
3145                  * if the local NI failed recovery we must unlink the md.
3146                  * But we want to keep the local_ni on the recovery queue
3147                  * so we can continue the attempts to recover it.
3148                  */
3149                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3150                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3151                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3152                 }
3153
3154                 lnet_ni_unlock(ni);
3155                 lnet_net_unlock(0);
3156
3158                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3159                        libcfs_nid2str(ni->ni_nid));
3160
3161                 lnet_ni_lock(ni);
3162                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3163                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3164                         lnet_ni_unlock(ni);
3165
3166                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3167                         if (!ev_info) {
3168                                 CERROR("Out of memory. Can't recover %s\n",
3169                                        libcfs_nid2str(ni->ni_nid));
3170                                 lnet_ni_lock(ni);
3171                                 ni->ni_recovery_state &=
3172                                   ~LNET_NI_RECOVERY_PENDING;
3173                                 lnet_ni_unlock(ni);
3174                                 continue;
3175                         }
3176
3177                         mdh = ni->ni_ping_mdh;
3178                         /*
3179                          * Invalidate the ni mdh in case it's deleted.
3180                          * We'll unlink the mdh in this case below.
3181                          */
3182                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3183                         nid = ni->ni_nid;
3184
3185                         /*
3186                          * remove the NI from the local queue and drop the
3187                          * reference count to it while we're recovering
3188                          * it. The reason is that the NI could be
3189                          * deleted, and the way the code is structured,
3190                          * if we don't drop the NI here then the deletion
3191                          * code will spin waiting for the reference count
3192                          * to drop while holding the ln_mutex_lock().
3193                          * When we look up the peer to send to in
3194                          * lnet_select_pathway() we will try to take the
3195                          * ln_mutex_lock() as well, leading to a
3196                          * deadlock. By dropping the refcount and
3197                          * removing the NI from the list, we allow it to
3198                          * be removed; we then use the cached NID to look
3199                          * it up again. If it's gone, we just continue
3200                          * examining the rest of the queue.
3201                          */
3202                         lnet_net_lock(0);
3203                         list_del_init(&ni->ni_recovery);
3204                         lnet_ni_decref_locked(ni, 0);
3205                         lnet_net_unlock(0);
3206
3207                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3208                         ev_info->mt_nid = nid;
3209                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3210                                             ev_info, the_lnet.ln_mt_eqh, true);
3211                         /* lookup the nid again */
3212                         lnet_net_lock(0);
3213                         ni = lnet_nid2ni_locked(nid, 0);
3214                         if (!ni) {
3215                                 /*
3216                                  * the NI has been deleted when we dropped
3217                                  * the ref count
3218                                  */
3219                                 lnet_net_unlock(0);
3220                                 LNetMDUnlink(mdh);
3221                                 continue;
3222                         }
3223                         /*
3224                          * Same note as in lnet_recover_peer_nis(). While
3225                          * we're sending the ping, the NI is free to be
3226                          * deleted or manipulated. By this point it
3227                          * could've been added back on the recovery
3228                          * queue, with a refcount taken on it. So we
3229                          * can't just blindly add it again or we'll
3230                          * corrupt the queue. We must check under lock
3231                          * that it's not on any list; if it isn't, we
3232                          * add it to the processed list, which will
3233                          * eventually be spliced back onto the recovery queue.
3234                          */
3235                         ni->ni_ping_mdh = mdh;
3236                         if (list_empty(&ni->ni_recovery)) {
3237                                 list_add_tail(&ni->ni_recovery, &processed_list);
3238                                 lnet_ni_addref_locked(ni, 0);
3239                         }
3240                         lnet_net_unlock(0);
3241
3242                         lnet_ni_lock(ni);
3243                         if (rc)
3244                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3245                 }
3246                 lnet_ni_unlock(ni);
3247         }
3248
3249         /*
3250          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3251          * reexamined in the next iteration.
3252          */
3253         list_splice_init(&processed_list, &local_queue);
3254         lnet_net_lock(0);
3255         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3256         lnet_net_unlock(0);
3257 }
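
/*
 * Illustrative sketch (not part of the original file): the
 * drop-ref/re-lookup dance above reduces to the pattern below. Only
 * the cached NID, never the NI pointer, may be trusted across the
 * unlocked ping. The helper name is hypothetical.
 */
#if 0
static struct lnet_ni *relookup_ni_sketch(lnet_nid_t cached_nid)
{
        struct lnet_ni *ni;

        lnet_net_lock(0);
        ni = lnet_nid2ni_locked(cached_nid, 0);
        if (ni)
                lnet_ni_addref_locked(ni, 0); /* pin before unlocking */
        lnet_net_unlock(0);

        /* NULL means the NI was deleted while we were unlocked */
        return ni;
}
#endif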
3258
3259 static int
3260 lnet_resendqs_create(void)
3261 {
3262         struct list_head **resendqs;
3263
3264         resendqs = lnet_create_array_of_queues();
3265         if (!resendqs)
3266                 return -ENOMEM;
3267
3268         lnet_net_lock(LNET_LOCK_EX);
3269         the_lnet.ln_mt_resendqs = resendqs;
3270         lnet_net_unlock(LNET_LOCK_EX);
3271
3272         return 0;
3273 }
3274
3275 static void
3276 lnet_clean_local_ni_recoveryq(void)
3277 {
3278         struct lnet_ni *ni;
3279
3280         /* This is only called when the monitor thread has stopped */
3281         lnet_net_lock(0);
3282
3283         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3284                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3285                                 struct lnet_ni, ni_recovery);
3286                 list_del_init(&ni->ni_recovery);
3287                 lnet_ni_lock(ni);
3288                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3289                 lnet_ni_unlock(ni);
3290                 lnet_ni_decref_locked(ni, 0);
3291         }
3292
3293         lnet_net_unlock(0);
3294 }
3295
3296 static void
3297 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3298                                      bool force)
3299 {
3300         struct lnet_handle_md recovery_mdh;
3301
3302         LNetInvalidateMDHandle(&recovery_mdh);
3303
3304         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3305                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3306                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3307         }
3308         spin_unlock(&lpni->lpni_lock);
3309         lnet_net_unlock(cpt);
3310         if (!LNetMDHandleIsInvalid(recovery_mdh))
3311                 LNetMDUnlink(recovery_mdh);
3312         lnet_net_lock(cpt);
3313         spin_lock(&lpni->lpni_lock);
3314 }
3315
3316 static void
3317 lnet_clean_peer_ni_recoveryq(void)
3318 {
3319         struct lnet_peer_ni *lpni, *tmp;
3320
3321         lnet_net_lock(LNET_LOCK_EX);
3322
3323         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3324                                  lpni_recovery) {
3325                 list_del_init(&lpni->lpni_recovery);
3326                 spin_lock(&lpni->lpni_lock);
3327                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3328                 spin_unlock(&lpni->lpni_lock);
3329                 lnet_peer_ni_decref_locked(lpni);
3330         }
3331
3332         lnet_net_unlock(LNET_LOCK_EX);
3333 }
3334
3335 static void
3336 lnet_clean_resendqs(void)
3337 {
3338         struct lnet_msg *msg, *tmp;
3339         LIST_HEAD(msgs);
3340         int i;
3341
3342         cfs_cpt_for_each(i, lnet_cpt_table()) {
3343                 lnet_net_lock(i);
3344                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3345                 lnet_net_unlock(i);
3346                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3347                         list_del_init(&msg->msg_list);
3348                         msg->msg_no_resend = true;
3349                         lnet_finalize(msg, -ESHUTDOWN);
3350                 }
3351         }
3352
3353         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3354 }
3355
3356 static void
3357 lnet_recover_peer_nis(void)
3358 {
3359         struct lnet_mt_event_info *ev_info;
3360         LIST_HEAD(processed_list);
3361         LIST_HEAD(local_queue);
3362         struct lnet_handle_md mdh;
3363         struct lnet_peer_ni *lpni;
3364         struct lnet_peer_ni *tmp;
3365         lnet_nid_t nid;
3366         int healthv;
3367         int rc;
3368
3369         /*
3370          * Always use cpt 0 for locking across all interactions with
3371          * ln_mt_peerNIRecovq
3372          */
3373         lnet_net_lock(0);
3374         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3375                          &local_queue);
3376         lnet_net_unlock(0);
3377
3378         list_for_each_entry_safe(lpni, tmp, &local_queue,
3379                                  lpni_recovery) {
3380                 /*
3381                  * The same protection strategy is used here as is in the
3382                  * local recovery case.
3383                  */
3384                 lnet_net_lock(0);
3385                 healthv = atomic_read(&lpni->lpni_healthv);
3386                 spin_lock(&lpni->lpni_lock);
3387                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3388                     healthv == LNET_MAX_HEALTH_VALUE) {
3389                         list_del_init(&lpni->lpni_recovery);
3390                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3391                         spin_unlock(&lpni->lpni_lock);
3392                         lnet_peer_ni_decref_locked(lpni);
3393                         lnet_net_unlock(0);
3394                         continue;
3395                 }
3396
3397                 /*
3398                  * If the peer NI has failed recovery we must unlink the
3399                  * md. But we want to keep the peer ni on the recovery
3400                  * queue so we can try to continue recovering it
3401                  */
3402                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3403                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3404                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3405                 }
3406
3407                 spin_unlock(&lpni->lpni_lock);
3408                 lnet_net_unlock(0);
3409
3410                 /*
3411                  * NOTE: we're racing with peer deletion from user space.
3412                  * It's possible that a peer is deleted after we check its
3413                  * state. In this case the recovery can create a new peer
3414                  * state. In that case the recovery may create a new peer.
3415                 spin_lock(&lpni->lpni_lock);
3416                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3417                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3418                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3419                         spin_unlock(&lpni->lpni_lock);
3420
3421                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3422                         if (!ev_info) {
3423                                 CERROR("Out of memory. Can't recover %s\n",
3424                                        libcfs_nid2str(lpni->lpni_nid));
3425                                 spin_lock(&lpni->lpni_lock);
3426                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3427                                 spin_unlock(&lpni->lpni_lock);
3428                                 continue;
3429                         }
3430
3431                         /* look at the comments in lnet_recover_local_nis() */
3432                         mdh = lpni->lpni_recovery_ping_mdh;
3433                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3434                         nid = lpni->lpni_nid;
3435                         lnet_net_lock(0);
3436                         list_del_init(&lpni->lpni_recovery);
3437                         lnet_peer_ni_decref_locked(lpni);
3438                         lnet_net_unlock(0);
3439
3440                         ev_info->mt_type = MT_TYPE_PEER_NI;
3441                         ev_info->mt_nid = nid;
3442                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3443                                             ev_info, the_lnet.ln_mt_eqh, true);
3444                         lnet_net_lock(0);
3445                         /*
3446                          * lnet_find_peer_ni_locked() grabs a refcount for
3447                          * us. No need to take it explicitly.
3448                          */
3449                         lpni = lnet_find_peer_ni_locked(nid);
3450                         if (!lpni) {
3451                                 lnet_net_unlock(0);
3452                                 LNetMDUnlink(mdh);
3453                                 continue;
3454                         }
3455
3456                         lpni->lpni_recovery_ping_mdh = mdh;
3457                         /*
3458                          * While we were unlocked the lpni could've been
3459                          * re-added to the recovery queue. In that case
3460                          * we don't need to add it to the local queue,
3461                          * since it's already there, and the thread that
3462                          * added it took its own refcount on the peer;
3463                          * that means we must drop the reference
3464                          * implicitly grabbed by
3465                          * lnet_find_peer_ni_locked(). Otherwise, if the
3466                          * lpni is still not on the recovery queue, then
3467                          * we'll add it to the processed list.
3468                          */
3469                         if (list_empty(&lpni->lpni_recovery))
3470                                 list_add_tail(&lpni->lpni_recovery, &processed_list);
3471                         else
3472                                 lnet_peer_ni_decref_locked(lpni);
3473                         lnet_net_unlock(0);
3474
3475                         spin_lock(&lpni->lpni_lock);
3476                         if (rc)
3477                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3478                 }
3479                 spin_unlock(&lpni->lpni_lock);
3480         }
3481
3482         list_splice_init(&processed_list, &local_queue);
3483         lnet_net_lock(0);
3484         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3485         lnet_net_unlock(0);
3486 }
3487
3488 static int
3489 lnet_monitor_thread(void *arg)
3490 {
3491         time64_t recovery_timeout = 0;
3492         time64_t rsp_timeout = 0;
3493         int interval;
3494         time64_t now;
3495
3496         wait_for_completion(&the_lnet.ln_started);
3497         /*
3498          * The monitor thread takes care of the following:
3499          *  1. Checking the aliveness of routers
3500          *  2. Resending any messages queued on the resend
3501          *     queue.
3502          *  3. Pinging any NIs queued on the local NI
3503          *     recovery queue.
3504          *  4. Pinging any peer NIs queued on the peer NI
3505          *     recovery queue.
3506          */
3507         cfs_block_allsigs();
3508
3509         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3510                 now = ktime_get_real_seconds();
3511
3512                 if (lnet_router_checker_active())
3513                         lnet_check_routers();
3514
3515                 lnet_resend_pending_msgs();
3516
3517                 if (now >= rsp_timeout) {
3518                         lnet_finalize_expired_responses();
3519                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3520                 }
3521
3522                 if (now >= recovery_timeout) {
3523                         lnet_recover_local_nis();
3524                         lnet_recover_peer_nis();
3525                         recovery_timeout = now + lnet_recovery_interval;
3526                 }
3527
3528                 /*
3529                  * TODO: do we need to check if we should sleep without
3530                  * a timeout?  Technically, an active system will always
3531                  * have messages in flight, so this check will always
3532                  * evaluate to false. And on an idle system, do we care
3533                  * if we wake up every second? That said, we have seen
3534                  * complaints about an idle thread waking up
3535                  * unnecessarily.
3536                  *
3537                  * Take the current net_count into account when waking
3538                  * up for alive router checking, since we may need to
3539                  * check as many networks as we have configured.
3540                  */
3541                 interval = min(lnet_recovery_interval,
3542                                min((unsigned int) alive_router_check_interval /
3543                                         lnet_current_net_count,
3544                                    lnet_transaction_timeout / 2));
3545                 wait_for_completion_interruptible_timeout(
3546                         &the_lnet.ln_mt_wait_complete,
3547                         cfs_time_seconds(interval));
3548                 /* Must re-init the completion before testing anything,
3549                  * including ln_mt_state.
3550                  */
3551                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3552         }
3553
3554         /* Shutting down */
3555         lnet_net_lock(LNET_LOCK_EX);
3556         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3557         lnet_net_unlock(LNET_LOCK_EX);
3558
3559         /* signal that the monitor thread is exiting */
3560         up(&the_lnet.ln_mt_signal);
3561
3562         return 0;
3563 }
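
/*
 * Worked example (not part of the original file) of the sleep-interval
 * computation above, with assumed tunables lnet_recovery_interval = 30,
 * alive_router_check_interval = 60, lnet_current_net_count = 2 and
 * lnet_transaction_timeout = 50:
 *
 *   interval = min(30, min(60 / 2, 50 / 2))
 *            = min(30, min(30, 25)) = 25
 *
 * so the thread wakes at least every 25 seconds, often enough for the
 * most frequent of its periodic duties.
 */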
3564
3565 /*
3566  * lnet_send_ping
3567  * Sends a ping.
3568  * Returns 0 on success.
3569  * Returns > 0 (positive errno) if LNetMDBind or buffer allocation fails.
3570  * Returns < 0 if LNetGet fails or dest_nid is LNET_NID_ANY.
3571  */
3572 int
3573 lnet_send_ping(lnet_nid_t dest_nid,
3574                struct lnet_handle_md *mdh, int nnis,
3575                void *user_data, struct lnet_handle_eq eqh, bool recovery)
3576 {
3577         struct lnet_md md = { NULL };
3578         struct lnet_process_id id;
3579         struct lnet_ping_buffer *pbuf;
3580         int rc;
3581
3582         if (dest_nid == LNET_NID_ANY) {
3583                 rc = -EHOSTUNREACH;
3584                 goto fail_error;
3585         }
3586
3587         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3588         if (!pbuf) {
3589                 rc = ENOMEM;
3590                 goto fail_error;
3591         }
3592
3593         /* initialize md content */
3594         md.start     = &pbuf->pb_info;
3595         md.length    = LNET_PING_INFO_SIZE(nnis);
3596         md.threshold = 2; /* GET/REPLY */
3597         md.max_size  = 0;
3598         md.options   = LNET_MD_TRUNCATE;
3599         md.user_ptr  = user_data;
3600         md.eq_handle = eqh;
3601
3602         rc = LNetMDBind(md, LNET_UNLINK, mdh);
3603         if (rc) {
3604                 lnet_ping_buffer_decref(pbuf);
3605                 CERROR("Can't bind MD: %d\n", rc);
3606                 rc = -rc; /* change the rc to positive */
3607                 goto fail_error;
3608         }
3609         id.pid = LNET_PID_LUSTRE;
3610         id.nid = dest_nid;
3611
3612         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3613                      LNET_RESERVED_PORTAL,
3614                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3615
3616         if (rc)
3617                 goto fail_unlink_md;
3618
3619         return 0;
3620
3621 fail_unlink_md:
3622         LNetMDUnlink(*mdh);
3623         LNetInvalidateMDHandle(mdh);
3624 fail_error:
3625         return rc;
3626 }
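
/*
 * Usage sketch (not part of the original file): this is essentially how
 * the recovery paths above drive lnet_send_ping(); ev_info travels back
 * to lnet_mt_event_handler() via md.user_ptr. The wrapper name is
 * hypothetical.
 */
#if 0
static int send_recovery_ping_sketch(lnet_nid_t nid,
                                     struct lnet_handle_md *mdh,
                                     struct lnet_mt_event_info *ev_info)
{
        ev_info->mt_type = MT_TYPE_PEER_NI;
        ev_info->mt_nid = nid;

        return lnet_send_ping(nid, mdh, LNET_INTERFACES_MIN,
                              ev_info, the_lnet.ln_mt_eqh, true);
}
#endif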
3627
3628 static void
3629 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3630                            int status, bool unlink_event)
3631 {
3632         lnet_nid_t nid = ev_info->mt_nid;
3633
3634         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3635                 struct lnet_ni *ni;
3636
3637                 lnet_net_lock(0);
3638                 ni = lnet_nid2ni_locked(nid, 0);
3639                 if (!ni) {
3640                         lnet_net_unlock(0);
3641                         return;
3642                 }
3643                 lnet_ni_lock(ni);
3644                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3645                 if (status)
3646                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3647                 lnet_ni_unlock(ni);
3648                 lnet_net_unlock(0);
3649
3650                 if (status != 0) {
3651                         CERROR("local NI (%s) recovery failed with %d\n",
3652                                libcfs_nid2str(nid), status);
3653                         return;
3654                 }
3655                 /*
3656                  * need to increment healthv for the ni here, because in
3657                  * the lnet_finalize() path we don't have access to this
3658                  * NI. And in order to get access to it, we'll need to
3659                  * carry forward too much information.
3660                  * In the peer case, it'll naturally be incremented
3661                  */
3662                 if (!unlink_event)
3663                         lnet_inc_healthv(&ni->ni_healthv);
3664         } else {
3665                 struct lnet_peer_ni *lpni;
3666                 int cpt;
3667
3668                 cpt = lnet_net_lock_current();
3669                 lpni = lnet_find_peer_ni_locked(nid);
3670                 if (!lpni) {
3671                         lnet_net_unlock(cpt);
3672                         return;
3673                 }
3674                 spin_lock(&lpni->lpni_lock);
3675                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3676                 if (status)
3677                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3678                 spin_unlock(&lpni->lpni_lock);
3679                 lnet_peer_ni_decref_locked(lpni);
3680                 lnet_net_unlock(cpt);
3681
3682                 if (status != 0)
3683                         CERROR("peer NI (%s) recovery failed with %d\n",
3684                                libcfs_nid2str(nid), status);
3685         }
3686 }
3687
3688 void
3689 lnet_mt_event_handler(struct lnet_event *event)
3690 {
3691         struct lnet_mt_event_info *ev_info = event->md.user_ptr;
3692         struct lnet_ping_buffer *pbuf;
3693
3694         /* TODO: remove assert */
3695         LASSERT(event->type == LNET_EVENT_REPLY ||
3696                 event->type == LNET_EVENT_SEND ||
3697                 event->type == LNET_EVENT_UNLINK);
3698
3699         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3700                event->status);
3701
3702         switch (event->type) {
3703         case LNET_EVENT_UNLINK:
3704                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3705                        libcfs_nid2str(ev_info->mt_nid));
3706                 /* fallthrough */
3707         case LNET_EVENT_REPLY:
3708                 lnet_handle_recovery_reply(ev_info, event->status,
3709                                            event->type == LNET_EVENT_UNLINK);
3710                 break;
3711         case LNET_EVENT_SEND:
3712                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3713                                libcfs_nid2str(ev_info->mt_nid),
3714                                (event->status) ? "unsuccessfully" :
3715                                "successfully", event->status);
3716                 break;
3717         default:
3718                 CERROR("Unexpected event: %d\n", event->type);
3719                 break;
3720         }
3721         if (event->unlinked) {
3722                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3723                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
3724                 lnet_ping_buffer_decref(pbuf);
3725         }
3726 }
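
/*
 * Illustrative sketch (not part of the original file): the handler
 * above is hooked up by allocating an event queue into
 * the_lnet.ln_mt_eqh, roughly as below. The actual call site lives
 * outside this file, so treat this as an assumption, not the real
 * setup code.
 */
#if 0
static int mt_eq_setup_sketch(void)
{
        return LNetEQAlloc(0, lnet_mt_event_handler, &the_lnet.ln_mt_eqh);
}
#endif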
3727
3728 static int
3729 lnet_rsp_tracker_create(void)
3730 {
3731         struct list_head **rstqs;
3732
3733         rstqs = lnet_create_array_of_queues();
3734         if (!rstqs)
3735                 return -ENOMEM;
3736
3737         the_lnet.ln_mt_rstq = rstqs;
3738
3739         return 0;
3740 }
3741
3742 static void
3743 lnet_rsp_tracker_clean(void)
3744 {
3745         lnet_finalize_expired_responses();
3746
3747         cfs_percpt_free(the_lnet.ln_mt_rstq);
3748         the_lnet.ln_mt_rstq = NULL;
3749 }
3750
3751 int lnet_monitor_thr_start(void)
3752 {
3753         int rc = 0;
3754         struct task_struct *task;
3755
3756         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
3757                 return -EALREADY;
3758
3759         rc = lnet_resendqs_create();
3760         if (rc)
3761                 return rc;
3762
3763         rc = lnet_rsp_tracker_create();
3764         if (rc)
3765                 goto clean_queues;
3766
3767         sema_init(&the_lnet.ln_mt_signal, 0);
3768
3769         lnet_net_lock(LNET_LOCK_EX);
3770         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
3771         lnet_net_unlock(LNET_LOCK_EX);
3772         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
3773         if (IS_ERR(task)) {
3774                 rc = PTR_ERR(task);
3775                 CERROR("Can't start monitor thread: %d\n", rc);
3776                 goto clean_thread;
3777         }
3778
3779         return 0;
3780
3781 clean_thread:
3782         lnet_net_lock(LNET_LOCK_EX);
3783         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3784         lnet_net_unlock(LNET_LOCK_EX);
3785         /* block until event callback signals exit */
3786         down(&the_lnet.ln_mt_signal);
3787         /* clean up */
3788         lnet_net_lock(LNET_LOCK_EX);
3789         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3790         lnet_net_unlock(LNET_LOCK_EX);
3791         lnet_rsp_tracker_clean();
3792         lnet_clean_local_ni_recoveryq();
3793         lnet_clean_peer_ni_recoveryq();
3794         lnet_clean_resendqs();
3795         LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
3796         return rc;
3797 clean_queues:
3798         lnet_rsp_tracker_clean();
3799         lnet_clean_local_ni_recoveryq();
3800         lnet_clean_peer_ni_recoveryq();
3801         lnet_clean_resendqs();
3802         return rc;
3803 }
3804
3805 void lnet_monitor_thr_stop(void)
3806 {
3807         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3808                 return;
3809
3810         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
3811         lnet_net_lock(LNET_LOCK_EX);
3812         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3813         lnet_net_unlock(LNET_LOCK_EX);
3814
3815         /* tell the monitor thread that we're shutting down */
3816         complete(&the_lnet.ln_mt_wait_complete);
3817
3818         /* block until monitor thread signals that it's done */
3819         down(&the_lnet.ln_mt_signal);
3820         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
3821
3822         /* perform cleanup tasks */
3823         lnet_rsp_tracker_clean();
3824         lnet_clean_local_ni_recoveryq();
3825         lnet_clean_peer_ni_recoveryq();
3826         lnet_clean_resendqs();
3827 }
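
/*
 * Usage sketch (not part of the original file): start and stop are
 * expected to pair across LNet bring-up and tear-down, roughly as
 * below. The caller name is hypothetical and error handling is elided.
 */
#if 0
static int mt_lifecycle_sketch(void)
{
        int rc;

        rc = lnet_monitor_thr_start();
        if (rc)
                return rc;

        /* ... LNet runs; the monitor thread does its periodic work ... */

        lnet_monitor_thr_stop();
        return 0;
}
#endif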
3828
3829 void
3830 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
3831                   __u32 msg_type)
3832 {
3833         lnet_net_lock(cpt);
3834         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
3835         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
3836         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
3837         lnet_net_unlock(cpt);
3838
3839         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
3840 }
3841
3842 static void
3843 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
3844 {
3845         struct lnet_hdr *hdr = &msg->msg_hdr;
3846
3847         if (msg->msg_wanted != 0)
3848                 lnet_setpayloadbuffer(msg);
3849
3850         lnet_build_msg_event(msg, LNET_EVENT_PUT);
3851
3852         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
3853          * it back into the ACK during lnet_finalize() */
3854         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
3855                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
3856
3857         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
3858                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
3859 }
3860
3861 static int
3862 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
3863 {
3864         struct lnet_hdr         *hdr = &msg->msg_hdr;
3865         struct lnet_match_info  info;
3866         int                     rc;
3867         bool                    ready_delay;
3868
3869         /* Convert put fields to host byte order */
3870         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
3871         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
3872         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
3873
3874         /* Primary peer NID. */
3875         info.mi_id.nid  = msg->msg_initiator;
3876         info.mi_id.pid  = hdr->src_pid;
3877         info.mi_opc     = LNET_MD_OP_PUT;
3878         info.mi_portal  = hdr->msg.put.ptl_index;
3879         info.mi_rlength = hdr->payload_length;
3880         info.mi_roffset = hdr->msg.put.offset;
3881         info.mi_mbits   = hdr->msg.put.match_bits;
3882         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
3883
3884         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
3885         ready_delay = msg->msg_rx_ready_delay;
3886
3887  again:
3888         rc = lnet_ptl_match_md(&info, msg);
3889         switch (rc) {
3890         default:
3891                 LBUG();
3892
3893         case LNET_MATCHMD_OK:
3894                 lnet_recv_put(ni, msg);
3895                 return 0;
3896
3897         case LNET_MATCHMD_NONE:
3898                 if (ready_delay)
3899                         /* no eager_recv, or it has already been called;
3900                          * the message should be on the delayed list */
3901                         return 0;
3902
3903                 rc = lnet_ni_eager_recv(ni, msg);
3904                 if (rc == 0) {
3905                         ready_delay = true;
3906                         goto again;
3907                 }
3908                 /* fall through */
3909
3910         case LNET_MATCHMD_DROP:
3911                 CNETERR("Dropping PUT from %s portal %d match %llu"
3912                         " offset %d length %d: %d\n",
3913                         libcfs_id2str(info.mi_id), info.mi_portal,
3914                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
3915
3916                 return -ENOENT; /* -ve: OK but no match */
3917         }
3918 }
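
/*
 * Illustrative note (not part of the original file): the match loop
 * above runs at most twice. The first pass enters with ready_delay
 * true when the LND has no eager_recv, in which case the message is
 * already parked on the delayed list on LNET_MATCHMD_NONE; otherwise
 * lnet_ni_eager_recv() is called once, ready_delay is set and the
 * match is retried. A failed eager_recv falls through to the DROP
 * path.
 */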
3919
3920 static int
3921 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
3922 {
3923         struct lnet_match_info info;
3924         struct lnet_hdr *hdr = &msg->msg_hdr;
3925         struct lnet_process_id source_id;
3926         struct lnet_handle_wire reply_wmd;
3927         int rc;
3928
3929         /* Convert get fields to host byte order */
3930         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
3931         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
3932         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
3933         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
3934
3935         source_id.nid = hdr->src_nid;
3936         source_id.pid = hdr->src_pid;
3937         /* Primary peer NID */
3938         info.mi_id.nid  = msg->msg_initiator;
3939         info.mi_id.pid  = hdr->src_pid;
3940         info.mi_opc     = LNET_MD_OP_GET;
3941         info.mi_portal  = hdr->msg.get.ptl_index;
3942         info.mi_rlength = hdr->msg.get.sink_length;
3943         info.mi_roffset = hdr->msg.get.src_offset;
3944         info.mi_mbits   = hdr->msg.get.match_bits;
3945         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
3946
3947         rc = lnet_ptl_match_md(&info, msg);
3948         if (rc == LNET_MATCHMD_DROP) {
3949                 CNETERR("Dropping GET from %s portal %d match %llu"
3950                         " offset %d length %d\n",
3951                         libcfs_id2str(info.mi_id), info.mi_portal,
3952                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
3953                 return -ENOENT; /* -ve: OK but no match */
3954         }
3955
3956         LASSERT(rc == LNET_MATCHMD_OK);
3957
3958         lnet_build_msg_event(msg, LNET_EVENT_GET);
3959
3960         reply_wmd = hdr->msg.get.return_wmd;
3961
3962         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
3963                        msg->msg_offset, msg->msg_wanted);
3964
3965         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
3966
3967         if (rdma_get) {
3968                 /* The LND completes the REPLY from its recv procedure */
3969                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
3970                              msg->msg_offset, msg->msg_len, msg->msg_len);
3971                 return 0;
3972         }
3973
3974         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
3975         msg->msg_receiving = 0;
3976
3977         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
3978         if (rc < 0) {
3979                 /* didn't get as far as lnet_ni_send() */
3980                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
3981                        libcfs_nid2str(ni->ni_nid),
3982                        libcfs_id2str(info.mi_id), rc);
3983
3984                 lnet_finalize(msg, rc);
3985         }
3986
3987         return 0;
3988 }
3989
3990 static int
3991 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
3992 {
3993         void *private = msg->msg_private;
3994         struct lnet_hdr *hdr = &msg->msg_hdr;
3995         struct lnet_process_id src = {0};
3996         struct lnet_libmd *md;
3997         unsigned int rlength;
3998         unsigned int mlength;
3999         int cpt;
4000
4001         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4002         lnet_res_lock(cpt);
4003
4004         src.nid = hdr->src_nid;
4005         src.pid = hdr->src_pid;
4006
4007         /* NB handles only looked up by creator (no flips) */
4008         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4009         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4010                 CNETERR("%s: Dropping REPLY from %s for %s "
4011                         "MD %#llx.%#llx\n",
4012                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4013                         (md == NULL) ? "invalid" : "inactive",
4014                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4015                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4016                 if (md != NULL && md->md_me != NULL)
4017                         CERROR("REPLY MD also attached to portal %d\n",
4018                                md->md_me->me_portal);
4019
4020                 lnet_res_unlock(cpt);
4021                 return -ENOENT; /* -ve: OK but no match */
4022         }
4023
4024         LASSERT(md->md_offset == 0);
4025
4026         rlength = hdr->payload_length;
4027         mlength = min(rlength, md->md_length);
4028
4029         if (mlength < rlength &&
4030             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4031                 CNETERR("%s: Dropping REPLY from %s length %d "
4032                         "for MD %#llx would overflow (%d)\n",
4033                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4034                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4035                         mlength);
4036                 lnet_res_unlock(cpt);
4037                 return -ENOENT; /* -ve: OK but no match */
4038         }
4039
4040         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4041                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4042                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4043
4044         lnet_msg_attach_md(msg, md, 0, mlength);
4045
4046         if (mlength != 0)
4047                 lnet_setpayloadbuffer(msg);
4048
4049         lnet_res_unlock(cpt);
4050
4051         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4052
4053         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4054         return 0;
4055 }
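
/*
 * Worked example (not part of the original file): with a REPLY carrying
 * rlength = 4096 into an MD of md_length = 2048, mlength becomes 2048.
 * The REPLY is accepted only if the MD was created with
 * LNET_MD_TRUNCATE; otherwise it is dropped as an overflow.
 */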
4056
4057 static int
4058 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4059 {
4060         struct lnet_hdr *hdr = &msg->msg_hdr;
4061         struct lnet_process_id src = {0};
4062         struct lnet_libmd *md;
4063         int cpt;
4064
4065         src.nid = hdr->src_nid;
4066         src.pid = hdr->src_pid;
4067
4068         /* Convert ack fields to host byte order */
4069         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4070         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4071
4072         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4073         lnet_res_lock(cpt);
4074
4075         /* NB handles only looked up by creator (no flips) */
4076         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4077         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4078                 /* Don't moan; this is expected */
4079                 CDEBUG(D_NET,
4080                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4081                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4082                        (md == NULL) ? "invalid" : "inactive",
4083                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4084                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4085                 if (md != NULL && md->md_me != NULL)
4086                         CERROR("Source MD also attached to portal %d\n",
4087                                md->md_me->me_portal);
4088
4089                 lnet_res_unlock(cpt);
4090                 return -ENOENT;                  /* -ve! */
4091         }
4092
4093         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4094                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4095                hdr->msg.ack.dst_wmd.wh_object_cookie);
4096
4097         lnet_msg_attach_md(msg, md, 0, 0);
4098
4099         lnet_res_unlock(cpt);
4100
4101         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4102
4103         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4104         return 0;
4105 }
4106
4107 /**
4108  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4109  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4110  * \retval -ve                  error code
4111  */
4112 int
4113 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4114 {
4115         int     rc = 0;
4116
4117         if (!the_lnet.ln_routing)
4118                 return -ECANCELED;
4119
4120         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4121             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4122                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4123                         msg->msg_rx_ready_delay = 1;
4124                 } else {
4125                         lnet_net_unlock(msg->msg_rx_cpt);
4126                         rc = lnet_ni_eager_recv(ni, msg);
4127                         lnet_net_lock(msg->msg_rx_cpt);
4128                 }
4129         }
4130
4131         if (rc == 0)
4132                 rc = lnet_post_routed_recv_locked(msg, 0);
4133         return rc;
4134 }
4135
4136 int
4137 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4138 {
4139         int     rc;
4140
4141         switch (msg->msg_type) {
4142         case LNET_MSG_ACK:
4143                 rc = lnet_parse_ack(ni, msg);
4144                 break;
4145         case LNET_MSG_PUT:
4146                 rc = lnet_parse_put(ni, msg);
4147                 break;
4148         case LNET_MSG_GET:
4149                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4150                 break;
4151         case LNET_MSG_REPLY:
4152                 rc = lnet_parse_reply(ni, msg);
4153                 break;
4154         default: /* prevent an unused label if !kernel */
4155                 LASSERT(0);
4156                 return -EPROTO;
4157         }
4158
4159         LASSERT(rc == 0 || rc == -ENOENT);
4160         return rc;
4161 }
4162
4163 char *
4164 lnet_msgtyp2str(int type)
4165 {
4166         switch (type) {
4167         case LNET_MSG_ACK:
4168                 return "ACK";
4169         case LNET_MSG_PUT:
4170                 return "PUT";
4171         case LNET_MSG_GET:
4172                 return "GET";
4173         case LNET_MSG_REPLY:
4174                 return "REPLY";
4175         case LNET_MSG_HELLO:
4176                 return "HELLO";
4177         default:
4178                 return "<UNKNOWN>";
4179         }
4180 }
4181
4182 void
4183 lnet_print_hdr(struct lnet_hdr *hdr)
4184 {
4185         struct lnet_process_id src = {
4186                 .nid = hdr->src_nid,
4187                 .pid = hdr->src_pid,
4188         };
4189         struct lnet_process_id dst = {
4190                 .nid = hdr->dest_nid,
4191                 .pid = hdr->dest_pid,
4192         };
4193         char *type_str = lnet_msgtyp2str(hdr->type);
4194
4195         CWARN("P3 Header at %p of type %s\n", hdr, type_str);
4196         CWARN("    From %s\n", libcfs_id2str(src));
4197         CWARN("    To   %s\n", libcfs_id2str(dst));
4198
4199         switch (hdr->type) {
4200         default:
4201                 break;
4202
4203         case LNET_MSG_PUT:
4204                 CWARN("    Ptl index %d, ack md %#llx.%#llx, "
4205                       "match bits %llu\n",
4206                       hdr->msg.put.ptl_index,
4207                       hdr->msg.put.ack_wmd.wh_interface_cookie,
4208                       hdr->msg.put.ack_wmd.wh_object_cookie,
4209                       hdr->msg.put.match_bits);
4210                 CWARN("    Length %d, offset %d, hdr data %#llx\n",
4211                       hdr->payload_length, hdr->msg.put.offset,
4212                       hdr->msg.put.hdr_data);
4213                 break;
4214
4215         case LNET_MSG_GET:
4216                 CWARN("    Ptl index %d, return md %#llx.%#llx, "
4217                       "match bits %llu\n", hdr->msg.get.ptl_index,
4218                       hdr->msg.get.return_wmd.wh_interface_cookie,
4219                       hdr->msg.get.return_wmd.wh_object_cookie,
4220                       hdr->msg.get.match_bits);
4221                 CWARN("    Length %d, src offset %d\n",
4222                       hdr->msg.get.sink_length,
4223                       hdr->msg.get.src_offset);
4224                 break;
4225
4226         case LNET_MSG_ACK:
4227                 CWARN("    dst md %#llx.%#llx, "
4228                       "manipulated length %d\n",
4229                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
4230                       hdr->msg.ack.dst_wmd.wh_object_cookie,
4231                       hdr->msg.ack.mlength);
4232                 break;
4233
4234         case LNET_MSG_REPLY:
4235                 CWARN("    dst md %#llx.%#llx, "
4236                       "length %d\n",
4237                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
4238                       hdr->msg.reply.dst_wmd.wh_object_cookie,
4239                       hdr->payload_length);
4240         }
4241
4242 }
4243
4244 int
4245 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4246            void *private, int rdma_req)
4247 {
4248         struct lnet_peer_ni *lpni;
4249         struct lnet_msg *msg;
4250         __u32 payload_length;
4251         lnet_pid_t dest_pid;
4252         lnet_nid_t dest_nid;
4253         lnet_nid_t src_nid;
4254         bool push = false;
4255         int for_me;
4256         __u32 type;
4257         int rc = 0;
4258         int cpt;
4259
4260         LASSERT(!in_interrupt());
4261
4262         type = le32_to_cpu(hdr->type);
4263         src_nid = le64_to_cpu(hdr->src_nid);
4264         dest_nid = le64_to_cpu(hdr->dest_nid);
4265         dest_pid = le32_to_cpu(hdr->dest_pid);
4266         payload_length = le32_to_cpu(hdr->payload_length);
4267
4268         for_me = (ni->ni_nid == dest_nid);
4269         cpt = lnet_cpt_of_nid(from_nid, ni);
4270
4271         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4272                 libcfs_nid2str(dest_nid),
4273                 libcfs_nid2str(ni->ni_nid),
4274                 libcfs_nid2str(src_nid),
4275                 lnet_msgtyp2str(type),
4276                 (for_me) ? "for me" : "routed");
4277
4278         switch (type) {
4279         case LNET_MSG_ACK:
4280         case LNET_MSG_GET:
4281                 if (payload_length > 0) {
4282                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4283                                libcfs_nid2str(from_nid),
4284                                libcfs_nid2str(src_nid),
4285                                lnet_msgtyp2str(type), payload_length);
4286                         return -EPROTO;
4287                 }
4288                 break;
4289
4290         case LNET_MSG_PUT:
4291         case LNET_MSG_REPLY:
4292                 if (payload_length >
4293                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4294                         CERROR("%s, src %s: bad %s payload %d "
4295                                "(%d max expected)\n",
4296                                libcfs_nid2str(from_nid),
4297                                libcfs_nid2str(src_nid),
4298                                lnet_msgtyp2str(type),
4299                                payload_length,
4300                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4301                         return -EPROTO;
4302                 }
4303                 break;
4304
4305         default:
4306                 CERROR("%s, src %s: Bad message type 0x%x\n",
4307                        libcfs_nid2str(from_nid),
4308                        libcfs_nid2str(src_nid), type);
4309                 return -EPROTO;
4310         }
4311
4312         if (the_lnet.ln_routing &&
4313             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4314                 lnet_ni_lock(ni);
4315                 spin_lock(&ni->ni_net->net_lock);
4316                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4317                 spin_unlock(&ni->ni_net->net_lock);
4318                 if (ni->ni_status != NULL &&
4319                     ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) {
4320                         ni->ni_status->ns_status = LNET_NI_STATUS_UP;
4321                         push = true;
4322                 }
4323                 lnet_ni_unlock(ni);
4324         }
4325
4326         if (push)
4327                 lnet_push_update_to_peers(1);
4328
4329         /* Regard a bad destination NID as a protocol error.  Senders should
4330          * know what they're doing; if they don't they're misconfigured, buggy
4331          * or malicious so we chop them off at the knees :) */
4332
4333         if (!for_me) {
4334                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4335                         /* should have gone direct */
4336                         CERROR("%s, src %s: Bad dest nid %s "
4337                                "(should have been sent direct)\n",
4338                                 libcfs_nid2str(from_nid),
4339                                 libcfs_nid2str(src_nid),
4340                                 libcfs_nid2str(dest_nid));
4341                         return -EPROTO;
4342                 }
4343
4344                 if (lnet_islocalnid(dest_nid)) {
4345                         /* dest is another local NI; sender should have used
4346                          * this node's NID on its own network */
4347                         CERROR("%s, src %s: Bad dest nid %s "
4348                                "(it's my nid but on a different network)\n",
4349                                 libcfs_nid2str(from_nid),
4350                                 libcfs_nid2str(src_nid),
4351                                 libcfs_nid2str(dest_nid));
4352                         return -EPROTO;
4353                 }
4354
4355                 if (rdma_req && type == LNET_MSG_GET) {
4356                         CERROR("%s, src %s: Bad optimized GET for %s "
4357                                "(final destination must be me)\n",
4358                                 libcfs_nid2str(from_nid),
4359                                 libcfs_nid2str(src_nid),
4360                                 libcfs_nid2str(dest_nid));
4361                         return -EPROTO;
4362                 }
4363
4364                 if (!the_lnet.ln_routing) {
4365                         CERROR("%s, src %s: Dropping message for %s "
4366                                "(routing not enabled)\n",
4367                                 libcfs_nid2str(from_nid),
4368                                 libcfs_nid2str(src_nid),
4369                                 libcfs_nid2str(dest_nid));
4370                         goto drop;
4371                 }
4372         }
4373
4374         /* Message looks OK; we're not going to return an error, so we MUST
4375          * call back lnd_recv() come what may... */
4376
4377         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4378             fail_peer(src_nid, 0)) {                    /* shall we now? */
4379                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4380                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4381                        lnet_msgtyp2str(type));
4382                 goto drop;
4383         }
4384
4385         if (!list_empty(&the_lnet.ln_drop_rules) &&
4386             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4387                 CDEBUG(D_NET,
4388                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4389                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4390                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4391                 goto drop;
4392         }
4393
4394         if (lnet_drop_asym_route && for_me &&
4395             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4396                 struct lnet_net *net;
4397                 struct lnet_remotenet *rnet;
4398                 bool found = true;
4399
4400                 /* we are dealing with a routed message,
4401                  * so see if route to reach src_nid goes through from_nid
4402                  */
4403                 lnet_net_lock(cpt);
4404                 net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
4405                 if (!net) {
4406                         lnet_net_unlock(cpt);
4407                         CERROR("net %s not found\n",
4408                                libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
4409                         return -EPROTO;
4410                 }
4411
4412                 rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
4413                 if (rnet) {
4414                         struct lnet_peer *gw = NULL;
4415                         struct lnet_peer_ni *lpni = NULL;
4416                         struct lnet_route *route;
4417
4418                         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
4419                                 found = false;
4420                                 gw = route->lr_gateway;
4421                                 if (route->lr_lnet != net->net_id)
4422                                         continue;
4423                                 /*
4424                                  * if the nid is one of the gateway's NIDs
4425                                  * then this is a valid gateway
4426                                  */
4427                                 while ((lpni = lnet_get_next_peer_ni_locked(gw,
4428                                                 NULL, lpni)) != NULL) {
4429                                         if (lpni->lpni_nid == from_nid) {
4430                                                 found = true;
4431                                                 break;
4432                                         }
4433                                 }
                                /* stop at the first matching gateway;
                                 * otherwise the next iteration would
                                 * reset 'found' and resume the peer-NI
                                 * walk from a stale 'lpni' */
                                if (found)
                                        break;
4434                         }
4435                 }
4436                 lnet_net_unlock(cpt);
4437                 if (!found) {
4438                         /* we would not use from_nid to route a message to
4439                          * src_nid
4440                          * => asymmetric routing detected but forbidden
4441                          */
4442                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4443                                libcfs_nid2str(from_nid),
4444                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4445                         goto drop;
4446                 }
4447         }
4448
4449         msg = lnet_msg_alloc();
4450         if (msg == NULL) {
4451                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4452                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4453                        lnet_msgtyp2str(type));
4454                 goto drop;
4455         }
4456
4457         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4458          * pointers NULL etc */
4459
4460         msg->msg_type = type;
4461         msg->msg_private = private;
4462         msg->msg_receiving = 1;
4463         msg->msg_rdma_get = rdma_req;
4464         msg->msg_len = msg->msg_wanted = payload_length;
4465         msg->msg_offset = 0;
4466         msg->msg_hdr = *hdr;
4467         /* for building message event */
4468         msg->msg_from = from_nid;
4469         if (!for_me) {
4470                 msg->msg_target.pid     = dest_pid;
4471                 msg->msg_target.nid     = dest_nid;
4472                 msg->msg_routing        = 1;
4473
4474         } else {
4475                 /* convert common msg->hdr fields to host byteorder */
4476                 msg->msg_hdr.type       = type;
4477                 msg->msg_hdr.src_nid    = src_nid;
4478                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4479                 msg->msg_hdr.dest_nid   = dest_nid;
4480                 msg->msg_hdr.dest_pid   = dest_pid;
4481                 msg->msg_hdr.payload_length = payload_length;
4482         }
4483
4484         lnet_net_lock(cpt);
4485         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4486         if (IS_ERR(lpni)) {
4487                 lnet_net_unlock(cpt);
4488                 CERROR("%s, src %s: Dropping %s "
4489                        "(error %ld looking up sender)\n",
4490                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4491                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4492                 lnet_msg_free(msg);
4493                 if (PTR_ERR(lpni) == -ESHUTDOWN)
4494                         /* We are shutting down.  Don't do anything more */
4495                         return 0;
4496                 goto drop;
4497         }
4498
4499         if (the_lnet.ln_routing)
4500                 lpni->lpni_last_alive = ktime_get_seconds();
4501
4502         msg->msg_rxpeer = lpni;
4503         msg->msg_rxni = ni;
4504         lnet_ni_addref_locked(ni, cpt);
4505         /* Multi-Rail: Primary NID of source. */
4506         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4507
4508         /*
4509          * mark the status of this lpni as UP since we received a message
4510          * from it.  The ping response reports back the remote's ns_status
4511          * (up or down), which we cache here.
4512          */
4513         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4514
4515         lnet_msg_commit(msg, cpt);
4516
4517         /* message delay simulation */
4518         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4519                      lnet_delay_rule_match_locked(hdr, msg))) {
4520                 lnet_net_unlock(cpt);
4521                 return 0;
4522         }
4523
4524         if (!for_me) {
4525                 rc = lnet_parse_forward_locked(ni, msg);
4526                 lnet_net_unlock(cpt);
4527
4528                 if (rc < 0)
4529                         goto free_drop;
4530
4531                 if (rc == LNET_CREDIT_OK) {
4532                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4533                                      0, payload_length, payload_length);
4534                 }
4535                 return 0;
4536         }
4537
4538         lnet_net_unlock(cpt);
4539
4540         rc = lnet_parse_local(ni, msg);
4541         if (rc != 0)
4542                 goto free_drop;
4543         return 0;
4544
4545  free_drop:
4546         LASSERT(msg->msg_md == NULL);
4547         lnet_finalize(msg, rc);
4548
4549  drop:
4550         lnet_drop_message(ni, cpt, private, payload_length, type);
4551         return 0;
4552 }
4553 EXPORT_SYMBOL(lnet_parse);
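
/*
 * Editor's sketch (assumption, not from the original file): the shape
 * of an LND receive path handing an incoming header to lnet_parse().
 * The name example_lnd_rx and the 'priv' descriptor are hypothetical;
 * rdma_req = 1 would flag an optimized GET whose final destination
 * must be this node.  On a zero return LNet has taken ownership and
 * will call back lnd_recv(); on a negative return the message was
 * rejected and lnd_recv() will not be called for it.
 */
static void example_lnd_rx(struct lnet_ni *ni, struct lnet_hdr *hdr,
			   lnet_nid_t from_nid, void *priv)
{
	int rc = lnet_parse(ni, hdr, from_nid, priv, 0);

	if (rc < 0)
		CERROR("message from %s rejected: %d\n",
		       libcfs_nid2str(from_nid), rc);
}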
4554
4555 void
4556 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4557 {
4558         while (!list_empty(head)) {
4559                 struct lnet_process_id id = {0};
4560                 struct lnet_msg *msg;
4561
4562                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4563                 list_del(&msg->msg_list);
4564
4565                 id.nid = msg->msg_hdr.src_nid;
4566                 id.pid = msg->msg_hdr.src_pid;
4567
4568                 LASSERT(msg->msg_md == NULL);
4569                 LASSERT(msg->msg_rx_delayed);
4570                 LASSERT(msg->msg_rxpeer != NULL);
4571                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4572
4573                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4574                       " offset %d length %d: %s\n",
4575                       libcfs_id2str(id),
4576                       msg->msg_hdr.msg.put.ptl_index,
4577                       msg->msg_hdr.msg.put.match_bits,
4578                       msg->msg_hdr.msg.put.offset,
4579                       msg->msg_hdr.payload_length, reason);
4580
4581                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4582                  * called lnet_drop_message(), so I just hang onto msg as well
4583                  * until that's done */
4584
4585                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4586                                   msg->msg_private, msg->msg_len,
4587                                   msg->msg_type);
4588
4589                 msg->msg_no_resend = true;
4590                 /*
4591                  * NB: the message will not generate an event because it has no
4592                  * attached MD, but we should still give an error code so that
4593                  * lnet_msg_decommit() can skip counter operations and checks.
4594                  */
4595                 lnet_finalize(msg, -ENOENT);
4596         }
4597 }
4598
4599 void
4600 lnet_recv_delayed_msg_list(struct list_head *head)
4601 {
4602         while (!list_empty(head)) {
4603                 struct lnet_msg *msg;
4604                 struct lnet_process_id id;
4605
4606                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4607                 list_del(&msg->msg_list);
4608
4609                 /* md won't disappear under me, since each msg
4610                  * holds a ref on it */
4611
4612                 id.nid = msg->msg_hdr.src_nid;
4613                 id.pid = msg->msg_hdr.src_pid;
4614
4615                 LASSERT(msg->msg_rx_delayed);
4616                 LASSERT(msg->msg_md != NULL);
4617                 LASSERT(msg->msg_rxpeer != NULL);
4618                 LASSERT(msg->msg_rxni != NULL);
4619                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4620
4621                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4622                        "match %llu offset %d length %d.\n",
4623                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4624                         msg->msg_hdr.msg.put.match_bits,
4625                         msg->msg_hdr.msg.put.offset,
4626                         msg->msg_hdr.payload_length);
4627
4628                 lnet_recv_put(msg->msg_rxni, msg);
4629         }
4630 }
4631
4632 static void
4633 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4634                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4635 {
4636         s64 timeout_ns;
4637         struct lnet_rsp_tracker *local_rspt;
4638
4639         /*
4640          * The MD has a refcount taken by the message, so it's not going
4641          * away.  It can still be looked up, however, so we need to secure
4642          * access to md_rspt_ptr by taking the res_lock.
4643          * The rspt can be accessed without protection until it gets
4644          * added to the list.
4645          */
4646
4647         lnet_res_lock(cpt);
4648         local_rspt = md->md_rspt_ptr;
4649         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4650         if (local_rspt != NULL) {
4651                 /*
4652                  * we already have an rspt attached to the md, so we'll
4653                  * update the deadline on that one.
4654                  */
4655                 lnet_rspt_free(rspt, cpt);
4656         } else {
4657                 /* new md */
4658                 rspt->rspt_mdh = mdh;
4659                 rspt->rspt_cpt = cpt;
4660                 /* store the rspt so we can access it when we get the REPLY */
4661                 md->md_rspt_ptr = rspt;
4662                 local_rspt = rspt;
4663         }
4664         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4665
4666         /*
4667          * add to the list of tracked responses. It's added to tail of the
4668          * list in order to expire all the older entries first.
4669          */
4670         lnet_net_lock(cpt);
4671         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4672         lnet_net_unlock(cpt);
4673         lnet_res_unlock(cpt);
4674 }
4675
4676 /**
4677  * Initiate an asynchronous PUT operation.
4678  *
4679  * There are several events associated with a PUT: completion of the send on
4680  * the initiator node (LNET_EVENT_SEND), and when the send completes
4681  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4682  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4683  * used at the target node to indicate the completion of incoming data
4684  * delivery.
4685  *
4686  * The local events will be logged in the EQ associated with the MD pointed
4687  * to by the \a mdh handle. Using an MD without an associated EQ results in
4688  * these events being discarded. In this case, the caller must have another
4689  * mechanism (e.g., a higher level protocol) for determining when it is safe
4690  * to modify the memory region associated with the MD.
4691  *
4692  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4693  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4694  *
4695  * \param self Indicates the NID of a local interface through which to send
4696  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4697  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4698  * must be "free floating" (See LNetMDBind()).
4699  * \param ack Controls whether an acknowledgment is requested.
4700  * Acknowledgments are only sent when they are requested by the initiating
4701  * process and the target MD enables them.
4702  * \param target A process identifier for the target process.
4703  * \param portal The index in the \a target's portal table.
4704  * \param match_bits The match bits to use for MD selection at the target
4705  * process.
4706  * \param offset The offset into the target MD (only used when the target
4707  * MD has the LNET_MD_MANAGE_REMOTE option set).
4708  * \param hdr_data 64 bits of user data that can be included in the message
4709  * header. This data is written to an event queue entry at the target if an
4710  * EQ is present on the matching MD.
4711  *
4712  * \retval  0      Success, and only in this case events will be generated
4713  * and logged to the EQ (if it exists).
4714  * \retval -EIO    Simulated failure.
4715  * \retval -ENOMEM Memory allocation failure.
4716  * \retval -ENOENT Invalid MD object.
4717  *
4718  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4719  */
4720 int
4721 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4722         struct lnet_process_id target, unsigned int portal,
4723         __u64 match_bits, unsigned int offset,
4724         __u64 hdr_data)
4725 {
4726         struct lnet_msg *msg;
4727         struct lnet_libmd *md;
4728         int cpt;
4729         int rc;
4730         struct lnet_rsp_tracker *rspt = NULL;
4731
4732         LASSERT(the_lnet.ln_refcount > 0);
4733
4734         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4735             fail_peer(target.nid, 1)) {                 /* shall we now? */
4736                 CERROR("Dropping PUT to %s: simulated failure\n",
4737                        libcfs_id2str(target));
4738                 return -EIO;
4739         }
4740
4741         msg = lnet_msg_alloc();
4742         if (msg == NULL) {
4743                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4744                        libcfs_id2str(target));
4745                 return -ENOMEM;
4746         }
4747         msg->msg_vmflush = !!memory_pressure_get();
4748
4749         cpt = lnet_cpt_of_cookie(mdh.cookie);
4750
4751         if (ack == LNET_ACK_REQ) {
4752                 rspt = lnet_rspt_alloc(cpt);
4753                 if (!rspt) {
4754                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4755                                 libcfs_id2str(target));
                        lnet_msg_free(msg);     /* don't leak the msg allocated above */
4756                         return -ENOMEM;
4757                 }
4758                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4759         }
4760
4761         lnet_res_lock(cpt);
4762
4763         md = lnet_handle2md(&mdh);
4764         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4765                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4766                        match_bits, portal, libcfs_id2str(target),
4767                        md == NULL ? -1 : md->md_threshold);
4768                 if (md != NULL && md->md_me != NULL)
4769                         CERROR("Source MD also attached to portal %d\n",
4770                                md->md_me->me_portal);
4771                 lnet_res_unlock(cpt);
4772
4773                 lnet_rspt_free(rspt, cpt);
4774                 lnet_msg_free(msg);
4775                 return -ENOENT;
4776         }
4777
4778         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4779
4780         lnet_msg_attach_md(msg, md, 0, 0);
4781
4782         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4783
4784         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4785         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
4786         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
4787         msg->msg_hdr.msg.put.hdr_data = hdr_data;
4788
4789         /* NB handles only looked up by creator (no flips) */
4790         if (ack == LNET_ACK_REQ) {
4791                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4792                         the_lnet.ln_interface_cookie;
4793                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4794                         md->md_lh.lh_cookie;
4795         } else {
4796                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4797                         LNET_WIRE_HANDLE_COOKIE_NONE;
4798                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4799                         LNET_WIRE_HANDLE_COOKIE_NONE;
4800         }
4801
4802         lnet_res_unlock(cpt);
4803
4804         lnet_build_msg_event(msg, LNET_EVENT_SEND);
4805
4806         if (ack == LNET_ACK_REQ)
4807                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
4808
4809         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
4810                                  CFS_FAIL_ONCE))
4811                 rc = -EIO;
4812         else
4813                 rc = lnet_send(self, msg, LNET_NID_ANY);
4814
4815         if (rc != 0) {
4816                 CNETERR("Error sending PUT to %s: %d\n",
4817                         libcfs_id2str(target), rc);
4818                 msg->msg_no_resend = true;
4819                 lnet_finalize(msg, rc);
4820         }
4821
4822         /* completion will be signalled by an event */
4823         return 0;
4824 }
4825 EXPORT_SYMBOL(LNetPut);
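
/*
 * Editor's sketch (assumption): a minimal LNetPut() call matching the
 * signature above.  'mdh' is assumed to describe memory already bound
 * with LNetMDBind(), and 'target' a reachable peer; the portal index
 * and match bits are arbitrary illustrative values.
 */
static int example_put(struct lnet_handle_md mdh,
		       struct lnet_process_id target)
{
	/* LNET_NID_ANY lets LNet pick the outgoing NI; request an ACK
	 * so the caller sees LNET_EVENT_ACK as well as LNET_EVENT_SEND. */
	return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
		       /* portal */ 0, /* match bits */ 0x17ULL,
		       /* offset */ 0, /* hdr_data */ 0);
}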
4826
4827 /*
4828  * The LND can DMA directly to the GET md (i.e. no REPLY msg).  This
4829  * function returns a msg for the LND to pass to lnet_finalize() when
4830  * the sink data has been received.
4831  *
4832  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
4833  * lnet_finalize() is called on it, so the LND must call this first.
4834  */
4835 struct lnet_msg *
4836 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
4837 {
4838         struct lnet_msg *msg = lnet_msg_alloc();
4839         struct lnet_libmd *getmd = getmsg->msg_md;
4840         struct lnet_process_id peer_id = getmsg->msg_target;
4841         int cpt;
4842
4843         LASSERT(!getmsg->msg_target_is_router);
4844         LASSERT(!getmsg->msg_routing);
4845
4846         if (msg == NULL) {
4847                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
4848                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
4849                 goto drop;
4850         }
4851
4852         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
4853         lnet_res_lock(cpt);
4854
4855         LASSERT(getmd->md_refcount > 0);
4856
4857         if (getmd->md_threshold == 0) {
4858                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
4859                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
4860                         getmd);
4861                 lnet_res_unlock(cpt);
4862                 goto drop;
4863         }
4864
4865         LASSERT(getmd->md_offset == 0);
4866
4867         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
4868                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
4869
4870         /* setup information for lnet_build_msg_event */
4871         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
4872         msg->msg_from = peer_id.nid;
4873         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
4874         msg->msg_hdr.src_nid = peer_id.nid;
4875         msg->msg_hdr.payload_length = getmd->md_length;
4876         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
4877
4878         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
4879         lnet_res_unlock(cpt);
4880
4881         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4882
4883         lnet_net_lock(cpt);
4884         lnet_msg_commit(msg, cpt);
4885         lnet_net_unlock(cpt);
4886
4887         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4888
4889         return msg;
4890
4891  drop:
4892         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4893
4894         lnet_net_lock(cpt);
4895         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
4896         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4897         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
4898                 getmd->md_length;
4899         lnet_net_unlock(cpt);
4900
4901         if (msg != NULL)
4902                 lnet_msg_free(msg);
4903
4904         return NULL;
4905 }
4906 EXPORT_SYMBOL(lnet_create_reply_msg);
4907
4908 void
4909 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
4910                        unsigned int len)
4911 {
4912         /* Set the REPLY length, now the RDMA that elides the REPLY message has
4913          * completed and I know it. */
4914         LASSERT(reply != NULL);
4915         LASSERT(reply->msg_type == LNET_MSG_GET);
4916         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
4917
4918         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
4919          * the end of my buffer, I might as well be dead. */
4920         LASSERT(len <= reply->msg_ev.mlength);
4921
4922         reply->msg_ev.mlength = len;
4923 }
4924 EXPORT_SYMBOL(lnet_set_reply_msg_len);
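
/*
 * Editor's sketch (assumption): the LND-side ordering required by
 * lnet_create_reply_msg() above.  A real LND typically creates the
 * REPLY msg when it sets up the RDMA for an optimized GET; the
 * essential rule is that lnet_create_reply_msg() runs before
 * lnet_finalize(getmsg), which frees getmsg.  The function name and
 * parameters are hypothetical stand-ins for an LND completion handler.
 */
static void example_optimized_get_done(struct lnet_ni *ni,
				       struct lnet_msg *getmsg,
				       int status, unsigned int nob)
{
	struct lnet_msg *reply = lnet_create_reply_msg(ni, getmsg);

	if (reply != NULL && status == 0)
		lnet_set_reply_msg_len(ni, reply, nob);

	lnet_finalize(getmsg, status);		/* frees getmsg */
	if (reply != NULL)
		lnet_finalize(reply, status);
}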
4925
4926 /**
4927  * Initiate an asynchronous GET operation.
4928  *
4929  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
4930  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
4931  * the target node in the REPLY has been written to the local MD.
4932  *
4933  * On the target node, an LNET_EVENT_GET is logged when the GET request
4934  * arrives and is accepted into an MD.
4935  *
4936  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
4937  * \param mdh A handle for the MD that describes the memory into which the
4938  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
4939  *
4940  * \retval  0      Success, and only in this case events will be generated
4941  * and logged to the EQ of the MD (if one exists).
4942  * \retval -EIO    Simulated failure.
4943  * \retval -ENOMEM Memory allocation failure.
4944  * \retval -ENOENT Invalid MD object.
4945  */
4946 int
4947 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
4948         struct lnet_process_id target, unsigned int portal,
4949         __u64 match_bits, unsigned int offset, bool recovery)
4950 {
4951         struct lnet_msg *msg;
4952         struct lnet_libmd *md;
4953         struct lnet_rsp_tracker *rspt;
4954         int cpt;
4955         int rc;
4956
4957         LASSERT(the_lnet.ln_refcount > 0);
4958
4959         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4960             fail_peer(target.nid, 1)) {                 /* shall we now? */
4962                 CERROR("Dropping GET to %s: simulated failure\n",
4963                        libcfs_id2str(target));
4964                 return -EIO;
4965         }
4966
4967         msg = lnet_msg_alloc();
4968         if (!msg) {
4969                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
4970                        libcfs_id2str(target));
4971                 return -ENOMEM;
4972         }
4973
4974         cpt = lnet_cpt_of_cookie(mdh.cookie);
4975
4976         rspt = lnet_rspt_alloc(cpt);
4977         if (!rspt) {
4978                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
4979                        libcfs_id2str(target));
4980                 return -ENOMEM;
4981         }
4982         INIT_LIST_HEAD(&rspt->rspt_on_list);
4983
4984         msg->msg_recovery = recovery;
4985
4986         lnet_res_lock(cpt);
4987
4988         md = lnet_handle2md(&mdh);
4989         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4990                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
4991                        match_bits, portal, libcfs_id2str(target),
4992                        md == NULL ? -1 : md->md_threshold);
4993                 if (md != NULL && md->md_me != NULL)
4994                         CERROR("REPLY MD also attached to portal %d\n",
4995                                md->md_me->me_portal);
4996
4997                 lnet_res_unlock(cpt);
4998
4999                 lnet_msg_free(msg);
5000                 lnet_rspt_free(rspt, cpt);
5001                 return -ENOENT;
5002         }
5003
5004         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5005
5006         lnet_msg_attach_md(msg, md, 0, 0);
5007
5008         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5009
5010         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5011         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5012         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5013         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5014
5015         /* NB handles only looked up by creator (no flips) */
5016         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5017                 the_lnet.ln_interface_cookie;
5018         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5019                 md->md_lh.lh_cookie;
5020
5021         lnet_res_unlock(cpt);
5022
5023         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5024
5025         lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5026
5027         rc = lnet_send(self, msg, LNET_NID_ANY);
5028         if (rc < 0) {
5029                 CNETERR("Error sending GET to %s: %d\n",
5030                         libcfs_id2str(target), rc);
5031                 msg->msg_no_resend = true;
5032                 lnet_finalize(msg, rc);
5033         }
5034
5035         /* completion will be signalled by an event */
5036         return 0;
5037 }
5038 EXPORT_SYMBOL(LNetGet);
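
/*
 * Editor's sketch (assumption): a minimal LNetGet() call matching the
 * signature above.  As with the PUT example, 'mdh' must be a
 * free-floating MD from LNetMDBind(); recovery = false marks this as
 * a normal (non health-recovery) GET.
 */
static int example_get(struct lnet_handle_md mdh,
		       struct lnet_process_id target)
{
	return LNetGet(LNET_NID_ANY, mdh, target,
		       /* portal */ 0, /* match bits */ 0x17ULL,
		       /* offset */ 0, /* recovery */ false);
}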
5039
5040 /**
5041  * Calculate distance to node at \a dstnid.
5042  *
5043  * \param dstnid Target NID.
5044  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5045  * is saved here.
5046  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5047  * here.
5048  *
5049  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5050  * local_nid_dist_zero is set, which is the default.
5051  * \retval positive Distance to target NID, i.e. number of hops plus one.
5052  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5053  */
5054 int
5055 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5056 {
5057         struct list_head        *e;
5058         struct lnet_ni *ni = NULL;
5059         struct lnet_remotenet *rnet;
5060         __u32                   dstnet = LNET_NIDNET(dstnid);
5061         int                     hops;
5062         int                     cpt;
5063         __u32                   order = 2;
5064         struct list_head        *rn_list;
5065
5066         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5067          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5068          * keep order 0 free for 0@lo and order 1 free for a local NID
5069          * match */
5070
5071         LASSERT(the_lnet.ln_refcount > 0);
5072
5073         cpt = lnet_net_lock_current();
5074
5075         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5076                 if (ni->ni_nid == dstnid) {
5077                         if (srcnidp != NULL)
5078                                 *srcnidp = dstnid;
5079                         if (orderp != NULL) {
5080                                 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
5081                                         *orderp = 0;
5082                                 else
5083                                         *orderp = 1;
5084                         }
5085                         lnet_net_unlock(cpt);
5086
5087                         return local_nid_dist_zero ? 0 : 1;
5088                 }
5089
5090                 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
5091                         /* Check if ni was originally created in
5092                          * the current net namespace.
5093                          * If not, bump the order above 0xffff0000
5094                          * so this ni is not preferred. */
5095                         if (current->nsproxy &&
5096                             !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
5097                                 order += 0xffff0000;
5098                         if (srcnidp != NULL)
5099                                 *srcnidp = ni->ni_nid;
5100                         if (orderp != NULL)
5101                                 *orderp = order;
5102                         lnet_net_unlock(cpt);
5103                         return 1;
5104                 }
5105
5106                 order++;
5107         }
5108
5109         rn_list = lnet_net2rnethash(dstnet);
5110         list_for_each(e, rn_list) {
5111                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5112
5113                 if (rnet->lrn_net == dstnet) {
5114                         struct lnet_route *route;
5115                         struct lnet_route *shortest = NULL;
5116                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5117                         __u32 route_hops;
5118
5119                         LASSERT(!list_empty(&rnet->lrn_routes));
5120
5121                         list_for_each_entry(route, &rnet->lrn_routes,
5122                                             lr_list) {
5123                                 route_hops = route->lr_hops;
5124                                 if (route_hops == LNET_UNDEFINED_HOPS)
5125                                         route_hops = 1;
5126                                 if (shortest == NULL ||
5127                                     route_hops < shortest_hops) {
5128                                         shortest = route;
5129                                         shortest_hops = route_hops;
5130                                 }
5131                         }
5132
5133                         LASSERT(shortest != NULL);
5134                         hops = shortest_hops;
5135                         if (srcnidp != NULL) {
5136                                 struct lnet_net *net;
5137                                 net = lnet_get_net_locked(shortest->lr_lnet);
5138                                 LASSERT(net);
5139                                 ni = lnet_get_next_ni_locked(net, NULL);
5140                                 *srcnidp = ni->ni_nid;
5141                         }
5142                         if (orderp != NULL)
5143                                 *orderp = order;
5144                         lnet_net_unlock(cpt);
5145                         return hops + 1;
5146                 }
5147                 order++;
5148         }
5149
5150         lnet_net_unlock(cpt);
5151         return -EHOSTUNREACH;
5152 }
5153 EXPORT_SYMBOL(LNetDist);
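
/*
 * Editor's sketch (assumption): using LNetDist() to classify a NID.
 * With the default local_nid_dist_zero=1, a return of 0 means one of
 * our own NIDs, 1 means directly reachable on a local network, and >1
 * means the NID is reached through a router.
 */
static bool example_nid_is_local(lnet_nid_t nid)
{
	return LNetDist(nid, NULL, NULL) == 0;
}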