LU-12756 lnet: Refactor lnet_find_best_lpni_on_net
lnet/lnet/lib-move.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        lnet_nid_t sd_dst_nid;
        lnet_nid_t sd_src_nid;
        lnet_nid_t sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

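/*
 * Usage sketch for the stats helpers above (illustrative only, not part
 * of this file's call flow; "ni" is assumed to be a valid struct lnet_ni):
 *
 *      lnet_incr_stats(&ni->ni_stats, LNET_MSG_PUT, LNET_STATS_TYPE_SEND);
 *      sent = lnet_sum_stats(&ni->ni_stats, LNET_STATS_TYPE_SEND);
 *
 * get_stats_counts() maps the stats_type to one of the three counter
 * sets (send/recv/drop), lnet_incr_stats() bumps the per-message-type
 * atomic within that set, and lnet_sum_stats() totals all five counters.
 */
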
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct list_head  cull;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        /* removing entries */
        INIT_LIST_HEAD(&cull);

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}

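/*
 * Illustrative use of the test hook above (a sketch, not part of this
 * file's call flow): arrange for the next three messages matching "nid"
 * to fail, then remove the entry again.
 *
 *      lnet_fail_nid(nid, 3);    - fail_peer() below will match 3 times
 *      lnet_fail_nid(nid, 0);    - cull matching and zombie entries
 *
 * A threshold of LNET_MD_THRESH_INF keeps failing matches until the
 * entry is explicitly removed; fail_peer() consumes the threshold on
 * each match.
 */
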
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct list_head  cull;
        int               fail = 0;

        INIT_LIST_HEAD(&cull);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int  this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = MIN(diov->iov_len - doffset,
                               siov->iov_len - soffset);
                this_nob = MIN(this_nob, nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

int
lnet_extract_iov(int dst_niov, struct kvec *dst,
                 int src_niov, struct kvec *src,
                 unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->iov_len) {      /* skip initial frags */
                offset -= src->iov_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->iov_len - offset;
                dst->iov_base = ((char *)src->iov_base) + offset;

                if (len <= frag_len) {
                        dst->iov_len = len;
                        return (niov);
                }

                dst->iov_len = frag_len;

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_iov);

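/*
 * Worked example for lnet_extract_iov() (values invented for
 * illustration): with src = {1024 bytes, 1024 bytes}, offset = 512 and
 * len = 1024, the call
 *
 *      struct kvec dst[2];
 *      int n = lnet_extract_iov(2, dst, 2, src, 512, 1024);
 *
 * returns n == 2 with dst[0] covering the last 512 bytes of src[0] and
 * dst[1] the first 512 bytes of src[1]; 'src' is left untouched.
 */
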
unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
        unsigned int  nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->kiov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int    this_nob;
        char           *daddr = NULL;
        char           *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->kiov_len) {
                doffset -= diov->kiov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->kiov_len) {
                soffset -= siov->kiov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = MIN(diov->kiov_len - doffset,
                               siov->kiov_len - soffset);
                this_nob = MIN(this_nob, nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->kiov_page)) +
                                diov->kiov_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->kiov_page)) +
                                siov->kiov_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->kiov_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->kiov_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->kiov_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->kiov_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->kiov_page);
        if (saddr != NULL)
                kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                    unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = MIN(iov->iov_len - iovoffset,
                               kiov->kiov_len - kiovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = MIN(kiov->kiov_len - kiovoffset,
                               iov->iov_len - iovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
                  int src_niov, lnet_kiov_t *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->kiov_len) {      /* skip initial frags */
                offset -= src->kiov_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->kiov_len - offset;
                dst->kiov_page = src->kiov_page;
                dst->kiov_offset = src->kiov_offset + offset;

                if (len <= frag_len) {
                        dst->kiov_len = len;
                        LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
                        return niov;
                }

                dst->kiov_len = frag_len;
                LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);

void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int  niov = 0;
        struct kvec *iov = NULL;
        lnet_kiov_t  *kiov = NULL;
        int           rc;

        LASSERT (!in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        iov  = msg->msg_iov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, iov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        if ((md->md_options & LNET_MD_KIOV) != 0)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}

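/*
 * Sketch of preparing an outgoing PUT with the helper above
 * (illustrative; "msg" must already have msg_md attached when len != 0,
 * since lnet_setpayloadbuffer() pulls the iovecs from the MD):
 *
 *      lnet_prep_send(msg, LNET_MSG_PUT, target, 0, len);
 *
 * The header is written in little-endian wire order; dest_nid is later
 * overwritten by the path selection code and src_nid is filled in at
 * send time, as the comments above note.
 */
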
static void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void   *priv = msg->msg_private;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                 (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int     rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                  &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

static bool
lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
{
        time64_t deadline;

        deadline = lpni->lpni_last_alive +
                   lpni->lpni_net->net_tunables.lct_peer_timeout;

        /*
         * assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (deadline > now)
                return false;

        return true;
}

/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
{
        time64_t now = ktime_get_seconds();

        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;

        /*
         * If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 1;

        /* try and send recovery messages regardless */
        if (msg->msg_recovery)
                return 1;

        /* always send any responses */
        if (msg->msg_type == LNET_MSG_ACK ||
            msg->msg_type == LNET_MSG_REPLY)
                return 1;

        if (!lnet_is_peer_deadline_passed(lpni, now))
                return 1;

        return lnet_is_peer_ni_alive(lpni);
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets \a do_send FALSE and the unlock/send/lock sequence is not
 *        done here.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni     *lp = msg->msg_txpeer;
        struct lnet_ni          *ni = msg->msg_txni;
        int                     cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);
        /* can't get here if we're sending to the loopback interface */
        LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp, msg) == 0) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);
                if (msg->msg_txni)
                        lnet_incr_stats(&msg->msg_txni->ni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                if (do_send)
                        lnet_finalize(msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_id2str(msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        /* unset the tx_delayed flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

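/*
 * Caller-side sketch for lnet_post_send_locked() (illustrative; the
 * real callers include the send path and the credit-return path below):
 *
 *      lnet_net_lock(cpt);
 *      rc = lnet_post_send_locked(msg, 0);
 *      lnet_net_unlock(cpt);
 *      if (rc == LNET_CREDIT_OK)
 *              lnet_ni_send(ni, msg);
 *
 * With do_send == 0 the function only accounts credits and queues the
 * message on a credit shortage (LNET_CREDIT_WAIT); on -EHOSTUNREACH or
 * -ECANCELED the message is finalized inside only when do_send is set.
 */
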
static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool  *rbp;
        int                     cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

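/*
 * Worked example for the pool walk above (page counts are the
 * conventional tiny/small/large router pool sizes and are illustrative;
 * assumes 4 KiB pages): with rbp_npages = {0, 1, 256},
 *
 *      msg_len = 0       -> pool 0 (no payload pages needed)
 *      msg_len = 300     -> pool 1 (300 <= 1 * PAGE_SIZE)
 *      msg_len = 65536   -> pool 2 (needs 16 pages; only pool 2 fits)
 *
 * The LASSERTs bound the walk: msg_len <= LNET_MTU always lands within
 * the LNET_NRBPOOLS pools.
 */
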
static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so
         * it sets do_recv FALSE and the unlock/send/lock sequence is not done
         * here. Returns LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);
                /* lp_lock protects the lp_rtrq */
                spin_lock(&lp->lp_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&lpni->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lp_lock);
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
        struct lnet_ni          *txni = msg->msg_txni;
        struct lnet_msg         *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_entry(txpeer->lpni_txq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_entry(rbp->rbp_msgs.next,
                         struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits.  */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                lp = rxpeerni->lpni_peer_net->lpn_peer;

                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeerni->lpni_lock);
                spin_lock(&lp->lp_lock);

                rxpeerni->lpni_rtrcredits++;

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        struct list_head drop;
                        INIT_LIST_HEAD(&drop);
                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&rxpeerni->lpni_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_entry(lp->lp_rtrq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&rxpeerni->lpni_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                        spin_unlock(&rxpeerni->lpni_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static int
lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
{
        if (p1->lpni_txqnob < p2->lpni_txqnob)
                return 1;

        if (p1->lpni_txqnob > p2->lpni_txqnob)
                return -1;

        if (p1->lpni_txcredits > p2->lpni_txcredits)
                return 1;

        if (p1->lpni_txcredits < p2->lpni_txcredits)
                return -1;

        return 0;
}

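/*
 * lnet_compare_peers() is a strict preference order: the peer_ni with
 * the smaller tx queue backlog (lpni_txqnob) wins; on equal backlog the
 * one with more tx credits wins. Worked example (invented numbers):
 *
 *      p1: lpni_txqnob = 0, lpni_txcredits = 8
 *      p2: lpni_txqnob = 0, lpni_txcredits = 5
 *
 * lnet_compare_peers(p1, p2) == 1, i.e. p1 is preferred; 0 means a tie
 * and the caller breaks it some other way (e.g. route sequence number).
 */
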
static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        struct lnet_peer_ni *best_lpni = NULL;
        int best_lpni_credits = INT_MIN;
        bool preferred = false;
        bool ni_is_pref;
        int best_lpni_healthv = 0;
        int lpni_healthv;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        ni_is_pref = lnet_peer_is_pref_nid_locked(lpni,
                                                                best_ni->ni_nid);
                        CDEBUG(D_NET, "%s ni_is_pref = %d\n",
                               libcfs_nid2str(best_ni->ni_nid), ni_is_pref);
                } else {
                        ni_is_pref = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);

                if (best_lpni)
                        CDEBUG(D_NET, "%s c:[%d, %d], s:[%d, %d]\n",
                                libcfs_nid2str(lpni->lpni_nid),
                                lpni->lpni_txcredits, best_lpni_credits,
                                lpni->lpni_seq, best_lpni->lpni_seq);

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv) {
                        continue;
                } else if (lpni_healthv > best_lpni_healthv) {
                        best_lpni_healthv = lpni_healthv;
                /* if this is a preferred peer use it */
                } else if (!preferred && ni_is_pref) {
                        preferred = true;
                } else if (preferred && !ni_is_pref) {
                        /*
                         * this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                } else if (lpni->lpni_txcredits < best_lpni_credits) {
                        /*
                         * We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                } else if (lpni->lpni_txcredits == best_lpni_credits) {
                        /*
                         * The best peer found so far and the current peer
                         * have the same number of available credits, so
                         * make sure to select between them using Round
                         * Robin
                         */
                        if (best_lpni) {
                                if (best_lpni->lpni_seq <= lpni->lpni_seq)
                                        continue;
                        }
                }

                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);
                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                                libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nid2str(best_lpni->lpni_nid));

        return best_lpni;
}

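/*
 * Selection sketch for lnet_select_peer_ni() (invented values): with no
 * preferred NI configured, health is compared first, then credits, then
 * the sequence number as a round-robin tie breaker (lower lpni_seq
 * wins):
 *
 *      lpni A: healthv 100, txcredits 7, seq 12
 *      lpni B: healthv 100, txcredits 7, seq 9    <- selected
 *      lpni C: healthv  90, txcredits 9, seq 3    (less healthy, skipped)
 */
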
1431 /*
1432  * Prerequisite: the best_ni should already be set in the sd
1433  */
1434 static inline struct lnet_peer_ni *
1435 lnet_find_best_lpni_on_net(struct lnet_ni *lni, lnet_nid_t dst_nid,
1436                            struct lnet_peer *peer, __u32 net_id)
1437 {
1438         struct lnet_peer_net *peer_net;
1439
1440         /*
1441          * The gateway is Multi-Rail capable so now we must select the
1442          * proper peer_ni
1443          */
1444         peer_net = lnet_peer_get_net_locked(peer, net_id);
1445
1446         if (!peer_net) {
1447                 CERROR("gateway peer %s has no NI on net %s\n",
1448                        libcfs_nid2str(peer->lp_primary_nid),
1449                        libcfs_net2str(net_id));
1450                 return NULL;
1451         }
1452
1453         return lnet_select_peer_ni(lni, dst_nid, peer, peer_net);
1454 }
1455
1456 static int
1457 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2,
1458                     struct lnet_peer_ni **best_lpni)
1459 {
1460         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1461         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1462         struct lnet_peer *lp1 = r1->lr_gateway;
1463         struct lnet_peer *lp2 = r2->lr_gateway;
1464         struct lnet_peer_ni *lpni1;
1465         struct lnet_peer_ni *lpni2;
1466         int rc;
1467
1468         lpni1 = lnet_find_best_lpni_on_net(NULL, LNET_NID_ANY, lp1,
1469                                            r1->lr_lnet);
1470         lpni2 = lnet_find_best_lpni_on_net(NULL, LNET_NID_ANY, lp2,
1471                                            r2->lr_lnet);
1472         LASSERT(lpni1 && lpni2);
1473
1474         if (r1->lr_priority < r2->lr_priority) {
1475                 *best_lpni = lpni1;
1476                 return 1;
1477         }
1478
1479         if (r1->lr_priority > r2->lr_priority) {
1480                 *best_lpni = lpni2;
1481                 return -1;
1482         }
1483
1484         if (r1_hops < r2_hops) {
1485                 *best_lpni = lpni1;
1486                 return 1;
1487         }
1488
1489         if (r1_hops > r2_hops) {
1490                 *best_lpni = lpni2;
1491                 return -1;
1492         }
1493
1494         rc = lnet_compare_peers(lpni1, lpni2);
1495         if (rc == 1) {
1496                 *best_lpni = lpni1;
1497                 return rc;
1498         } else if (rc == -1) {
1499                 *best_lpni = lpni2;
1500                 return rc;
1501         }
1502
1503         if (r1->lr_seq - r2->lr_seq <= 0) {
1504                 *best_lpni = lpni1;
1505                 return 1;
1506         }
1507
1508         *best_lpni = lpni2;
1509         return -1;
1510 }
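
/*
 * Worked example (hypothetical values): with lr_priority 0 vs 1, the
 * priority-0 route wins regardless of hop count (lower values are
 * preferred). With equal priorities, 1 hop beats 2, and
 * LNET_UNDEFINED_HOPS is treated as 1. If hops tie as well, the
 * gateway peers are compared, and finally lr_seq breaks the tie in
 * round-robin fashion.
 */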
1511
1512 static struct lnet_route *
1513 lnet_find_route_locked(struct lnet_net *net, __u32 remote_net,
1514                        struct lnet_route **prev_route,
1515                        struct lnet_peer_ni **gwni)
1516 {
1517         struct lnet_peer_ni *best_gw_ni = NULL;
1518         struct lnet_route *best_route;
1519         struct lnet_route *last_route;
1520         struct lnet_remotenet *rnet;
1521         struct lnet_peer *lp_best;
1522         struct lnet_route *route;
1523         struct lnet_peer *lp;
1524         int rc;
1525
1526         rnet = lnet_find_rnet_locked(remote_net);
1527         if (rnet == NULL)
1528                 return NULL;
1529
1530         lp_best = NULL;
1531         best_route = last_route = NULL;
1532         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1533                 lp = route->lr_gateway;
1534
1535                 if (!lnet_is_route_alive(route))
1536                         continue;
1537
1538                 if (lp_best == NULL) {
1539                         best_route = last_route = route;
1540                         lp_best = lp;
1541                 }
1542
1543                 /* no protection on the fields below, but it's harmless */
1544                 if (last_route->lr_seq - route->lr_seq < 0)
1545                         last_route = route;
1546
1547                 rc = lnet_compare_routes(route, best_route, &best_gw_ni);
1548                 if (rc < 0)
1549                         continue;
1550
1551                 best_route = route;
1552                 lp_best = lp;
1553         }
1554
1555         *prev_route = last_route;
1556         *gwni = best_gw_ni;
1557
1558         return best_route;
1559 }
1560
1561 static struct lnet_ni *
1562 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1563                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1564                  int md_cpt)
1565 {
1566         struct lnet_ni *ni = NULL;
1567         unsigned int shortest_distance;
1568         int best_credits;
1569         int best_healthv;
1570
1571         /*
1572          * If there is no peer_ni that we can send to on this network,
1573          * then there is no point in looking for a new best_ni here.
1574          */
1575         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1576                 return best_ni;
1577
1578         if (best_ni == NULL) {
1579                 shortest_distance = UINT_MAX;
1580                 best_credits = INT_MIN;
1581                 best_healthv = 0;
1582         } else {
1583                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1584                                                      best_ni->ni_dev_cpt);
1585                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1586                 best_healthv = atomic_read(&best_ni->ni_healthv);
1587         }
1588
1589         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1590                 unsigned int distance;
1591                 int ni_credits;
1592                 int ni_healthv;
1593                 int ni_fatal;
1594
1595                 ni_credits = atomic_read(&ni->ni_tx_credits);
1596                 ni_healthv = atomic_read(&ni->ni_healthv);
1597                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1598
1599                 /*
1600                  * calculate the distance from the CPT on which
1601                  * the message memory is allocated to the CPT of
1602                  * the NI's physical device
1603                  */
1604                 distance = cfs_cpt_distance(lnet_cpt_table(),
1605                                             md_cpt,
1606                                             ni->ni_dev_cpt);
1607
1608                 CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d] with best_ni %s [c:%d, d:%d, s:%d]\n",
1609                        libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1610                        ni->ni_seq, (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1611                         : "not selected", best_credits, shortest_distance,
1612                         (best_ni) ? best_ni->ni_seq : 0);
1613
1614                 /*
1615                  * All distances smaller than the NUMA range
1616                  * are treated equally.
1617                  */
1618                 if (distance < lnet_numa_range)
1619                         distance = lnet_numa_range;
1620
1621                 /*
1622                  * Select on health, shorter distance, available
1623                  * credits, then round-robin.
1624                  */
1625                 if (ni_fatal) {
1626                         continue;
1627                 } else if (ni_healthv < best_healthv) {
1628                         continue;
1629                 } else if (ni_healthv > best_healthv) {
1630                         best_healthv = ni_healthv;
1631                         /*
1632                          * If we're going to prefer this ni because it's
1633                          * the healthiest, then we should set the
1634                          * shortest_distance in the algorithm in case
1635                          * there are multiple NIs with the same health but
1636                          * different distances.
1637                          */
1638                         if (distance < shortest_distance)
1639                                 shortest_distance = distance;
1640                 } else if (distance > shortest_distance) {
1641                         continue;
1642                 } else if (distance < shortest_distance) {
1643                         shortest_distance = distance;
1644                 } else if (ni_credits < best_credits) {
1645                         continue;
1646                 } else if (ni_credits == best_credits) {
1647                         if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1648                                 continue;
1649                 }
1650                 best_ni = ni;
1651                 best_credits = ni_credits;
1652         }
1653
1654         CDEBUG(D_NET, "selected best_ni %s\n",
1655                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1656
1657         return best_ni;
1658 }
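
/*
 * Example of the NUMA clamp above (hypothetical values): with
 * lnet_numa_range = 4, CPT distances 0, 2 and 3 all round up to 4 and
 * compare equal, so selection among those NIs falls through to
 * credits and round robin (health being equal); an NI at distance 5
 * would still lose on distance alone.
 */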
1659
1660 /*
1661  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1662  * because such traffic is required to perform discovery. We therefore
1663  * exclude all GET and PUT on that portal. We also exclude all ACK and
1664  * REPLY traffic, but that is because the portal is not tracked in the
1665  * message structure for these message types. We could restrict this
1666  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1667  */
1668 static bool
1669 lnet_msg_discovery(struct lnet_msg *msg)
1670 {
1671         if (msg->msg_type == LNET_MSG_PUT) {
1672                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1673                         return true;
1674         } else if (msg->msg_type == LNET_MSG_GET) {
1675                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1676                         return true;
1677         }
1678         return false;
1679 }
1680
1681 #define SRC_SPEC        0x0001
1682 #define SRC_ANY         0x0002
1683 #define LOCAL_DST       0x0004
1684 #define REMOTE_DST      0x0008
1685 #define MR_DST          0x0010
1686 #define NMR_DST         0x0020
1687 #define SND_RESP        0x0040
1688
1689 /* The following two defines are used for return codes */
1690 #define REPEAT_SEND     0x1000
1691 #define PASS_THROUGH    0x2000
1692
1693 /* The different cases lnet_select pathway needs to handle */
1694 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1695 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1696 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1697 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1698 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1699 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1700 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1701 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
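
/*
 * Example: a PUT with no source NID specified, destined to an MR peer
 * on a directly connected network, yields a send case of
 * SRC_ANY | LOCAL_DST | MR_DST == 0x0002 | 0x0004 | 0x0010 == 0x0016,
 * i.e. SRC_ANY_LOCAL_MR_DST, which is dispatched to
 * lnet_handle_any_mr_dst() further below.
 */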
1702
1703 static int
1704 lnet_handle_lo_send(struct lnet_send_data *sd)
1705 {
1706         struct lnet_msg *msg = sd->sd_msg;
1707         int cpt = sd->sd_cpt;
1708
1709         /* No send credit hassles with LOLND */
1710         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1711         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1712         if (!msg->msg_routing)
1713                 msg->msg_hdr.src_nid =
1714                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1715         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1716         lnet_msg_commit(msg, cpt);
1717         msg->msg_txni = the_lnet.ln_loni;
1718
1719         return LNET_CREDIT_OK;
1720 }
1721
1722 static int
1723 lnet_handle_send(struct lnet_send_data *sd)
1724 {
1725         struct lnet_ni *best_ni = sd->sd_best_ni;
1726         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1727         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1728         struct lnet_msg *msg = sd->sd_msg;
1729         int cpt2;
1730         __u32 send_case = sd->sd_send_case;
1731         int rc;
1732         __u32 routing = send_case & REMOTE_DST;
1733         struct lnet_rsp_tracker *rspt;
1734
1735         /*
1736          * Increment sequence number of the selected peer so that we
1737          * pick the next one in Round Robin.
1738          */
1739         best_lpni->lpni_seq++;
1740
1741         /*
1742          * grab a reference on the peer_ni so it sticks around even if
1743          * we need to drop and relock the lnet_net_lock below.
1744          */
1745         lnet_peer_ni_addref_locked(best_lpni);
1746
1747         /*
1748          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1749          * message. This ensures that we get a CPT that is correct for
1750          * the NI when the NI has been restricted to a subset of all CPTs.
1751          * If the selected CPT differs from the one currently locked, we
1752          * must unlock and relock the lnet_net_lock(), and then check whether
1753          * the configuration has changed. We don't have a hold on the best_ni
1754          * yet, and it may have vanished.
1755          */
1756         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1757         if (sd->sd_cpt != cpt2) {
1758                 __u32 seq = lnet_get_dlc_seq_locked();
1759                 lnet_net_unlock(sd->sd_cpt);
1760                 sd->sd_cpt = cpt2;
1761                 lnet_net_lock(sd->sd_cpt);
1762                 if (seq != lnet_get_dlc_seq_locked()) {
1763                         lnet_peer_ni_decref_locked(best_lpni);
1764                         return REPEAT_SEND;
1765                 }
1766         }
1767
1768         /*
1769          * store the best_lpni in the message right away to avoid having
1770          * to do the same operation under different conditions
1771          */
1772         msg->msg_txpeer = best_lpni;
1773         msg->msg_txni = best_ni;
1774
1775         /*
1776          * grab a reference for the best_ni since now it's in use in this
1777          * send. The reference will be dropped in lnet_finalize()
1778          */
1779         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1780
1781         /*
1782          * Always set the target.nid to the best peer picked: either one
1783          * of the peer NIDs selected by the algorithm, the same NID that
1784          * was originally set in the target, or the NID of a router if
1785          * this message should be routed
1786          */
1787         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1788
1789         /*
1790          * lnet_msg_commit assigns the correct cpt to the message, which
1791          * is used to decrement the correct refcount on the ni when it's
1792          * time to return the credits
1793          */
1794         lnet_msg_commit(msg, sd->sd_cpt);
1795
1796         /*
1797          * If we are routing the message then we keep the src_nid that was
1798          * set by the originator. If we are not routing then we are the
1799          * originator and set it here.
1800          */
1801         if (!msg->msg_routing)
1802                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1803
1804         if (routing) {
1805                 msg->msg_target_is_router = 1;
1806                 msg->msg_target.pid = LNET_PID_LUSTRE;
1807                 /*
1808                  * since we're routing we want to ensure that the
1809                  * msg_hdr.dest_nid is set to the final destination. When
1810                  * the router receives this message it knows how to route
1811                  * it.
1812                  *
1813                  * final_dst_lpni is set at the beginning of the
1814                  * lnet_select_pathway() function and is never changed.
1815                  * It's safe to use it here.
1816                  */
1817                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1818         } else {
1819                 /*
1820                  * if we're not routing set the dest_nid to the best peer
1821                  * ni NID that we picked earlier in the algorithm.
1822                  */
1823                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1824         }
1825
1826         /*
1827          * if we have a response tracker block, update it with the next
1828          * hop nid
1829          */
1830         if (msg->msg_md) {
1831                 rspt = msg->msg_md->md_rspt_ptr;
1832                 if (rspt) {
1833                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1834                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1835                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1836                 }
1837         }
1838
1839         rc = lnet_post_send_locked(msg, 0);
1840
1841         if (!rc)
1842                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1843                        libcfs_nid2str(msg->msg_hdr.src_nid),
1844                        libcfs_nid2str(msg->msg_txni->ni_nid),
1845                        libcfs_nid2str(sd->sd_src_nid),
1846                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1847                        libcfs_nid2str(sd->sd_dst_nid),
1848                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1849                        libcfs_nid2str(sd->sd_rtr_nid),
1850                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1851
1852         return rc;
1853 }
1854
1855 static inline void
1856 lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
1857 {
1858         if (sd->sd_send_case & NMR_DST &&
1859             sd->sd_msg->msg_type != LNET_MSG_REPLY &&
1860             sd->sd_msg->msg_type != LNET_MSG_ACK &&
1861             sd->sd_best_lpni->lpni_pref_nnids == 0) {
1862                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1863                        libcfs_nid2str(sd->sd_best_ni->ni_nid),
1864                        libcfs_nid2str(sd->sd_best_lpni->lpni_nid));
1865                 lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni,
1866                                                  sd->sd_best_ni->ni_nid);
1867         }
1868 }
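
/*
 * Example: the first PUT or GET to a non-MR peer records the chosen
 * local NI as that peer NI's single preferred NID (lpni_pref_nnids
 * becomes 1), so later messages to this peer keep the same source.
 * ACK and REPLY are excluded above, presumably because a response
 * should not establish a new preference on its own.
 */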
1869
1870 /*
1871  * Source Specified
1872  * Local Destination
1873  * non-mr peer
1874  *
1875  * use the source and destination NIDs as the pathway
1876  */
1877 static int
1878 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1879 {
1880         /* the destination lpni is set before we get here. */
1881
1882         /* find local NI */
1883         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1884         if (!sd->sd_best_ni) {
1885                 CERROR("Can't send to %s: src %s is not a "
1886                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1887                                 libcfs_nid2str(sd->sd_src_nid));
1888                 return -EINVAL;
1889         }
1890
1891         /*
1892          * the preferred NID will only be set for NMR peers
1893          */
1894         lnet_set_non_mr_pref_nid(sd);
1895
1896         return lnet_handle_send(sd);
1897 }
1898
1899 /*
1900  * Source Specified
1901  * Local Destination
1902  * MR Peer
1903  *
1904  * Don't run the selection algorithm on the peer NIs. By specifying the
1905  * local NID, we're also saying that we should always use the destination NID
1906  * provided. This handles the case where we should be using the same
1907  * destination NID for the all the messages which belong to the same RPC
1908  * request.
1909  */
1910 static int
1911 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
1912 {
1913         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1914         if (!sd->sd_best_ni) {
1915                 CERROR("Can't send to %s: src %s is not a "
1916                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1917                                 libcfs_nid2str(sd->sd_src_nid));
1918                 return -EINVAL;
1919         }
1920
1921         if (sd->sd_best_lpni &&
1922             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
1923                 return lnet_handle_lo_send(sd);
1924         else if (sd->sd_best_lpni)
1925                 return lnet_handle_send(sd);
1926
1927         CERROR("can't send to %s. no NI on %s\n",
1928                libcfs_nid2str(sd->sd_dst_nid),
1929                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
1930
1931         return -EHOSTUNREACH;
1932 }
1933
1934 struct lnet_ni *
1935 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
1936                               struct lnet_peer *peer,
1937                               struct lnet_peer_net *peer_net,
1938                               int cpt,
1939                               bool incr_seq)
1940 {
1941         struct lnet_net *local_net;
1942         struct lnet_ni *best_ni;
1943
1944         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
1945         if (!local_net)
1946                 return NULL;
1947
1948         /*
1949          * Iterate through the NIs in this local Net and select
1950          * the NI to send from. The selection is determined by
1951          * these criteria, in decreasing priority:
1952          *      1. NI health, then NUMA distance
1953          *      2. NI available credits
1954          *      3. Round Robin
1955          */
1956         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
1957                                    peer, peer_net, cpt);
1958
1959         if (incr_seq && best_ni)
1960                 best_ni->ni_seq++;
1961
1962         return best_ni;
1963 }
1964
1965 static int
1966 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
1967                              struct lnet_msg *msg, lnet_nid_t rtr_nid,
1968                              int cpt)
1969 {
1970         struct lnet_peer *peer;
1971         lnet_nid_t primary_nid;
1972         int rc;
1973
1974         lnet_peer_ni_addref_locked(lpni);
1975
1976         peer = lpni->lpni_peer_net->lpn_peer;
1977
1978         if (lnet_peer_gw_discovery(peer)) {
1979                 lnet_peer_ni_decref_locked(lpni);
1980                 return 0;
1981         }
1982
1983         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
1984                 lnet_peer_ni_decref_locked(lpni);
1985                 return 0;
1986         }
1987
1988         rc = lnet_discover_peer_locked(lpni, cpt, false);
1989         if (rc) {
1990                 lnet_peer_ni_decref_locked(lpni);
1991                 return rc;
1992         }
1993         /* The peer may have changed. */
1994         peer = lpni->lpni_peer_net->lpn_peer;
1995         spin_lock(&peer->lp_lock);
1996         if (lnet_peer_is_uptodate_locked(peer)) {
1997                 spin_unlock(&peer->lp_lock);
1998                 lnet_peer_ni_decref_locked(lpni);
1999                 return 0;
2000         }
2001         /* queue message and return */
2002         msg->msg_rtr_nid_param = rtr_nid;
2003         msg->msg_sending = 0;
2004         msg->msg_txpeer = NULL;
2005         list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
2006         primary_nid = peer->lp_primary_nid;
2007         spin_unlock(&peer->lp_lock);
2008
2009         lnet_peer_ni_decref_locked(lpni);
2010
2011         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2012                 msg, libcfs_nid2str(primary_nid));
2013
2014         return LNET_DC_WAIT;
2015 }
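
/*
 * Flow sketch: when discovery is required, the message is parked on
 * the peer's lp_dc_pendq with msg_sending reset to 0, and
 * LNET_DC_WAIT tells the caller not to post it yet; the queued
 * messages are expected to be re-driven through the send path once
 * discovery completes.
 */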
2016
2017 static int
2018 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2019                              lnet_nid_t dst_nid,
2020                              struct lnet_peer_ni **gw_lpni,
2021                              struct lnet_peer **gw_peer)
2022 {
2023         int rc;
2024         __u32 local_lnet;
2025         struct lnet_peer *gw;
2026         struct lnet_peer *lp;
2027         struct lnet_peer_net *lpn;
2028         struct lnet_peer_net *best_lpn = NULL;
2029         struct lnet_remotenet *rnet;
2030         struct lnet_route *best_route = NULL;
2031         struct lnet_route *last_route = NULL;
2032         struct lnet_peer_ni *lpni = NULL;
2033         struct lnet_peer_ni *gwni = NULL;
2034         lnet_nid_t src_nid = sd->sd_src_nid;
2035
2036         /* If a router nid was specified then we are replying to a GET or
2037          * sending an ACK. In this case we use the gateway associated with the
2038          * specified router nid.
2039          */
2040         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2041                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2042                 if (!gwni) {
2043                         CERROR("No peer NI for gateway %s\n",
2044                                libcfs_nid2str(sd->sd_rtr_nid));
2045                         return -EHOSTUNREACH;
2046                 }
2047                 gw = gwni->lpni_peer_net->lpn_peer;
2048                 lnet_peer_ni_decref_locked(gwni);
2049                 local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
2050         } else {
2051                 /* we've already looked up the initial lpni using dst_nid */
2052                 lpni = sd->sd_best_lpni;
2053                 /* the peer tree must be in existence */
2054                 LASSERT(lpni && lpni->lpni_peer_net &&
2055                         lpni->lpni_peer_net->lpn_peer);
2056                 lp = lpni->lpni_peer_net->lpn_peer;
2057
2058                 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2059                         /* is this remote network reachable?  */
2060                         rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2061                         if (!rnet)
2062                                 continue;
2063
2064                         if (!best_lpn)
2065                                 best_lpn = lpn;
2066
2067                         if (best_lpn->lpn_seq <= lpn->lpn_seq)
2068                                 continue;
2069
2070                         best_lpn = lpn;
2071                 }
2072
2073                 if (!best_lpn) {
2074                         CERROR("peer %s has no available nets\n",
2075                                libcfs_nid2str(sd->sd_dst_nid));
2076                         return -EHOSTUNREACH;
2077                 }
2078
2079                 sd->sd_best_lpni = lnet_find_best_lpni_on_net(sd->sd_best_ni,
2080                                                               sd->sd_dst_nid,
2081                                                               lp,
2082                                                               best_lpn->lpn_net_id);
2083                 if (!sd->sd_best_lpni) {
2084                         CERROR("peer %s down\n",
2085                                libcfs_nid2str(sd->sd_dst_nid));
2086                         return -EHOSTUNREACH;
2087                 }
2088
2089                 best_route = lnet_find_route_locked(NULL, best_lpn->lpn_net_id,
2090                                                     &last_route, &gwni);
2091                 if (!best_route) {
2092                         CERROR("no route to %s from %s\n",
2093                                libcfs_nid2str(dst_nid),
2094                                libcfs_nid2str(src_nid));
2095                         return -EHOSTUNREACH;
2096                 }
2097
2098                 if (!gwni) {
2099                         CERROR("Internal Error. Route expected to %s from %s\n",
2100                                libcfs_nid2str(dst_nid),
2101                                libcfs_nid2str(src_nid));
2102                         return -EFAULT;
2103                 }
2104
2105                 gw = best_route->lr_gateway;
2106                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2107                 local_lnet = best_route->lr_lnet;
2108
2109         }
2110
2111         /*
2112          * Discover this gateway if it hasn't already been discovered.
2113          * This means we might delay the message until discovery has
2114          * completed
2115          */
2116         sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
2117         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_rtr_nid,
2118                                           sd->sd_cpt);
2119         if (rc)
2120                 return rc;
2121
2122         if (!sd->sd_best_ni)
2123                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
2124                                         lnet_peer_get_net_locked(gw,
2125                                                                  local_lnet),
2126                                         sd->sd_md_cpt,
2127                                         true);
2128
2129         if (!sd->sd_best_ni) {
2130                 CERROR("Internal Error. Expected local NI on %s but none found: %s\n",
2131                        libcfs_net2str(local_lnet),
2132                        libcfs_nid2str(sd->sd_src_nid));
2133                 return -EFAULT;
2134         }
2135
2136         *gw_lpni = gwni;
2137         *gw_peer = gw;
2138
2139         /*
2140          * increment the sequence numbers since now we're sure we're
2141          * going to use this path
2142          */
2143         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2144                 LASSERT(best_route && last_route);
2145                 best_route->lr_seq = last_route->lr_seq + 1;
2146                 best_lpn->lpn_seq++;
2147         }
2148
2149         return 0;
2150 }
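
/*
 * Example (hypothetical topology): a destination reachable over two
 * remote nets, tcp1 and tcp2, alternates between them via the lpn_seq
 * comparison above, while lnet_find_route_locked() alternates among
 * the routes within the chosen net via lr_seq, giving two levels of
 * round robin.
 */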
2151
2152 /*
2153  * Handle two cases:
2154  *
2155  * Case 1:
2156  *  Source specified
2157  *  Remote destination
2158  *  Non-MR destination
2159  *
2160  * Case 2:
2161  *  Source specified
2162  *  Remote destination
2163  *  MR destination
2164  *
2165  * The handling of these two cases is similar. Even though the destination
2166  * can be MR or non-MR, we'll deal directly with the router.
2167  */
2168 static int
2169 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2170 {
2171         int rc;
2172         struct lnet_peer_ni *gw_lpni = NULL;
2173         struct lnet_peer *gw_peer = NULL;
2174
2175         /* find local NI */
2176         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2177         if (!sd->sd_best_ni) {
2178                 CERROR("Can't send to %s: src %s is not a "
2179                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2180                                 libcfs_nid2str(sd->sd_src_nid));
2181                 return -EINVAL;
2182         }
2183
2184         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2185                                      &gw_peer);
2186         if (rc)
2187                 return rc;
2188
2189         if (sd->sd_send_case & NMR_DST)
2190                 /*
2191                  * since the final destination is non-MR let's set its preferred
2192                  * NID before we send
2193                  */
2194                 lnet_set_non_mr_pref_nid(sd);
2195
2196         /*
2197          * We're going to send to the gw found so let's set its
2198          * info
2199          */
2200         sd->sd_peer = gw_peer;
2201         sd->sd_best_lpni = gw_lpni;
2202
2203         return lnet_handle_send(sd);
2204 }
2205
2206 struct lnet_ni *
2207 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2208                                bool discovery)
2209 {
2210         struct lnet_peer_net *peer_net = NULL;
2211         struct lnet_ni *best_ni = NULL;
2212
2213         /*
2214          * The peer can have multiple interfaces, some of them can be on
2215          * the local network and others on a routed network. We should
2216          * prefer the local network. However if the local network is not
2217          * available then we need to try the routed network
2218          */
2219
2220         /* go through all the peer nets and find the best_ni */
2221         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
2222                 /*
2223                  * The peer's list of nets can contain non-local nets. We
2224                  * want to only examine the local ones.
2225                  */
2226                 if (!lnet_get_net_locked(peer_net->lpn_net_id))
2227                         continue;
2228                 best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
2229                                                    peer_net, md_cpt, false);
2230
2231                 /*
2232                  * if this is a discovery message and lp_disc_net_id is
2233                  * specified then use that net to send the discovery on.
2234                  */
2235                 if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
2236                     discovery)
2237                         break;
2238         }
2239
2240         if (best_ni)
2241                 /* increment sequence number so we can round robin */
2242                 best_ni->ni_seq++;
2243
2244         return best_ni;
2245 }
2246
2247 static struct lnet_ni *
2248 lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd)
2249 {
2250         struct lnet_ni *best_ni = NULL;
2251         struct lnet_peer_net *peer_net;
2252         struct lnet_peer *peer = sd->sd_peer;
2253         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2254         struct lnet_peer_ni *lpni;
2255         int cpt = sd->sd_cpt;
2256
2257         /*
2258          * We must use a consistent source address when sending to a
2259          * non-MR peer. However, a non-MR peer can have multiple NIDs
2260          * on multiple networks, and we may even need to talk to this
2261          * peer on multiple networks -- certain types of
2262          * load-balancing configuration do this.
2263          *
2264          * So we need to pick the NI the peer prefers for this
2265          * particular network.
2266          */
2267
2268         /* Get the target peer_ni */
2269         peer_net = lnet_peer_get_net_locked(peer,
2270                         LNET_NIDNET(best_lpni->lpni_nid));
2271         LASSERT(peer_net != NULL);
2272         list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
2273                                 lpni_peer_nis) {
2274                 if (lpni->lpni_pref_nnids == 0)
2275                         continue;
2276                 LASSERT(lpni->lpni_pref_nnids == 1);
2277                 best_ni = lnet_nid2ni_locked(
2278                                 lpni->lpni_pref.nid, cpt);
2279                 break;
2280         }
2281
2282         return best_ni;
2283 }
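
/*
 * Note: a non-MR peer NI records at most one preferred NID (hence the
 * LASSERT above), so the first entry found on this peer net with a
 * non-zero lpni_pref_nnids is sufficient.
 */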
2284
2285 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2286 static int
2287 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2288 {
2289         struct lnet_ni *best_ni = NULL;
2290         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2291
2292         /*
2293          * We must use a consistent source address when sending to a
2294          * non-MR peer. However, a non-MR peer can have multiple NIDs
2295          * on multiple networks, and we may even need to talk to this
2296          * peer on multiple networks -- certain types of
2297          * load-balancing configuration do this.
2298          *
2299          * So we need to pick the NI the peer prefers for this
2300          * particular network.
2301          */
2302
2303         best_ni = lnet_find_existing_preferred_best_ni(sd);
2304
2305         /* if best_ni is still not set just pick one */
2306         if (!best_ni) {
2307                 best_ni =
2308                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2309                                                 sd->sd_best_lpni->lpni_peer_net,
2310                                                 sd->sd_md_cpt, true);
2311                 /* If there is no best_ni we don't have a route */
2312                 if (!best_ni) {
2313                         CERROR("no path to %s from net %s\n",
2314                                 libcfs_nid2str(best_lpni->lpni_nid),
2315                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2316                         return -EHOSTUNREACH;
2317                 }
2318         }
2319
2320         sd->sd_best_ni = best_ni;
2321
2322         /* Set preferred NI if necessary. */
2323         lnet_set_non_mr_pref_nid(sd);
2324
2325         return 0;
2326 }
2327
2329 /*
2330  * Source not specified
2331  * Local destination
2332  * Non-MR Peer
2333  *
2334  * always use the same source NID for NMR peers
2335  * If we've talked to that peer before then we already have a preferred
2336  * source NI associated with it. Otherwise, we select a preferred local NI
2337  * and store it in the peer
2338  */
2339 static int
2340 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2341 {
2342         int rc;
2343
2344         /* sd->sd_best_lpni is already set to the final destination */
2345
2346         /*
2347          * At this point we should've created the peer ni and peer. If we
2348          * can't find it, then something went wrong. Instead of asserting,
2349          * output a relevant message and fail the send
2350          */
2351         if (!sd->sd_best_lpni) {
2352                 CERROR("Internal fault. Unable to send msg %s to %s. "
2353                        "NID not known\n",
2354                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2355                        libcfs_nid2str(sd->sd_dst_nid));
2356                 return -EFAULT;
2357         }
2358
2359         rc = lnet_select_preferred_best_ni(sd);
2360         if (!rc)
2361                 rc = lnet_handle_send(sd);
2362
2363         return rc;
2364 }
2365
2366 static int
2367 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2368 {
2369         /*
2370          * NOTE we've already handled the remote peer case. So we only
2371          * need to worry about the local case here.
2372          *
2373          * if we're sending a response, ACK or reply, we need to send it
2374          * to the destination NID given to us. At this point we already
2375          * have the peer_ni we're supposed to send to, so just find the
2376          * best_ni on the peer net and use that. Since we're sending to an
2377          * MR peer then we can just run the selection algorithm on our
2378          * local NIs and pick the best one.
2379          */
2380         if (sd->sd_send_case & SND_RESP) {
2381                 sd->sd_best_ni =
2382                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2383                                                 sd->sd_best_lpni->lpni_peer_net,
2384                                                 sd->sd_md_cpt, true);
2385
2386                 if (!sd->sd_best_ni) {
2387                         /*
2388                          * We're not going to deal with not able to send
2389                          * a response to the provided final destination
2390                          */
2391                         CERROR("Can't send response to %s. "
2392                                "No local NI available\n",
2393                                 libcfs_nid2str(sd->sd_dst_nid));
2394                         return -EHOSTUNREACH;
2395                 }
2396
2397                 return lnet_handle_send(sd);
2398         }
2399
2400         /*
2401          * If we get here that means we're sending a fresh request, PUT or
2402          * GET, so we need to run our standard selection algorithm.
2403          * First find the best local interface that's on any of the peer's
2404          * networks.
2405          */
2406         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2407                                         sd->sd_md_cpt,
2408                                         lnet_msg_discovery(sd->sd_msg));
2409         if (sd->sd_best_ni) {
2410                 sd->sd_best_lpni =
2411                   lnet_find_best_lpni_on_net(sd->sd_best_ni, sd->sd_dst_nid,
2412                                              sd->sd_peer,
2413                                              sd->sd_best_ni->ni_net->net_id);
2414
2415                 /*
2416                  * if we're successful in selecting a peer_ni on the local
2417                  * network, then send to it. Otherwise fall through and
2418                  * try and see if we can reach it over another routed
2419                  * network
2420                  */
2421                 if (sd->sd_best_lpni &&
2422                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2423                         /*
2424                          * in case we initially started with a routed
2425                          * destination, let's reset to local
2426                          */
2427                         sd->sd_send_case &= ~REMOTE_DST;
2428                         sd->sd_send_case |= LOCAL_DST;
2429                         return lnet_handle_lo_send(sd);
2430                 } else if (sd->sd_best_lpni) {
2431                         /*
2432                          * in case we initially started with a routed
2433                          * destination, let's reset to local
2434                          */
2435                         sd->sd_send_case &= ~REMOTE_DST;
2436                         sd->sd_send_case |= LOCAL_DST;
2437                         return lnet_handle_send(sd);
2438                 }
2439
2440                 CERROR("Internal Error. Expected to have a best_lpni: "
2441                        "%s -> %s\n",
2442                        libcfs_nid2str(sd->sd_src_nid),
2443                        libcfs_nid2str(sd->sd_dst_nid));
2444
2445                 return -EFAULT;
2446         }
2447
2448         /*
2449          * Peer doesn't have a local network. Let's see if there is
2450          * a remote network we can reach it on.
2451          */
2452         return PASS_THROUGH;
2453 }
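
/*
 * Usage sketch: lnet_handle_any_mr_dst() below calls this helper
 * first; a PASS_THROUGH return means no local path was found, and the
 * caller then falls back to selecting a routed path to one of the
 * destination's remote networks.
 */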
2454
2455 /*
2456  * Case 1:
2457  *      Source NID not specified
2458  *      Local destination
2459  *      MR peer
2460  *
2461  * Case 2:
2462  *      Source NID not specified
2463  *      Remote destination
2464  *      MR peer
2465  *
2466  * In both of these cases if we're sending a response, ACK or REPLY, then
2467  * we need to send to the destination NID provided.
2468  *
2469  * In the remote case let's deal with MR routers.
2470  *
2471  */
2472
2473 static int
2474 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2475 {
2476         int rc = 0;
2477         struct lnet_peer *gw_peer = NULL;
2478         struct lnet_peer_ni *gw_lpni = NULL;
2479
2480         /*
2481          * handle sending a response to a remote peer here so we don't
2482          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2483          */
2484         if (sd->sd_send_case & REMOTE_DST &&
2485             sd->sd_send_case & SND_RESP) {
2486                 struct lnet_peer_ni *gw;
2487                 struct lnet_peer *gw_peer;
2488
2489                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2490                                                   &gw_peer);
2491                 if (rc < 0) {
2492                         CERROR("Can't send response to %s. "
2493                                "No route available\n",
2494                                 libcfs_nid2str(sd->sd_dst_nid));
2495                         return -EHOSTUNREACH;
2496                 } else if (rc > 0) {
2497                         return rc;
2498                 }
2499
2500                 sd->sd_best_lpni = gw;
2501                 sd->sd_peer = gw_peer;
2502
2503                 return lnet_handle_send(sd);
2504         }
2505
2506         /*
2507          * Even though the NID for the peer might not be on a local network,
2508          * since the peer is MR there could be other interfaces on the
2509          * local network. In that case we'd still like to prefer the local
2510          * network over the routed network. If we're unable to do that
2511          * then we select the best router among the different routed networks,
2512          * and if the router is MR then we can deal with it as such.
2513          */
2514         rc = lnet_handle_any_mr_dsta(sd);
2515         if (rc != PASS_THROUGH)
2516                 return rc;
2517
2518         /*
2519          * Now that we must route to the destination, we must consider the
2520          * MR case, where the destination has multiple interfaces, some of
2521          * which we can route to and others we do not. For this reason we
2522          * need to select the destination which we can route to and if
2523          * there are multiple, we need to round robin.
2524          */
2525         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2526                                           &gw_peer);
2527         if (rc)
2528                 return rc;
2529
2530         sd->sd_send_case &= ~LOCAL_DST;
2531         sd->sd_send_case |= REMOTE_DST;
2532
2533         sd->sd_peer = gw_peer;
2534         sd->sd_best_lpni = gw_lpni;
2535
2536         return lnet_handle_send(sd);
2537 }
2538
2539 /*
2540  * Source not specified
2541  * Remote destination
2542  * Non-MR peer
2543  *
2544  * Must send to the specified peer NID using the same source NID that
2545  * we've used before. If it's the first time to talk to that peer then
2546  * find the source NI and assign it as preferred to that peer
2547  */
2548 static int
2549 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2550 {
2551         int rc;
2552         struct lnet_peer_ni *gw_lpni = NULL;
2553         struct lnet_peer *gw_peer = NULL;
2554
2555         /*
2556          * Let's set if we have a preferred NI to talk to this NMR peer
2557          */
2558         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd);
2559
2560         /*
2561          * find the router and that'll find the best NI if we didn't find
2562          * it already.
2563          */
2564         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2565                                           &gw_peer);
2566         if (rc)
2567                 return rc;
2568
2569         /*
2570          * set the best_ni we've chosen as the preferred one for
2571          * this peer
2572          */
2573         lnet_set_non_mr_pref_nid(sd);
2574
2575         /* we'll be sending to the gw */
2576         sd->sd_best_lpni = gw_lpni;
2577         sd->sd_peer = gw_peer;
2578
2579         return lnet_handle_send(sd);
2580 }
2581
2582 static int
2583 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2584 {
2585         /*
2586          * turn off the SND_RESP bit.
2587          * It will be checked in the case handling
2588          */
2589         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2590
2591         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2592                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2593                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2594                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2595                 libcfs_nid2str(sd->sd_dst_nid),
2596                 (send_case & LOCAL_DST) ? "local" : "routed");
2597
2598         switch (send_case) {
2599         /*
2600          * For all cases where the source is specified, we should always
2601          * use the destination NID, whether it's an MR destination or not,
2602          * since we're continuing a series of related messages for the
2603          * same RPC
2604          */
2605         case SRC_SPEC_LOCAL_NMR_DST:
2606                 return lnet_handle_spec_local_nmr_dst(sd);
2607         case SRC_SPEC_LOCAL_MR_DST:
2608                 return lnet_handle_spec_local_mr_dst(sd);
2609         case SRC_SPEC_ROUTER_NMR_DST:
2610         case SRC_SPEC_ROUTER_MR_DST:
2611                 return lnet_handle_spec_router_dst(sd);
2612         case SRC_ANY_LOCAL_NMR_DST:
2613                 return lnet_handle_any_local_nmr_dst(sd);
2614         case SRC_ANY_LOCAL_MR_DST:
2615         case SRC_ANY_ROUTER_MR_DST:
2616                 return lnet_handle_any_mr_dst(sd);
2617         case SRC_ANY_ROUTER_NMR_DST:
2618                 return lnet_handle_any_router_nmr_dst(sd);
2619         default:
2620                 CERROR("Unknown send case\n");
2621                 return -1;
2622         }
2623 }
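
/*
 * Example dispatch (hypothetical scenario): a message with no source
 * NID specified, destined to a non-MR peer that is only reachable
 * through a router, carries SRC_ANY | REMOTE_DST | NMR_DST ==
 * SRC_ANY_ROUTER_NMR_DST and is handled by
 * lnet_handle_any_router_nmr_dst().
 */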
2624
2625 static int
2626 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2627                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2628 {
2629         struct lnet_peer_ni     *lpni;
2630         struct lnet_peer        *peer;
2631         struct lnet_send_data   send_data;
2632         int                     cpt, rc;
2633         int                     md_cpt;
2634         __u32                   send_case = 0;
2635
2636         memset(&send_data, 0, sizeof(send_data));
2637
2638         /*
2639          * get an initial CPT to use for locking. The idea here is not to
2640          * serialize the calls to select_pathway, so that as many
2641          * operations can run concurrently as possible. To do that we use
2642          * the CPT where this call is being executed. Later on when we
2643          * determine the CPT to use in lnet_message_commit, we switch the
2644          * lock and check if there was any configuration change.  If none,
2645          * then we proceed, if there is, then we restart the operation.
2646          */
2647         cpt = lnet_net_lock_current();
2648
2649         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2650         if (md_cpt == CFS_CPT_ANY)
2651                 md_cpt = cpt;
2652
2653 again:
2654
2655         /*
2656          * If we're sending to ourselves then there is no need to go through
2657          * any selection. We can shortcut the entire process and send over
2658          * lolnd.
2659          *
2660          * However, we make two exceptions to this rule:
2661          * 1. If the src_nid is specified then our API defines that we must send
2662          *    via that interface.
2663          * 2. Recovery messages must be sent to the lnet_ni that is being
2664          *    recovered.
2665          */
2666         send_data.sd_msg = msg;
2667         send_data.sd_cpt = cpt;
2668         if (src_nid == LNET_NID_ANY && !msg->msg_recovery &&
2669             lnet_nid2ni_locked(dst_nid, cpt)) {
2670                 rc = lnet_handle_lo_send(&send_data);
2671                 lnet_net_unlock(cpt);
2672                 return rc;
2673         }
2674
2675         /*
2676          * find an existing peer_ni, or create one and mark it as having been
2677          * created due to network traffic. This call will create the
2678          * peer->peer_net->peer_ni tree.
2679          */
2680         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2681         if (IS_ERR(lpni)) {
2682                 lnet_net_unlock(cpt);
2683                 return PTR_ERR(lpni);
2684         }
2685
2686         /*
2687          * Cache the original src_nid. If we need to resend the message
2688          * then we'll need to know whether the src_nid was originally
2689          * specified for this message. If it was originally specified,
2690          * then we need to keep using the same src_nid since it's
2691          * continuing the same sequence of messages.
2692          */
2693         msg->msg_src_nid_param = src_nid;
2694
2695         /*
2696          * If necessary, perform discovery on the peer that owns this peer_ni.
2697          * Note, this can result in the ownership of this peer_ni changing
2698          * to another peer object.
2699          */
2700         rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
2701         if (rc) {
2702                 lnet_peer_ni_decref_locked(lpni);
2703                 lnet_net_unlock(cpt);
2704                 return rc;
2705         }
2706         lnet_peer_ni_decref_locked(lpni);
2707
2708         peer = lpni->lpni_peer_net->lpn_peer;
2709
2710         /*
2711          * Identify the different send cases
2712          */
2713         if (src_nid == LNET_NID_ANY)
2714                 send_case |= SRC_ANY;
2715         else
2716                 send_case |= SRC_SPEC;
2717
2718         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2719                 send_case |= LOCAL_DST;
2720         else
2721                 send_case |= REMOTE_DST;
2722
2723         /*
2724          * if this is a non-MR peer or if we're recovering a peer ni then
2725          * let's consider this an NMR case so we can hit the destination
2726          * NID.
2727          */
2728         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery)
2729                 send_case |= NMR_DST;
2730         else
2731                 send_case |= MR_DST;
2732
2733         if (msg->msg_type == LNET_MSG_REPLY ||
2734             msg->msg_type == LNET_MSG_ACK)
2735                 send_case |= SND_RESP;
2736
2737         /* assign parameters to the send_data */
2738         send_data.sd_rtr_nid = rtr_nid;
2739         send_data.sd_src_nid = src_nid;
2740         send_data.sd_dst_nid = dst_nid;
2741         send_data.sd_best_lpni = lpni;
2742         /*
2743          * keep a pointer to the final destination in case we're going to
2744          * route, so we'll need to access it later
2745          */
2746         send_data.sd_final_dst_lpni = lpni;
2747         send_data.sd_peer = peer;
2748         send_data.sd_md_cpt = md_cpt;
2749         send_data.sd_send_case = send_case;
2750
2751         rc = lnet_handle_send_case_locked(&send_data);
2752
2753         /*
2754          * Update the local cpt since send_data.sd_cpt might've been
2755          * updated as a result of calling lnet_handle_send_case_locked().
2756          */
2757         cpt = send_data.sd_cpt;
2758
2759         if (rc == REPEAT_SEND)
2760                 goto again;
2761
2762         lnet_net_unlock(cpt);
2763
2764         return rc;
2765 }
2766
2767 int
2768 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
2769 {
2770         lnet_nid_t              dst_nid = msg->msg_target.nid;
2771         int                     rc;
2772
2773         /*
2774          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
2775          * but we might want to use pre-determined router for ACK/REPLY
2776          * in the future
2777          */
2778         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
2779         LASSERT(msg->msg_txpeer == NULL);
2780         LASSERT(msg->msg_txni == NULL);
2781         LASSERT(!msg->msg_sending);
2782         LASSERT(!msg->msg_target_is_router);
2783         LASSERT(!msg->msg_receiving);
2784
2785         msg->msg_sending = 1;
2786
2787         LASSERT(!msg->msg_tx_committed);
2788
2789         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
2790         if (rc < 0) {
2791                 if (rc == -EHOSTUNREACH)
2792                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
2793                 else
2794                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
2795                 return rc;
2796         }
2797
2798         if (rc == LNET_CREDIT_OK)
2799                 lnet_ni_send(msg->msg_txni, msg);
2800
2801         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
2802         return 0;
2803 }
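
/*
 * Caller-side sketch: a return of 0 from lnet_send() only means the
 * message was accepted. LNET_CREDIT_OK means it was handed to the LND
 * immediately, LNET_CREDIT_WAIT means it was queued on tx credits,
 * and LNET_DC_WAIT means it is parked behind peer discovery; in the
 * latter two cases the message goes out later, once credits become
 * available or discovery completes.
 */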
2804
2805 enum lnet_mt_event_type {
2806         MT_TYPE_LOCAL_NI = 0,
2807         MT_TYPE_PEER_NI
2808 };
2809
2810 struct lnet_mt_event_info {
2811         enum lnet_mt_event_type mt_type;
2812         lnet_nid_t mt_nid;
2813 };
2814
2815 /* called with res_lock held */
2816 void
2817 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
2818 {
2819         struct lnet_rsp_tracker *rspt;
2820
2821         /*
2822          * msg has a refcount on the MD so the MD is not going away.
2823          * The rspt queue for the cpt is protected by
2824          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
2825          */
2826         if (!md->md_rspt_ptr)
2827                 return;
2828
2829         rspt = md->md_rspt_ptr;
2830
2831         /* debug code */
2832         LASSERT(rspt->rspt_cpt == cpt);
2833
2834         md->md_rspt_ptr = NULL;
2835
2836         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
2837                 /*
2838                  * The monitor thread has invalidated this handle because the
2839                  * response timed out, but it failed to lookup the MD. That
2840                  * means this response tracker is on the zombie list. We can
2841                  * safely remove it under the resource lock (held by caller) and
2842                  * free the response tracker block.
2843                  */
2844                 list_del(&rspt->rspt_on_list);
2845                 lnet_rspt_free(rspt, cpt);
2846         } else {
2847                 /*
2848                  * invalidate the handle to indicate that a response has been
2849                  * received, which will then lead the monitor thread to clean up
2850                  * the rspt block.
2851                  */
2852                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
2853         }
2854 }
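
/*
 * Lifecycle sketch: a valid rspt_mdh here means the tracker is still
 * on its per-CPT response queue, and invalidating the handle signals
 * the monitor thread to clean it up. An already-invalid handle means
 * the monitor thread timed the response out and moved the tracker to
 * the zombie list, so it is removed and freed right here.
 */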
2855
2856 void
2857 lnet_clean_zombie_rstqs(void)
2858 {
2859         struct lnet_rsp_tracker *rspt, *tmp;
2860         int i;
2861
2862         cfs_cpt_for_each(i, lnet_cpt_table()) {
2863                 list_for_each_entry_safe(rspt, tmp,
2864                                          the_lnet.ln_mt_zombie_rstqs[i],
2865                                          rspt_on_list) {
2866                         list_del(&rspt->rspt_on_list);
2867                         lnet_rspt_free(rspt, i);
2868                 }
2869         }
2870
2871         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
2872 }
2873
2874 static void
2875 lnet_finalize_expired_responses(void)
2876 {
2877         struct lnet_libmd *md;
2878         struct list_head local_queue;
2879         struct lnet_rsp_tracker *rspt, *tmp;
2880         ktime_t now;
2881         int i;
2882
2883         if (the_lnet.ln_mt_rstq == NULL)
2884                 return;
2885
2886         cfs_cpt_for_each(i, lnet_cpt_table()) {
2887                 INIT_LIST_HEAD(&local_queue);
2888
2889                 lnet_net_lock(i);
2890                 if (!the_lnet.ln_mt_rstq[i]) {
2891                         lnet_net_unlock(i);
2892                         continue;
2893                 }
2894                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
2895                 lnet_net_unlock(i);
2896
2897                 now = ktime_get();
2898
2899                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
2900                          * The rspt mdh will be invalidated when a response
2901                          * is received or whenever we want to discard the
2902                          * block. The monitor thread will walk the queue
2903                          * and clean up any rspts with an invalid mdh.
2904                          * and clean up any rsts with an invalid mdh.
2905                          * The monitor thread will walk the queue until
2906                          * the first unexpired rspt block. This means that
2907                          * some rspt blocks which received their
2908                          * corresponding responses will linger in the
2909                          * queue until they are cleaned up eventually.
2910                          */
2911                         lnet_res_lock(i);
2912                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
2913                                 lnet_res_unlock(i);
2914                                 list_del(&rspt->rspt_on_list);
2915                                 lnet_rspt_free(rspt, i);
2916                                 continue;
2917                         }
2918
2919                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
2920                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
2921                                 struct lnet_peer_ni *lpni;
2922                                 lnet_nid_t nid;
2923
2924                                 md = lnet_handle2md(&rspt->rspt_mdh);
2925                                 if (!md) {
2926                                         /* MD has been queued for unlink, but
2927                                          * rspt hasn't been detached (Note we've
2928                                          * checked above that the rspt_mdh is
2929                                          * valid). Since we cannot lookup the MD
2930                                          * we're unable to detach the rspt
2931                                          * ourselves. Thus, move the rspt to the
2932                                          * zombie list where we'll wait for
2933                                          * either:
2934                                          *   1. The remaining operations on the
2935                                          *   MD to complete. In this case the
2936                                          *   final operation will result in
2937                                          *   lnet_msg_detach_md()->
2938                                          *   lnet_detach_rsp_tracker() where
2939                                          *   we will clean up this response
2940                                          *   tracker.
2941                                          *   2. LNet to shut down. In this case
2942                                          *   we'll wait until after all LND Nets
2943                                          *   have shutdown and then we can
2944                                          *   safely free any remaining response
2945                                          *   tracker blocks on the zombie list.
2946                                          * Note: We need to hold the resource
2947                                          * lock when adding to the zombie list
2948                                          * because we may have concurrent access
2949                                          * with lnet_detach_rsp_tracker().
2950                                          */
2951                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
2952                                         list_move(&rspt->rspt_on_list,
2953                                                   the_lnet.ln_mt_zombie_rstqs[i]);
2954                                         lnet_res_unlock(i);
2955                                         continue;
2956                                 }
2957                                 LASSERT(md->md_rspt_ptr == rspt);
2958                                 md->md_rspt_ptr = NULL;
2959                                 lnet_res_unlock(i);
2960
2961                                 LNetMDUnlink(rspt->rspt_mdh);
2962
2963                                 nid = rspt->rspt_next_hop_nid;
2964
2965                                 list_del(&rspt->rspt_on_list);
2966                                 lnet_rspt_free(rspt, i);
2967
2968                                 /* If we're shutting down we just want to clean
2969                                  * up the rspt blocks
2970                                  */
2971                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
2972                                         continue;
2973
2974                                 lnet_net_lock(i);
2975                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
2976                                 lnet_net_unlock(i);
2977
2978                                 CDEBUG(D_NET,
2979                                        "Response timeout: md = %p: nid = %s\n",
2980                                        md, libcfs_nid2str(nid));
2981
2982                                 /*
2983                                  * If there is a timeout on the response
2984                                  * from the next hop decrement its health
2985                                  * value so that we don't use it
2986                                  */
2987                                 lnet_net_lock(0);
2988                                 lpni = lnet_find_peer_ni_locked(nid);
2989                                 if (lpni) {
2990                                         lnet_handle_remote_failure_locked(lpni);
2991                                         lnet_peer_ni_decref_locked(lpni);
2992                                 }
2993                                 lnet_net_unlock(0);
2994                         } else {
2995                                 lnet_res_unlock(i);
2996                                 break;
2997                         }
2998                 }
2999
3000                 if (!list_empty(&local_queue)) {
3001                         lnet_net_lock(i);
3002                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3003                         lnet_net_unlock(i);
3004                 }
3005         }
3006 }
3007
3008 static void
3009 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3010 {
3011         struct lnet_msg *msg;
3012
3013         while (!list_empty(resendq)) {
3014                 struct lnet_peer_ni *lpni;
3015
3016                 msg = list_entry(resendq->next, struct lnet_msg,
3017                                  msg_list);
3018
3019                 list_del_init(&msg->msg_list);
3020
3021                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3022                 if (!lpni) {
3023                         lnet_net_unlock(cpt);
3024                         CERROR("Expected that a peer is already created for %s\n",
3025                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3026                         msg->msg_no_resend = true;
3027                         lnet_finalize(msg, -EFAULT);
3028                         lnet_net_lock(cpt);
3029                 } else {
3030                         struct lnet_peer *peer;
3031                         int rc;
3032                         lnet_nid_t src_nid = LNET_NID_ANY;
3033
3034                         /*
3035                          * if this message is not being routed and the
3036                          * peer is non-MR then we must use the same
3037                          * src_nid that was used in the original send.
3038                          * Otherwise, if we're routing the message (i.e.
3039                          * we're a router), then we can use any of our
3040                          * local interfaces. It doesn't matter to the
3041                          * final destination.
3042                          */
3043                         peer = lpni->lpni_peer_net->lpn_peer;
3044                         if (!msg->msg_routing &&
3045                             !lnet_peer_is_multi_rail(peer))
3046                                 src_nid = le64_to_cpu(msg->msg_hdr.src_nid);
3047
3048                         /*
3049                          * If we originally specified a src NID, then we
3050                          * must attempt to reuse it in the resend as well.
3051                          */
3052                         if (msg->msg_src_nid_param != LNET_NID_ANY)
3053                                 src_nid = msg->msg_src_nid_param;
3054                         lnet_peer_ni_decref_locked(lpni);
3055
3056                         lnet_net_unlock(cpt);
3057                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3058                                libcfs_nid2str(src_nid),
3059                                libcfs_id2str(msg->msg_target),
3060                                lnet_msgtyp2str(msg->msg_type),
3061                                msg->msg_recovery,
3062                                msg->msg_retry_count);
3063                         rc = lnet_send(src_nid, msg, LNET_NID_ANY);
3064                         if (rc) {
3065                                 CERROR("Error sending %s to %s: %d\n",
3066                                        lnet_msgtyp2str(msg->msg_type),
3067                                        libcfs_id2str(msg->msg_target), rc);
3068                                 msg->msg_no_resend = true;
3069                                 lnet_finalize(msg, rc);
3070                         }
3071                         lnet_net_lock(cpt);
3072                         if (!rc)
3073                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3074                 }
3075         }
3076 }
3077
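     /*
      * Source NID selection on resend, summarized (this restates the
      * logic above; it adds nothing new):
      *
      *   msg_src_nid_param was set:       reuse that src NID
      *   not routed, peer is non-MR:      reuse the original src NID
      *   otherwise:                       LNET_NID_ANY, i.e. any local
      *                                    interface may be used
      */
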
3078 static void
3079 lnet_resend_pending_msgs(void)
3080 {
3081         int i;
3082
3083         cfs_cpt_for_each(i, lnet_cpt_table()) {
3084                 lnet_net_lock(i);
3085                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3086                 lnet_net_unlock(i);
3087         }
3088 }
3089
3090 /* called with lnet_net_lock(cpt) and lnet_ni_lock(ni) held */
3091 static void
3092 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3093 {
3094         struct lnet_handle_md recovery_mdh;
3095
3096         LNetInvalidateMDHandle(&recovery_mdh);
3097
3098         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3099             force) {
3100                 recovery_mdh = ni->ni_ping_mdh;
3101                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3102         }
3103         lnet_ni_unlock(ni);
3104         lnet_net_unlock(cpt);
3105         if (!LNetMDHandleIsInvalid(recovery_mdh))
3106                 LNetMDUnlink(recovery_mdh);
3107         lnet_net_lock(cpt);
3108         lnet_ni_lock(ni);
3109 }
3110
3111 static void
3112 lnet_recover_local_nis(void)
3113 {
3114         struct lnet_mt_event_info *ev_info;
3115         struct list_head processed_list;
3116         struct list_head local_queue;
3117         struct lnet_handle_md mdh;
3118         struct lnet_ni *tmp;
3119         struct lnet_ni *ni;
3120         lnet_nid_t nid;
3121         int healthv;
3122         int rc;
3123
3124         INIT_LIST_HEAD(&local_queue);
3125         INIT_LIST_HEAD(&processed_list);
3126
3127         /*
3128          * splice the recovery queue onto a local queue. We will iterate
3129          * through the local queue and update it as needed. Once we're
3130          * done with the traversal, we'll splice the local queue back
3131          * onto the head of ln_mt_localNIRecovq. Any newly added local
3132          * NIs will be traversed in the next iteration.
3133          */
3134         lnet_net_lock(0);
3135         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3136                          &local_queue);
3137         lnet_net_unlock(0);
3138
3139         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3140                 /*
3141                  * if an NI is being deleted or it is now healthy, there
3142                  * is no need to keep it around in the recovery queue.
3143                  * The monitor thread is the only thread responsible for
3144                  * removing the NI from the recovery queue.
3145                  * Multiple threads can be adding NIs to the recovery
3146                  * queue.
3147                  */
3148                 healthv = atomic_read(&ni->ni_healthv);
3149
3150                 lnet_net_lock(0);
3151                 lnet_ni_lock(ni);
3152                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3153                     healthv == LNET_MAX_HEALTH_VALUE) {
3154                         list_del_init(&ni->ni_recovery);
3155                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3156                         lnet_ni_unlock(ni);
3157                         lnet_ni_decref_locked(ni, 0);
3158                         lnet_net_unlock(0);
3159                         continue;
3160                 }
3161
3162                 /*
3163                  * if the local NI failed recovery we must unlink the md.
3164                  * But we want to keep the local_ni on the recovery queue
3165                  * so we can continue the attempts to recover it.
3166                  */
3167                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3168                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3169                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3170                 }
3171
3172                 lnet_ni_unlock(ni);
3173                 lnet_net_unlock(0);
3174
3176                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3177                        libcfs_nid2str(ni->ni_nid));
3178
3179                 lnet_ni_lock(ni);
3180                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3181                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3182                         lnet_ni_unlock(ni);
3183
3184                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3185                         if (!ev_info) {
3186                                 CERROR("Out of memory. Can't recover %s\n",
3187                                        libcfs_nid2str(ni->ni_nid));
3188                                 lnet_ni_lock(ni);
3189                                 ni->ni_recovery_state &=
3190                                   ~LNET_NI_RECOVERY_PENDING;
3191                                 lnet_ni_unlock(ni);
3192                                 continue;
3193                         }
3194
3195                         mdh = ni->ni_ping_mdh;
3196                         /*
3197                          * Invalidate the ni mdh in case the NI is deleted;
3198                          * we'll unlink the mdh below in that case.
3199                          */
3200                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3201                         nid = ni->ni_nid;
3202
3203                         /*
3204                          * remove the NI from the local queue and drop the
3205                          * reference count to it while we're recovering it.
3206                          * The reason is that the NI could be deleted, and
3207                          * the way the code is structured, if we don't
3208                          * drop the reference then the deletion code will
3209                          * enter a loop waiting for the reference count to
3210                          * drop while holding the ln_mutex_lock(). When we
3211                          * look up the peer to send to in
3212                          * lnet_select_pathway() we will try to take the
3213                          * ln_mutex_lock() as well, leading to a deadlock.
3214                          * By dropping the refcount and removing the NI
3215                          * from the list, we allow it to be deleted; then
3216                          * we use the cached NID to look it up again. If
3217                          * it's gone, we just continue examining the rest
3218                          * of the queue.
3219                          */
3220                         lnet_net_lock(0);
3221                         list_del_init(&ni->ni_recovery);
3222                         lnet_ni_decref_locked(ni, 0);
3223                         lnet_net_unlock(0);
3224
3225                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3226                         ev_info->mt_nid = nid;
3227                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3228                                             ev_info, the_lnet.ln_mt_eqh, true);
3229                         /* lookup the nid again */
3230                         lnet_net_lock(0);
3231                         ni = lnet_nid2ni_locked(nid, 0);
3232                         if (!ni) {
3233                                 /*
3234                                  * the NI has been deleted when we dropped
3235                                  * the ref count
3236                                  */
3237                                 lnet_net_unlock(0);
3238                                 LNetMDUnlink(mdh);
3239                                 continue;
3240                         }
3241                         /*
3242                          * Same note as in lnet_recover_peer_nis(). When
3243                          * we're sending the ping, the NI is free to be
3244                          * deleted or manipulated. By this point it
3245                          * could've been added back on the recovery queue,
3246                          * and a refcount taken on it.
3247                          * So we can't just add it blindly again or we'll
3248                          * corrupt the queue. We must check under lock if
3249                          * it's not on any list and if not then add it
3250                          * to the processed list, which will eventually be
3251                          * spliced back on to the recovery queue.
3252                          */
3253                         ni->ni_ping_mdh = mdh;
3254                         if (list_empty(&ni->ni_recovery)) {
3255                                 list_add_tail(&ni->ni_recovery, &processed_list);
3256                                 lnet_ni_addref_locked(ni, 0);
3257                         }
3258                         lnet_net_unlock(0);
3259
3260                         lnet_ni_lock(ni);
3261                         if (rc)
3262                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3263                 }
3264                 lnet_ni_unlock(ni);
3265         }
3266
3267         /*
3268          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3269          * reexamined in the next iteration.
3270          */
3271         list_splice_init(&processed_list, &local_queue);
3272         lnet_net_lock(0);
3273         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3274         lnet_net_unlock(0);
3275 }
3276
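     /*
      * Both recovery scans (this function and lnet_recover_peer_nis()
      * below) use the same splice-process-splice idiom. A minimal
      * sketch of the pattern, with a hypothetical list "the_queue"
      * standing in for the recovery queue:
      *
      *	struct list_head local;
      *
      *	INIT_LIST_HEAD(&local);
      *	lnet_net_lock(0);
      *	list_splice_init(&the_queue, &local);
      *	lnet_net_unlock(0);
      *	...walk &local without the lock held; entries queued
      *	...concurrently land on &the_queue and are seen next pass
      *	lnet_net_lock(0);
      *	list_splice(&local, &the_queue);
      *	lnet_net_unlock(0);
      */
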
3277 static int
3278 lnet_resendqs_create(void)
3279 {
3280         struct list_head **resendqs;
3281
3282         resendqs = lnet_create_array_of_queues();
3283         if (!resendqs)
3284                 return -ENOMEM;
3285
3286         lnet_net_lock(LNET_LOCK_EX);
3287         the_lnet.ln_mt_resendqs = resendqs;
3288         lnet_net_unlock(LNET_LOCK_EX);
3289
3290         return 0;
3291 }
3292
3293 static void
3294 lnet_clean_local_ni_recoveryq(void)
3295 {
3296         struct lnet_ni *ni;
3297
3298         /* This is only called when the monitor thread has stopped */
3299         lnet_net_lock(0);
3300
3301         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3302                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3303                                 struct lnet_ni, ni_recovery);
3304                 list_del_init(&ni->ni_recovery);
3305                 lnet_ni_lock(ni);
3306                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3307                 lnet_ni_unlock(ni);
3308                 lnet_ni_decref_locked(ni, 0);
3309         }
3310
3311         lnet_net_unlock(0);
3312 }
3313
3314 static void
3315 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3316                                      bool force)
3317 {
3318         struct lnet_handle_md recovery_mdh;
3319
3320         LNetInvalidateMDHandle(&recovery_mdh);
3321
3322         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3323                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3324                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3325         }
3326         spin_unlock(&lpni->lpni_lock);
3327         lnet_net_unlock(cpt);
3328         if (!LNetMDHandleIsInvalid(recovery_mdh))
3329                 LNetMDUnlink(recovery_mdh);
3330         lnet_net_lock(cpt);
3331         spin_lock(&lpni->lpni_lock);
3332 }
3333
3334 static void
3335 lnet_clean_peer_ni_recoveryq(void)
3336 {
3337         struct lnet_peer_ni *lpni, *tmp;
3338
3339         lnet_net_lock(LNET_LOCK_EX);
3340
3341         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3342                                  lpni_recovery) {
3343                 list_del_init(&lpni->lpni_recovery);
3344                 spin_lock(&lpni->lpni_lock);
3345                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3346                 spin_unlock(&lpni->lpni_lock);
3347                 lnet_peer_ni_decref_locked(lpni);
3348         }
3349
3350         lnet_net_unlock(LNET_LOCK_EX);
3351 }
3352
3353 static void
3354 lnet_clean_resendqs(void)
3355 {
3356         struct lnet_msg *msg, *tmp;
3357         struct list_head msgs;
3358         int i;
3359
3360         INIT_LIST_HEAD(&msgs);
3361
3362         cfs_cpt_for_each(i, lnet_cpt_table()) {
3363                 lnet_net_lock(i);
3364                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3365                 lnet_net_unlock(i);
3366                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3367                         list_del_init(&msg->msg_list);
3368                         msg->msg_no_resend = true;
3369                         lnet_finalize(msg, -ESHUTDOWN);
3370                 }
3371         }
3372
3373         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3374 }
3375
3376 static void
3377 lnet_recover_peer_nis(void)
3378 {
3379         struct lnet_mt_event_info *ev_info;
3380         struct list_head processed_list;
3381         struct list_head local_queue;
3382         struct lnet_handle_md mdh;
3383         struct lnet_peer_ni *lpni;
3384         struct lnet_peer_ni *tmp;
3385         lnet_nid_t nid;
3386         int healthv;
3387         int rc;
3388
3389         INIT_LIST_HEAD(&local_queue);
3390         INIT_LIST_HEAD(&processed_list);
3391
3392         /*
3393          * Always use cpt 0 for locking across all interactions with
3394          * ln_mt_peerNIRecovq
3395          */
3396         lnet_net_lock(0);
3397         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3398                          &local_queue);
3399         lnet_net_unlock(0);
3400
3401         list_for_each_entry_safe(lpni, tmp, &local_queue,
3402                                  lpni_recovery) {
3403                 /*
3404                  * The same protection strategy is used here as is in the
3405                  * local recovery case.
3406                  */
3407                 lnet_net_lock(0);
3408                 healthv = atomic_read(&lpni->lpni_healthv);
3409                 spin_lock(&lpni->lpni_lock);
3410                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3411                     healthv == LNET_MAX_HEALTH_VALUE) {
3412                         list_del_init(&lpni->lpni_recovery);
3413                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3414                         spin_unlock(&lpni->lpni_lock);
3415                         lnet_peer_ni_decref_locked(lpni);
3416                         lnet_net_unlock(0);
3417                         continue;
3418                 }
3419
3420                 /*
3421                  * If the peer NI has failed recovery we must unlink the
3422                  * md. But we want to keep the peer ni on the recovery
3423                  * queue so we can try to continue recovering it
3424                  */
3425                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3426                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3427                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3428                 }
3429
3430                 spin_unlock(&lpni->lpni_lock);
3431                 lnet_net_unlock(0);
3432
3433                 /*
3434                  * NOTE: we're racing with peer deletion from user space.
3435                  * It's possible that a peer is deleted after we check its
3436                  * state. In this case the recovery can create a new peer.
3437                  */
3438                 spin_lock(&lpni->lpni_lock);
3439                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3440                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3441                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3442                         spin_unlock(&lpni->lpni_lock);
3443
3444                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3445                         if (!ev_info) {
3446                                 CERROR("Out of memory. Can't recover %s\n",
3447                                        libcfs_nid2str(lpni->lpni_nid));
3448                                 spin_lock(&lpni->lpni_lock);
3449                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3450                                 spin_unlock(&lpni->lpni_lock);
3451                                 continue;
3452                         }
3453
3454                         /* look at the comments in lnet_recover_local_nis() */
3455                         mdh = lpni->lpni_recovery_ping_mdh;
3456                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3457                         nid = lpni->lpni_nid;
3458                         lnet_net_lock(0);
3459                         list_del_init(&lpni->lpni_recovery);
3460                         lnet_peer_ni_decref_locked(lpni);
3461                         lnet_net_unlock(0);
3462
3463                         ev_info->mt_type = MT_TYPE_PEER_NI;
3464                         ev_info->mt_nid = nid;
3465                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3466                                             ev_info, the_lnet.ln_mt_eqh, true);
3467                         lnet_net_lock(0);
3468                         /*
3469                          * lnet_find_peer_ni_locked() grabs a refcount for
3470                          * us. No need to take it explicitly.
3471                          */
3472                         lpni = lnet_find_peer_ni_locked(nid);
3473                         if (!lpni) {
3474                                 lnet_net_unlock(0);
3475                                 LNetMDUnlink(mdh);
3476                                 continue;
3477                         }
3478
3479                         lpni->lpni_recovery_ping_mdh = mdh;
3480                         /*
3481                          * While we were unlocked the lpni could have been
3482                          * re-added to the recovery queue. In that case we
3483                          * don't need to add it to the local queue, since
3484                          * it's already on there and the thread that added
3485                          * it will have taken a reference on the peer;
3486                          * that means we must drop the reference that was
3487                          * implicitly grabbed by find_peer_ni_locked().
3488                          * Otherwise, if the lpni is still not on the
3489                          * recovery queue, then we'll add it to the
3490                          * processed list.
3491                          */
3492                         if (list_empty(&lpni->lpni_recovery))
3493                                 list_add_tail(&lpni->lpni_recovery, &processed_list);
3494                         else
3495                                 lnet_peer_ni_decref_locked(lpni);
3496                         lnet_net_unlock(0);
3497
3498                         spin_lock(&lpni->lpni_lock);
3499                         if (rc)
3500                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3501                 }
3502                 spin_unlock(&lpni->lpni_lock);
3503         }
3504
3505         list_splice_init(&processed_list, &local_queue);
3506         lnet_net_lock(0);
3507         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3508         lnet_net_unlock(0);
3509 }
3510
3511 static int
3512 lnet_monitor_thread(void *arg)
3513 {
3514         time64_t recovery_timeout = 0;
3515         time64_t rsp_timeout = 0;
3516         int interval;
3517         time64_t now;
3518
3519         wait_for_completion(&the_lnet.ln_started);
3520         /*
3521          * The monitor thread takes care of the following:
3522          *  1. Checks the aliveness of routers
3523          *  2. Checks whether there are messages on the resend
3524          *     queue and resends them
3525          *  3. Checks whether there are any NIs on the local
3526          *     recovery queue and pings them
3527          *  4. Checks whether there are any peer NIs on the remote
3528          *     recovery queue and pings them
3529          */
3530         cfs_block_allsigs();
3531
3532         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3533                 now = ktime_get_real_seconds();
3534
3535                 if (lnet_router_checker_active())
3536                         lnet_check_routers();
3537
3538                 lnet_resend_pending_msgs();
3539
3540                 if (now >= rsp_timeout) {
3541                         lnet_finalize_expired_responses();
3542                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3543                 }
3544
3545                 if (now >= recovery_timeout) {
3546                         lnet_recover_local_nis();
3547                         lnet_recover_peer_nis();
3548                         recovery_timeout = now + lnet_recovery_interval;
3549                 }
3550
3551                 /*
3552                  * TODO: do we need to check whether we should sleep
3553                  * without a timeout? Technically, an active system
3554                  * will always have messages in flight, so that check
3555                  * would always evaluate to false. And on an idle
3556                  * system, do we care if we wake up every second?
3557                  * Although, we have seen complaints about an idle
3558                  * thread waking up unnecessarily.
3559                  *
3560                  * Take into account the current net_count when
3561                  * waking up for alive router checking, since we may
3562                  * need to check as many networks as we have configured.
3563                  */
3564                 interval = min(lnet_recovery_interval,
3565                                min((unsigned int) alive_router_check_interval /
3566                                         lnet_current_net_count,
3567                                    lnet_transaction_timeout / 2));
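                     /*
                      * For example, with lnet_recovery_interval = 1,
                      * alive_router_check_interval = 60,
                      * lnet_current_net_count = 4 and
                      * lnet_transaction_timeout = 50 (illustrative
                      * values, not necessarily the defaults):
                      * interval = min(1, min(60 / 4, 50 / 2)) = 1
                      * second. Note this assumes at least one
                      * configured net (lnet_current_net_count >= 1).
                      */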
3568                 wait_for_completion_interruptible_timeout(
3569                         &the_lnet.ln_mt_wait_complete,
3570                         cfs_time_seconds(interval));
3571                 /* Must re-init the completion before testing anything,
3572                  * including ln_mt_state.
3573                  */
3574                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3575         }
3576
3577         /* Shutting down */
3578         lnet_net_lock(LNET_LOCK_EX);
3579         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3580         lnet_net_unlock(LNET_LOCK_EX);
3581
3582         /* signal that the monitor thread is exiting */
3583         up(&the_lnet.ln_mt_signal);
3584
3585         return 0;
3586 }
3587
3588 /*
3589  * lnet_send_ping
3590  * Sends a ping.
3591  * Returns == 0 on success
3592  * Returns > 0 (a positive errno) if buffer allocation or LNetMDBind fails
3593  * Returns < 0 if dest_nid is invalid or LNetGet fails
3594  */
3595 int
3596 lnet_send_ping(lnet_nid_t dest_nid,
3597                struct lnet_handle_md *mdh, int nnis,
3598                void *user_data, struct lnet_handle_eq eqh, bool recovery)
3599 {
3600         struct lnet_md md = { NULL };
3601         struct lnet_process_id id;
3602         struct lnet_ping_buffer *pbuf;
3603         int rc;
3604
3605         if (dest_nid == LNET_NID_ANY) {
3606                 rc = -EHOSTUNREACH;
3607                 goto fail_error;
3608         }
3609
3610         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3611         if (!pbuf) {
3612                 rc = ENOMEM;
3613                 goto fail_error;
3614         }
3615
3616         /* initialize md content */
3617         md.start     = &pbuf->pb_info;
3618         md.length    = LNET_PING_INFO_SIZE(nnis);
3619         md.threshold = 2; /* GET/REPLY */
3620         md.max_size  = 0;
3621         md.options   = LNET_MD_TRUNCATE;
3622         md.user_ptr  = user_data;
3623         md.eq_handle = eqh;
3624
3625         rc = LNetMDBind(md, LNET_UNLINK, mdh);
3626         if (rc) {
3627                 lnet_ping_buffer_decref(pbuf);
3628                 CERROR("Can't bind MD: %d\n", rc);
3629                 rc = -rc; /* change the rc to positive */
3630                 goto fail_error;
3631         }
3632         id.pid = LNET_PID_LUSTRE;
3633         id.nid = dest_nid;
3634
3635         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3636                      LNET_RESERVED_PORTAL,
3637                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3638
3639         if (rc)
3640                 goto fail_unlink_md;
3641
3642         return 0;
3643
3644 fail_unlink_md:
3645         LNetMDUnlink(*mdh);
3646         LNetInvalidateMDHandle(mdh);
3647 fail_error:
3648         return rc;
3649 }
3650
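     /*
      * Typical usage, condensed from the recovery paths above (the
      * surrounding locking and bookkeeping are elided):
      *
      *	mdh = ni->ni_ping_mdh;
      *	LNetInvalidateMDHandle(&ni->ni_ping_mdh);
      *	rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
      *			    ev_info, the_lnet.ln_mt_eqh, true);
      *	if (rc)
      *		...the ping was not sent; clear the pending state...
      */
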
3651 static void
3652 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3653                            int status, bool unlink_event)
3654 {
3655         lnet_nid_t nid = ev_info->mt_nid;
3656
3657         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3658                 struct lnet_ni *ni;
3659
3660                 lnet_net_lock(0);
3661                 ni = lnet_nid2ni_locked(nid, 0);
3662                 if (!ni) {
3663                         lnet_net_unlock(0);
3664                         return;
3665                 }
3666                 lnet_ni_lock(ni);
3667                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3668                 if (status)
3669                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3670                 lnet_ni_unlock(ni);
3671                 lnet_net_unlock(0);
3672
3673                 if (status != 0) {
3674                         CERROR("local NI (%s) recovery failed with %d\n",
3675                                libcfs_nid2str(nid), status);
3676                         return;
3677                 }
3678                 /*
3679                  * need to increment healthv for the ni here, because in
3680                  * the lnet_finalize() path we don't have access to this
3681                  * NI. And in order to get access to it, we'll need to
3682                  * carry forward too much information.
3683                  * In the peer case, it'll naturally be incremented
3684                  */
3685                 if (!unlink_event)
3686                         lnet_inc_healthv(&ni->ni_healthv);
3687         } else {
3688                 struct lnet_peer_ni *lpni;
3689                 int cpt;
3690
3691                 cpt = lnet_net_lock_current();
3692                 lpni = lnet_find_peer_ni_locked(nid);
3693                 if (!lpni) {
3694                         lnet_net_unlock(cpt);
3695                         return;
3696                 }
3697                 spin_lock(&lpni->lpni_lock);
3698                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3699                 if (status)
3700                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3701                 spin_unlock(&lpni->lpni_lock);
3702                 lnet_peer_ni_decref_locked(lpni);
3703                 lnet_net_unlock(cpt);
3704
3705                 if (status != 0)
3706                         CERROR("peer NI (%s) recovery failed with %d\n",
3707                                libcfs_nid2str(nid), status);
3708         }
3709 }
3710
3711 void
3712 lnet_mt_event_handler(struct lnet_event *event)
3713 {
3714         struct lnet_mt_event_info *ev_info = event->md.user_ptr;
3715         struct lnet_ping_buffer *pbuf;
3716
3717         /* TODO: remove assert */
3718         LASSERT(event->type == LNET_EVENT_REPLY ||
3719                 event->type == LNET_EVENT_SEND ||
3720                 event->type == LNET_EVENT_UNLINK);
3721
3722         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3723                event->status);
3724
3725         switch (event->type) {
3726         case LNET_EVENT_UNLINK:
3727                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3728                        libcfs_nid2str(ev_info->mt_nid));
3729                 /* fallthrough */
3730         case LNET_EVENT_REPLY:
3731                 lnet_handle_recovery_reply(ev_info, event->status,
3732                                            event->type == LNET_EVENT_UNLINK);
3733                 break;
3734         case LNET_EVENT_SEND:
3735                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3736                                libcfs_nid2str(ev_info->mt_nid),
3737                                (event->status) ? "unsuccessfully" :
3738                                "successfully", event->status);
3739                 break;
3740         default:
3741                 CERROR("Unexpected event: %d\n", event->type);
3742                 break;
3743         }
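             /* ev_info and the ping buffer are tied to the MD: earlier
              * events (e.g. the SEND preceding a REPLY) still reference
              * them, so free both only once the MD has been unlinked.
              */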
3744         if (event->unlinked) {
3745                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3746                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
3747                 lnet_ping_buffer_decref(pbuf);
3748         }
3749 }
3750
3751 static int
3752 lnet_rsp_tracker_create(void)
3753 {
3754         struct list_head **rstqs;
3755
3756         rstqs = lnet_create_array_of_queues();
3757         if (!rstqs)
3758                 return -ENOMEM;
3759
3760         the_lnet.ln_mt_rstq = rstqs;
3761
3762         return 0;
3763 }
3764
3765 static void
3766 lnet_rsp_tracker_clean(void)
3767 {
3768         lnet_finalize_expired_responses();
3769
3770         cfs_percpt_free(the_lnet.ln_mt_rstq);
3771         the_lnet.ln_mt_rstq = NULL;
3772 }
3773
3774 int lnet_monitor_thr_start(void)
3775 {
3776         int rc = 0;
3777         struct task_struct *task;
3778
3779         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
3780                 return -EALREADY;
3781
3782         rc = lnet_resendqs_create();
3783         if (rc)
3784                 return rc;
3785
3786         rc = lnet_rsp_tracker_create();
3787         if (rc)
3788                 goto clean_queues;
3789
3790         sema_init(&the_lnet.ln_mt_signal, 0);
3791
3792         lnet_net_lock(LNET_LOCK_EX);
3793         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
3794         lnet_net_unlock(LNET_LOCK_EX);
3795         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
3796         if (IS_ERR(task)) {
3797                 rc = PTR_ERR(task);
3798                 CERROR("Can't start monitor thread: %d\n", rc);
3799                 goto clean_thread;
3800         }
3801
3802         return 0;
3803
3804 clean_thread:
3805         /* the monitor thread never started, so nothing will post
3806          * ln_mt_signal; blocking on it here would hang forever.
3807          * Just reset the state and fall through to the cleanup.
3808          */
3809         lnet_net_lock(LNET_LOCK_EX);
3810         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3811         lnet_net_unlock(LNET_LOCK_EX);
3814         lnet_rsp_tracker_clean();
3815         lnet_clean_local_ni_recoveryq();
3816         lnet_clean_peer_ni_recoveryq();
3817         lnet_clean_resendqs();
3818         LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
3819         return rc;
3820 clean_queues:
3821         lnet_rsp_tracker_clean();
3822         lnet_clean_local_ni_recoveryq();
3823         lnet_clean_peer_ni_recoveryq();
3824         lnet_clean_resendqs();
3825         return rc;
3826 }
3827
3828 void lnet_monitor_thr_stop(void)
3829 {
3830         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3831                 return;
3832
3833         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
3834         lnet_net_lock(LNET_LOCK_EX);
3835         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3836         lnet_net_unlock(LNET_LOCK_EX);
3837
3838         /* tell the monitor thread that we're shutting down */
3839         complete(&the_lnet.ln_mt_wait_complete);
3840
3841         /* block until monitor thread signals that it's done */
3842         down(&the_lnet.ln_mt_signal);
3843         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
3844
3845         /* perform cleanup tasks */
3846         lnet_rsp_tracker_clean();
3847         lnet_clean_local_ni_recoveryq();
3848         lnet_clean_peer_ni_recoveryq();
3849         lnet_clean_resendqs();
3850 }
3851
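     /*
      * Shutdown handshake, for reference: lnet_monitor_thr_stop() sets
      * LNET_MT_STATE_STOPPING and completes ln_mt_wait_complete to wake
      * the monitor thread; lnet_monitor_thread() observes the state
      * change, sets LNET_MT_STATE_SHUTDOWN and ups ln_mt_signal, on
      * which lnet_monitor_thr_stop() blocks before cleaning up.
      */
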
3852 void
3853 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
3854                   __u32 msg_type)
3855 {
3856         lnet_net_lock(cpt);
3857         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
3858         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
3859         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
3860         lnet_net_unlock(cpt);
3861
3862         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
3863 }
3864
3865 static void
3866 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
3867 {
3868         struct lnet_hdr *hdr = &msg->msg_hdr;
3869
3870         if (msg->msg_wanted != 0)
3871                 lnet_setpayloadbuffer(msg);
3872
3873         lnet_build_msg_event(msg, LNET_EVENT_PUT);
3874
3875         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
3876          * it back into the ACK during lnet_finalize() */
3877         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
3878                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
3879
3880         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
3881                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
3882 }
3883
3884 static int
3885 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
3886 {
3887         struct lnet_hdr         *hdr = &msg->msg_hdr;
3888         struct lnet_match_info  info;
3889         int                     rc;
3890         bool                    ready_delay;
3891
3892         /* Convert put fields to host byte order */
3893         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
3894         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
3895         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
3896
3897         /* Primary peer NID. */
3898         info.mi_id.nid  = msg->msg_initiator;
3899         info.mi_id.pid  = hdr->src_pid;
3900         info.mi_opc     = LNET_MD_OP_PUT;
3901         info.mi_portal  = hdr->msg.put.ptl_index;
3902         info.mi_rlength = hdr->payload_length;
3903         info.mi_roffset = hdr->msg.put.offset;
3904         info.mi_mbits   = hdr->msg.put.match_bits;
3905         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
3906
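             /* If the LND provides no eager_recv handler, a message that
              * fails to match can go straight to the delayed list;
              * otherwise lnet_ni_eager_recv() must be called first (see
              * the LNET_MATCHMD_NONE case below).
              */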
3907         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
3908         ready_delay = msg->msg_rx_ready_delay;
3909
3910  again:
3911         rc = lnet_ptl_match_md(&info, msg);
3912         switch (rc) {
3913         default:
3914                 LBUG();
3915
3916         case LNET_MATCHMD_OK:
3917                 lnet_recv_put(ni, msg);
3918                 return 0;
3919
3920         case LNET_MATCHMD_NONE:
3921                 if (ready_delay)
3922                         /* no eager_recv, or it was already called; the msg
3923                          * should have been attached to the delayed list */
3924                         return 0;
3925
3926                 rc = lnet_ni_eager_recv(ni, msg);
3927                 if (rc == 0) {
3928                         ready_delay = true;
3929                         goto again;
3930                 }
3931                 /* fall through */
3932
3933         case LNET_MATCHMD_DROP:
3934                 CNETERR("Dropping PUT from %s portal %d match %llu"
3935                         " offset %d length %d: %d\n",
3936                         libcfs_id2str(info.mi_id), info.mi_portal,
3937                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
3938
3939                 return -ENOENT; /* -ve: OK but no match */
3940         }
3941 }
3942
3943 static int
3944 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
3945 {
3946         struct lnet_match_info info;
3947         struct lnet_hdr *hdr = &msg->msg_hdr;
3948         struct lnet_process_id source_id;
3949         struct lnet_handle_wire reply_wmd;
3950         int rc;
3951
3952         /* Convert get fields to host byte order */
3953         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
3954         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
3955         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
3956         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
3957
3958         source_id.nid = hdr->src_nid;
3959         source_id.pid = hdr->src_pid;
3960         /* Primary peer NID */
3961         info.mi_id.nid  = msg->msg_initiator;
3962         info.mi_id.pid  = hdr->src_pid;
3963         info.mi_opc     = LNET_MD_OP_GET;
3964         info.mi_portal  = hdr->msg.get.ptl_index;
3965         info.mi_rlength = hdr->msg.get.sink_length;
3966         info.mi_roffset = hdr->msg.get.src_offset;
3967         info.mi_mbits   = hdr->msg.get.match_bits;
3968         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
3969
3970         rc = lnet_ptl_match_md(&info, msg);
3971         if (rc == LNET_MATCHMD_DROP) {
3972                 CNETERR("Dropping GET from %s portal %d match %llu"
3973                         " offset %d length %d\n",
3974                         libcfs_id2str(info.mi_id), info.mi_portal,
3975                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
3976                 return -ENOENT; /* -ve: OK but no match */
3977         }
3978
3979         LASSERT(rc == LNET_MATCHMD_OK);
3980
3981         lnet_build_msg_event(msg, LNET_EVENT_GET);
3982
3983         reply_wmd = hdr->msg.get.return_wmd;
3984
3985         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
3986                        msg->msg_offset, msg->msg_wanted);
3987
3988         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
3989
3990         if (rdma_get) {
3991                 /* The LND completes the REPLY from its recv procedure */
3992                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
3993                              msg->msg_offset, msg->msg_len, msg->msg_len);
3994                 return 0;
3995         }
3996
3997         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
3998         msg->msg_receiving = 0;
3999
4000         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
4001         if (rc < 0) {
4002                 /* didn't get as far as lnet_ni_send() */
4003                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4004                        libcfs_nid2str(ni->ni_nid),
4005                        libcfs_id2str(info.mi_id), rc);
4006
4007                 lnet_finalize(msg, rc);
4008         }
4009
4010         return 0;
4011 }
4012
4013 static int
4014 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4015 {
4016         void *private = msg->msg_private;
4017         struct lnet_hdr *hdr = &msg->msg_hdr;
4018         struct lnet_process_id src = {0};
4019         struct lnet_libmd *md;
4020         int rlength;
4021         int mlength;
4022         int cpt;
4023
4024         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4025         lnet_res_lock(cpt);
4026
4027         src.nid = hdr->src_nid;
4028         src.pid = hdr->src_pid;
4029
4030         /* NB handles only looked up by creator (no flips) */
4031         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4032         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4033                 CNETERR("%s: Dropping REPLY from %s for %s "
4034                         "MD %#llx.%#llx\n",
4035                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4036                         (md == NULL) ? "invalid" : "inactive",
4037                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4038                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4039                 if (md != NULL && md->md_me != NULL)
4040                         CERROR("REPLY MD also attached to portal %d\n",
4041                                md->md_me->me_portal);
4042
4043                 lnet_res_unlock(cpt);
4044                 return -ENOENT; /* -ve: OK but no match */
4045         }
4046
4047         LASSERT(md->md_offset == 0);
4048
4049         rlength = hdr->payload_length;
4050         mlength = MIN(rlength, (int)md->md_length);
4051
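             /* For example, rlength = 4096 against md_length = 1024
              * gives mlength = 1024; the REPLY is then accepted only if
              * the MD set LNET_MD_TRUNCATE, as checked below.
              */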
4052         if (mlength < rlength &&
4053             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4054                 CNETERR("%s: Dropping REPLY from %s length %d "
4055                         "for MD %#llx would overflow (%d)\n",
4056                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4057                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4058                         mlength);
4059                 lnet_res_unlock(cpt);
4060                 return -ENOENT; /* -ve: OK but no match */
4061         }
4062
4063         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4064                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4065                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4066
4067         lnet_msg_attach_md(msg, md, 0, mlength);
4068
4069         if (mlength != 0)
4070                 lnet_setpayloadbuffer(msg);
4071
4072         lnet_res_unlock(cpt);
4073
4074         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4075
4076         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4077         return 0;
4078 }
4079
4080 static int
4081 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4082 {
4083         struct lnet_hdr *hdr = &msg->msg_hdr;
4084         struct lnet_process_id src = {0};
4085         struct lnet_libmd *md;
4086         int cpt;
4087
4088         src.nid = hdr->src_nid;
4089         src.pid = hdr->src_pid;
4090
4091         /* Convert ack fields to host byte order */
4092         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4093         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4094
4095         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4096         lnet_res_lock(cpt);
4097
4098         /* NB handles only looked up by creator (no flips) */
4099         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4100         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4101                 /* Don't moan; this is expected */
4102                 CDEBUG(D_NET,
4103                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4104                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4105                        (md == NULL) ? "invalid" : "inactive",
4106                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4107                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4108                 if (md != NULL && md->md_me != NULL)
4109                         CERROR("Source MD also attached to portal %d\n",
4110                                md->md_me->me_portal);
4111
4112                 lnet_res_unlock(cpt);
4113                 return -ENOENT;                  /* -ve! */
4114         }
4115
4116         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4117                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4118                hdr->msg.ack.dst_wmd.wh_object_cookie);
4119
4120         lnet_msg_attach_md(msg, md, 0, 0);
4121
4122         lnet_res_unlock(cpt);
4123
4124         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4125
4126         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4127         return 0;
4128 }
4129
4130 /**
4131  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4132  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4133  * \retval -ve                  error code
4134  */
4135 int
4136 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4137 {
4138         int     rc = 0;
4139
4140         if (!the_lnet.ln_routing)
4141                 return -ECANCELED;
4142
4143         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4144             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4145                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4146                         msg->msg_rx_ready_delay = 1;
4147                 } else {
4148                         lnet_net_unlock(msg->msg_rx_cpt);
4149                         rc = lnet_ni_eager_recv(ni, msg);
4150                         lnet_net_lock(msg->msg_rx_cpt);
4151                 }
4152         }
4153
4154         if (rc == 0)
4155                 rc = lnet_post_routed_recv_locked(msg, 0);
4156         return rc;
4157 }
4158
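     /*
      * In short: forwarding a routed message needs both a peer router
      * credit (lpni_rtrcredits) and a router buffer credit
      * (rbp_credits); when either is exhausted the message is queued
      * for later delivery (LNET_CREDIT_WAIT), after an optional
      * eager_recv, rather than being dropped.
      */
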
4159 int
4160 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4161 {
4162         int     rc;
4163
4164         switch (msg->msg_type) {
4165         case LNET_MSG_ACK:
4166                 rc = lnet_parse_ack(ni, msg);
4167                 break;
4168         case LNET_MSG_PUT:
4169                 rc = lnet_parse_put(ni, msg);
4170                 break;
4171         case LNET_MSG_GET:
4172                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4173                 break;
4174         case LNET_MSG_REPLY:
4175                 rc = lnet_parse_reply(ni, msg);
4176                 break;
4177         default: /* prevent an unused label if !kernel */
4178                 LASSERT(0);
4179                 return -EPROTO;
4180         }
4181
4182         LASSERT(rc == 0 || rc == -ENOENT);
4183         return rc;
4184 }
4185
4186 char *
4187 lnet_msgtyp2str(int type)
4188 {
4189         switch (type) {
4190         case LNET_MSG_ACK:
4191                 return "ACK";
4192         case LNET_MSG_PUT:
4193                 return "PUT";
4194         case LNET_MSG_GET:
4195                 return "GET";
4196         case LNET_MSG_REPLY:
4197                 return "REPLY";
4198         case LNET_MSG_HELLO:
4199                 return "HELLO";
4200         default:
4201                 return "<UNKNOWN>";
4202         }
4203 }
4204
4205 void
4206 lnet_print_hdr(struct lnet_hdr *hdr)
4207 {
4208         struct lnet_process_id src = {
4209                 .nid = hdr->src_nid,
4210                 .pid = hdr->src_pid,
4211         };
4212         struct lnet_process_id dst = {
4213                 .nid = hdr->dest_nid,
4214                 .pid = hdr->dest_pid,
4215         };
4216         char *type_str = lnet_msgtyp2str(hdr->type);
4217
4218         CWARN("P3 Header at %p of type %s\n", hdr, type_str);
4219         CWARN("    From %s\n", libcfs_id2str(src));
4220         CWARN("    To   %s\n", libcfs_id2str(dst));
4221
4222         switch (hdr->type) {
4223         default:
4224                 break;
4225
4226         case LNET_MSG_PUT:
4227                 CWARN("    Ptl index %d, ack md %#llx.%#llx, "
4228                       "match bits %llu\n",
4229                       hdr->msg.put.ptl_index,
4230                       hdr->msg.put.ack_wmd.wh_interface_cookie,
4231                       hdr->msg.put.ack_wmd.wh_object_cookie,
4232                       hdr->msg.put.match_bits);
4233                 CWARN("    Length %d, offset %d, hdr data %#llx\n",
4234                       hdr->payload_length, hdr->msg.put.offset,
4235                       hdr->msg.put.hdr_data);
4236                 break;
4237
4238         case LNET_MSG_GET:
4239                 CWARN("    Ptl index %d, return md %#llx.%#llx, "
4240                       "match bits %llu\n", hdr->msg.get.ptl_index,
4241                       hdr->msg.get.return_wmd.wh_interface_cookie,
4242                       hdr->msg.get.return_wmd.wh_object_cookie,
4243                       hdr->msg.get.match_bits);
4244                 CWARN("    Length %d, src offset %d\n",
4245                       hdr->msg.get.sink_length,
4246                       hdr->msg.get.src_offset);
4247                 break;
4248
4249         case LNET_MSG_ACK:
4250                 CWARN("    dst md %#llx.%#llx, "
4251                       "manipulated length %d\n",
4252                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
4253                       hdr->msg.ack.dst_wmd.wh_object_cookie,
4254                       hdr->msg.ack.mlength);
4255                 break;
4256
4257         case LNET_MSG_REPLY:
4258                 CWARN("    dst md %#llx.%#llx, "
4259                       "length %d\n",
4260                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
4261                       hdr->msg.reply.dst_wmd.wh_object_cookie,
4262                       hdr->payload_length);
4263         }
4264
4265 }
4266
4267 int
4268 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4269            void *private, int rdma_req)
4270 {
4271         struct lnet_peer_ni *lpni;
4272         struct lnet_msg *msg;
4273         __u32 payload_length;
4274         lnet_pid_t dest_pid;
4275         lnet_nid_t dest_nid;
4276         lnet_nid_t src_nid;
4277         bool push = false;
4278         int for_me;
4279         __u32 type;
4280         int rc = 0;
4281         int cpt;
4282
4283         LASSERT (!in_interrupt ());
4284
4285         type = le32_to_cpu(hdr->type);
4286         src_nid = le64_to_cpu(hdr->src_nid);
4287         dest_nid = le64_to_cpu(hdr->dest_nid);
4288         dest_pid = le32_to_cpu(hdr->dest_pid);
4289         payload_length = le32_to_cpu(hdr->payload_length);
4290
4291         for_me = (ni->ni_nid == dest_nid);
4292         cpt = lnet_cpt_of_nid(from_nid, ni);
4293
4294         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4295                 libcfs_nid2str(dest_nid),
4296                 libcfs_nid2str(ni->ni_nid),
4297                 libcfs_nid2str(src_nid),
4298                 lnet_msgtyp2str(type),
4299                 (for_me) ? "for me" : "routed");
4300
4301         switch (type) {
4302         case LNET_MSG_ACK:
4303         case LNET_MSG_GET:
4304                 if (payload_length > 0) {
4305                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4306                                libcfs_nid2str(from_nid),
4307                                libcfs_nid2str(src_nid),
4308                                lnet_msgtyp2str(type), payload_length);
4309                         return -EPROTO;
4310                 }
4311                 break;
4312
4313         case LNET_MSG_PUT:
4314         case LNET_MSG_REPLY:
4315                 if (payload_length >
4316                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4317                         CERROR("%s, src %s: bad %s payload %d "
4318                                "(%d max expected)\n",
4319                                libcfs_nid2str(from_nid),
4320                                libcfs_nid2str(src_nid),
4321                                lnet_msgtyp2str(type),
4322                                payload_length,
4323                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4324                         return -EPROTO;
4325                 }
4326                 break;
4327
4328         default:
4329                 CERROR("%s, src %s: Bad message type 0x%x\n",
4330                        libcfs_nid2str(from_nid),
4331                        libcfs_nid2str(src_nid), type);
4332                 return -EPROTO;
4333         }
4334
4335         if (the_lnet.ln_routing &&
4336             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4337                 lnet_ni_lock(ni);
4338                 spin_lock(&ni->ni_net->net_lock);
4339                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4340                 spin_unlock(&ni->ni_net->net_lock);
4341                 if (ni->ni_status != NULL &&
4342                     ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) {
4343                         ni->ni_status->ns_status = LNET_NI_STATUS_UP;
4344                         push = true;
4345                 }
4346                 lnet_ni_unlock(ni);
4347         }
4348
4349         if (push)
4350                 lnet_push_update_to_peers(1);
4351
4352         /* Regard a bad destination NID as a protocol error.  Senders should
4353          * know what they're doing; if they don't they're misconfigured, buggy
4354          * or malicious so we chop them off at the knees :) */
4355
4356         if (!for_me) {
4357                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4358                         /* should have gone direct */
4359                         CERROR("%s, src %s: Bad dest nid %s "
4360                                "(should have been sent direct)\n",
4361                                 libcfs_nid2str(from_nid),
4362                                 libcfs_nid2str(src_nid),
4363                                 libcfs_nid2str(dest_nid));
4364                         return -EPROTO;
4365                 }
4366
4367                 if (lnet_islocalnid(dest_nid)) {
4368                         /* dest is another local NI; sender should have used
4369                          * this node's NID on its own network */
4370                         CERROR("%s, src %s: Bad dest nid %s "
4371                                "(it's my nid but on a different network)\n",
4372                                 libcfs_nid2str(from_nid),
4373                                 libcfs_nid2str(src_nid),
4374                                 libcfs_nid2str(dest_nid));
4375                         return -EPROTO;
4376                 }
4377
4378                 if (rdma_req && type == LNET_MSG_GET) {
4379                         CERROR("%s, src %s: Bad optimized GET for %s "
4380                                "(final destination must be me)\n",
4381                                 libcfs_nid2str(from_nid),
4382                                 libcfs_nid2str(src_nid),
4383                                 libcfs_nid2str(dest_nid));
4384                         return -EPROTO;
4385                 }
4386
4387                 if (!the_lnet.ln_routing) {
4388                         CERROR("%s, src %s: Dropping message for %s "
4389                                "(routing not enabled)\n",
4390                                 libcfs_nid2str(from_nid),
4391                                 libcfs_nid2str(src_nid),
4392                                 libcfs_nid2str(dest_nid));
4393                         goto drop;
4394                 }
4395         }
4396
4397         /* Message looks OK; we're not going to return an error, so we MUST
4398          * call back lnd_recv() come what may... */
4399
4400         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4401             fail_peer(src_nid, 0)) {                    /* shall we now? */
4402                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4403                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4404                        lnet_msgtyp2str(type));
4405                 goto drop;
4406         }
4407
4408         if (!list_empty(&the_lnet.ln_drop_rules) &&
4409             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4410                 CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate "
4411                               "silent message loss\n",
4412                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4413                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4414                 goto drop;
4415         }
4416
4417         if (lnet_drop_asym_route && for_me &&
4418             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4419                 struct lnet_net *net;
4420                 struct lnet_remotenet *rnet;
4421                 bool found = true;
4422
4423                 /* we are dealing with a routed message,
4424                  * so see if route to reach src_nid goes through from_nid
4425                  */
4426                 lnet_net_lock(cpt);
4427                 net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
4428                 if (!net) {
4429                         lnet_net_unlock(cpt);
4430                         CERROR("net %s not found\n",
4431                                libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
4432                         return -EPROTO;
4433                 }
4434
4435                 rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
4436                 if (rnet) {
4437                         struct lnet_peer *gw = NULL;
4438                         struct lnet_peer_ni *lpni = NULL;
4439                         struct lnet_route *route;
4440
4441                         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
4442                                 found = false;
4443                                 gw = route->lr_gateway;
4444                                 if (route->lr_lnet != net->net_id)
4445                                         continue;
4446                                 /*
4447                                  * if the nid is one of the gateway's NIDs
4448                                  * then this is a valid gateway
4449                                  */
4450                                 while ((lpni = lnet_get_next_peer_ni_locked(gw,
4451                                                 NULL, lpni)) != NULL) {
4452                                         if (lpni->lpni_nid == from_nid) {
4453                                                 found = true;
4454                                                 break;
4455                                         }
4456                                 }
                                     if (found)
                                             break;
4457                         }
4458                 }
4459                 lnet_net_unlock(cpt);
4460                 if (!found) {
4461                         /* we would not use from_nid to route a message to
4462                          * src_nid
4463                          * => asymmetric routing detected but forbidden
4464                          */
4465                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4466                                libcfs_nid2str(from_nid),
4467                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4468                         goto drop;
4469                 }
4470         }
4471
4472         msg = lnet_msg_alloc();
4473         if (msg == NULL) {
4474                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4475                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4476                        lnet_msgtyp2str(type));
4477                 goto drop;
4478         }
4479
4480         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4481          * pointers NULL etc */
4482
4483         msg->msg_type = type;
4484         msg->msg_private = private;
4485         msg->msg_receiving = 1;
4486         msg->msg_rdma_get = rdma_req;
4487         msg->msg_len = msg->msg_wanted = payload_length;
4488         msg->msg_offset = 0;
4489         msg->msg_hdr = *hdr;
4490         /* for building message event */
4491         msg->msg_from = from_nid;
4492         if (!for_me) {
4493                 msg->msg_target.pid     = dest_pid;
4494                 msg->msg_target.nid     = dest_nid;
4495                 msg->msg_routing        = 1;
4496
4497         } else {
4498                 /* convert common msg->hdr fields to host byteorder */
4499                 msg->msg_hdr.type       = type;
4500                 msg->msg_hdr.src_nid    = src_nid;
4501                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4502                 msg->msg_hdr.dest_nid   = dest_nid;
4503                 msg->msg_hdr.dest_pid   = dest_pid;
4504                 msg->msg_hdr.payload_length = payload_length;
4505         }
4506
4507         lnet_net_lock(cpt);
4508         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4509         if (IS_ERR(lpni)) {
4510                 lnet_net_unlock(cpt);
4511                 CERROR("%s, src %s: Dropping %s "
4512                        "(error %ld looking up sender)\n",
4513                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4514                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4515                 lnet_msg_free(msg);
4516                 if (PTR_ERR(lpni) == -ESHUTDOWN)
4517                         /* We are shutting down.  Don't do anything more */
4518                         return 0;
4519                 goto drop;
4520         }
4521
4522         if (the_lnet.ln_routing)
4523                 lpni->lpni_last_alive = ktime_get_seconds();
4524
4525         msg->msg_rxpeer = lpni;
4526         msg->msg_rxni = ni;
4527         lnet_ni_addref_locked(ni, cpt);
4528         /* Multi-Rail: Primary NID of source. */
4529         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4530
4531         /*
4532          * Mark the status of this lpni as UP since we received a message
4533          * from it. A ping response reports back the remote's ns_status
4534          * (up or down), and we cache that value here.
4535          */
4536         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4537
4538         lnet_msg_commit(msg, cpt);
4539
4540         /* message delay simulation */
4541         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4542                      lnet_delay_rule_match_locked(hdr, msg))) {
4543                 lnet_net_unlock(cpt);
4544                 return 0;
4545         }
4546
4547         if (!for_me) {
4548                 rc = lnet_parse_forward_locked(ni, msg);
4549                 lnet_net_unlock(cpt);
4550
4551                 if (rc < 0)
4552                         goto free_drop;
4553
4554                 if (rc == LNET_CREDIT_OK) {
4555                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4556                                      0, payload_length, payload_length);
4557                 }
4558                 return 0;
4559         }
4560
4561         lnet_net_unlock(cpt);
4562
4563         rc = lnet_parse_local(ni, msg);
4564         if (rc != 0)
4565                 goto free_drop;
4566         return 0;
4567
4568  free_drop:
4569         LASSERT(msg->msg_md == NULL);
4570         lnet_finalize(msg, rc);
4571
4572  drop:
4573         lnet_drop_message(ni, cpt, private, payload_length, type);
4574         return 0;
4575 }
4576 EXPORT_SYMBOL(lnet_parse);
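
/* Illustrative only: a minimal, compiled-out sketch of how an LND receive
 * path might hand an incoming header to lnet_parse() above.  The type
 * "struct mylnd_rx" and the mylnd_* helpers are hypothetical; they are not
 * part of this file or of any LND. */
#if 0
static void
mylnd_handle_rx(struct lnet_ni *ni, struct mylnd_rx *rx)
{
        int rc;

        /* 'rx' is passed through as 'private' and is handed back to the
         * LND in lnd_recv(); rdma_req is nonzero for an optimized GET */
        rc = lnet_parse(ni, &rx->rx_hdr, rx->rx_from_nid, rx, rx->rx_rdma);
        if (rc < 0)
                mylnd_drop_rx(rx);      /* LNet did not take ownership */
}
#endif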
4577
4578 void
4579 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4580 {
4581         while (!list_empty(head)) {
4582                 struct lnet_process_id id = {0};
4583                 struct lnet_msg *msg;
4584
4585                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4586                 list_del(&msg->msg_list);
4587
4588                 id.nid = msg->msg_hdr.src_nid;
4589                 id.pid = msg->msg_hdr.src_pid;
4590
4591                 LASSERT(msg->msg_md == NULL);
4592                 LASSERT(msg->msg_rx_delayed);
4593                 LASSERT(msg->msg_rxpeer != NULL);
4594                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4595
4596                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4597                       " offset %d length %d: %s\n",
4598                       libcfs_id2str(id),
4599                       msg->msg_hdr.msg.put.ptl_index,
4600                       msg->msg_hdr.msg.put.match_bits,
4601                       msg->msg_hdr.msg.put.offset,
4602                       msg->msg_hdr.payload_length, reason);
4603
4604                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4605                  * called lnet_drop_message(), so I just hang onto msg as well
4606                  * until that's done */
4607
4608                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4609                                   msg->msg_private, msg->msg_len,
4610                                   msg->msg_type);
4611
4612                 msg->msg_no_resend = true;
4613                 /*
4614                  * NB: the message will not generate an event because it has no
4615                  * attached MD, but we still give an error code so that
4616                  * lnet_msg_decommit() can skip counter operations and other checks.
4617                  */
4618                 lnet_finalize(msg, -ENOENT);
4619         }
4620 }
4621
4622 void
4623 lnet_recv_delayed_msg_list(struct list_head *head)
4624 {
4625         while (!list_empty(head)) {
4626                 struct lnet_msg *msg;
4627                 struct lnet_process_id id;
4628
4629                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4630                 list_del(&msg->msg_list);
4631
4632                 /* md won't disappear under me, since each msg
4633                  * holds a ref on it */
4634
4635                 id.nid = msg->msg_hdr.src_nid;
4636                 id.pid = msg->msg_hdr.src_pid;
4637
4638                 LASSERT(msg->msg_rx_delayed);
4639                 LASSERT(msg->msg_md != NULL);
4640                 LASSERT(msg->msg_rxpeer != NULL);
4641                 LASSERT(msg->msg_rxni != NULL);
4642                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4643
4644                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4645                        "match %llu offset %d length %d.\n",
4646                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4647                         msg->msg_hdr.msg.put.match_bits,
4648                         msg->msg_hdr.msg.put.offset,
4649                         msg->msg_hdr.payload_length);
4650
4651                 lnet_recv_put(msg->msg_rxni, msg);
4652         }
4653 }
4654
4655 static void
4656 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4657                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4658 {
4659         s64 timeout_ns;
4660         bool new_entry = true;
4661         struct lnet_rsp_tracker *local_rspt;
4662
4663         /*
4664          * MD has a refcount taken by message so it's not going away.
4665          * The MD however can be looked up. We need to secure the access
4666          * to the md_rspt_ptr by taking the res_lock.
4667          * The rspt can be accessed without protection up to when it gets
4668          * added to the list.
4669          */
4670
4671         lnet_res_lock(cpt);
4672         local_rspt = md->md_rspt_ptr;
4673         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4674         if (local_rspt != NULL) {
4675                 /*
4676                  * we already have an rspt attached to the md, so we'll
4677                  * update the deadline on that one.
4678                  */
4679                 lnet_rspt_free(rspt, cpt);
4680                 new_entry = false;
4681         } else {
4682                 /* new md */
4683                 rspt->rspt_mdh = mdh;
4684                 rspt->rspt_cpt = cpt;
4685                 /* store the rspt so we can access it when we get the REPLY */
4686                 md->md_rspt_ptr = rspt;
4687                 local_rspt = rspt;
4688         }
4689         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4690
4691         /*
4692          * Add to the list of tracked responses. It's added to the tail of
4693          * the list so that older entries expire first.
4694          */
4695         lnet_net_lock(cpt);
4696         if (!new_entry && !list_empty(&local_rspt->rspt_on_list))
4697                 list_del_init(&local_rspt->rspt_on_list);
4698         list_add_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4699         lnet_net_unlock(cpt);
4700         lnet_res_unlock(cpt);
4701 }
4702
4703 /**
4704  * Initiate an asynchronous PUT operation.
4705  *
4706  * There are several events associated with a PUT: completion of the send on
4707  * the initiator node (LNET_EVENT_SEND), and when the send completes
4708  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4709  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4710  * used at the target node to indicate the completion of incoming data
4711  * delivery.
4712  *
4713  * The local events will be logged in the EQ associated with the MD pointed to
4714  * by \a mdh handle. Using a MD without an associated EQ results in these
4715  * events being discarded. In this case, the caller must have another
4716  * mechanism (e.g., a higher level protocol) for determining when it is safe
4717  * to modify the memory region associated with the MD.
4718  *
4719  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4720  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4721  *
4722  * \param self Indicates the NID of a local interface through which to send
4723  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4724  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4725  * must be "free floating" (See LNetMDBind()).
4726  * \param ack Controls whether an acknowledgment is requested.
4727  * Acknowledgments are only sent when they are requested by the initiating
4728  * process and the target MD enables them.
4729  * \param target A process identifier for the target process.
4730  * \param portal The index in the \a target's portal table.
4731  * \param match_bits The match bits to use for MD selection at the target
4732  * process.
4733  * \param offset The offset into the target MD (only used when the target
4734  * MD has the LNET_MD_MANAGE_REMOTE option set).
4735  * \param hdr_data 64 bits of user data that can be included in the message
4736  * header. This data is written to an event queue entry at the target if an
4737  * EQ is present on the matching MD.
4738  *
4739  * \retval  0      Success, and only in this case events will be generated
4740  * and logged to EQ (if it exists).
4741  * \retval -EIO    Simulated failure.
4742  * \retval -ENOMEM Memory allocation failure.
4743  * \retval -ENOENT Invalid MD object.
4744  *
4745  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4746  */
4747 int
4748 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4749         struct lnet_process_id target, unsigned int portal,
4750         __u64 match_bits, unsigned int offset,
4751         __u64 hdr_data)
4752 {
4753         struct lnet_msg *msg;
4754         struct lnet_libmd *md;
4755         int cpt;
4756         int rc;
4757         struct lnet_rsp_tracker *rspt = NULL;
4758
4759         LASSERT(the_lnet.ln_refcount > 0);
4760
4761         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4762             fail_peer(target.nid, 1)) {                 /* shall we now? */
4763                 CERROR("Dropping PUT to %s: simulated failure\n",
4764                        libcfs_id2str(target));
4765                 return -EIO;
4766         }
4767
4768         msg = lnet_msg_alloc();
4769         if (msg == NULL) {
4770                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4771                        libcfs_id2str(target));
4772                 return -ENOMEM;
4773         }
4774         msg->msg_vmflush = !!memory_pressure_get();
4775
4776         cpt = lnet_cpt_of_cookie(mdh.cookie);
4777
4778         if (ack == LNET_ACK_REQ) {
4779                 rspt = lnet_rspt_alloc(cpt);
4780                 if (!rspt) {
4781                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4782                                 libcfs_id2str(target));
                                lnet_msg_free(msg); /* don't leak the msg allocated above */
4783                         return -ENOMEM;
4784                 }
4785                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4786         }
4787
4788         lnet_res_lock(cpt);
4789
4790         md = lnet_handle2md(&mdh);
4791         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4792                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4793                        match_bits, portal, libcfs_id2str(target),
4794                        md == NULL ? -1 : md->md_threshold);
4795                 if (md != NULL && md->md_me != NULL)
4796                         CERROR("Source MD also attached to portal %d\n",
4797                                md->md_me->me_portal);
4798                 lnet_res_unlock(cpt);
4799
4800                 if (rspt != NULL)
                             lnet_rspt_free(rspt, cpt);
4801                 lnet_msg_free(msg);
4802                 return -ENOENT;
4803         }
4804
4805         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4806
4807         lnet_msg_attach_md(msg, md, 0, 0);
4808
4809         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4810
4811         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4812         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
4813         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
4814         msg->msg_hdr.msg.put.hdr_data = hdr_data;
4815
4816         /* NB handles only looked up by creator (no flips) */
4817         if (ack == LNET_ACK_REQ) {
4818                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4819                         the_lnet.ln_interface_cookie;
4820                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4821                         md->md_lh.lh_cookie;
4822         } else {
4823                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4824                         LNET_WIRE_HANDLE_COOKIE_NONE;
4825                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4826                         LNET_WIRE_HANDLE_COOKIE_NONE;
4827         }
4828
4829         lnet_res_unlock(cpt);
4830
4831         lnet_build_msg_event(msg, LNET_EVENT_SEND);
4832
4833         if (ack == LNET_ACK_REQ)
4834                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
4835
4836         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
4837                                  CFS_FAIL_ONCE))
4838                 rc = -EIO;
4839         else
4840                 rc = lnet_send(self, msg, LNET_NID_ANY);
4841
4842         if (rc != 0) {
4843                 CNETERR("Error sending PUT to %s: %d\n",
4844                         libcfs_id2str(target), rc);
4845                 msg->msg_no_resend = true;
4846                 lnet_finalize(msg, rc);
4847         }
4848
4849         /* completion will be signalled by an event */
4850         return 0;
4851 }
4852 EXPORT_SYMBOL(LNetPut);
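
/* Illustrative only: a compiled-out sketch of issuing a PUT that requests
 * an ACK.  The EQ handle, the portal number (12) and the match bits (0xcafe)
 * are assumptions of this example; the target must have a matching ME/MD. */
#if 0
static int
example_put(struct lnet_handle_eq eqh, struct lnet_process_id target,
            void *buf, unsigned int len)
{
        struct lnet_md umd = {
                .start     = buf,
                .length    = len,
                .threshold = 2,         /* one SEND plus one ACK event */
                .options   = 0,
                .user_ptr  = NULL,
                .eq_handle = eqh,
        };
        struct lnet_handle_md mdh;
        int rc;

        /* the MD must be "free floating", i.e. bound with LNetMDBind() */
        rc = LNetMDBind(umd, LNET_UNLINK, &mdh);
        if (rc != 0)
                return rc;

        /* completion is reported via events on eqh; note that SEND and
         * ACK may be delivered in either order */
        return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
                       12, 0xcafe, 0 /* offset */, 0 /* hdr_data */);
}
#endif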
4853
4854 /*
4855  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
4856  * returns a msg for the LND to pass to lnet_finalize() when the sink
4857  * data has been received.
4858  *
4859  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
4860  * lnet_finalize() is called on it, so the LND must call this first
4861  */
4862 struct lnet_msg *
4863 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
4864 {
4865         struct lnet_msg *msg = lnet_msg_alloc();
4866         struct lnet_libmd *getmd = getmsg->msg_md;
4867         struct lnet_process_id peer_id = getmsg->msg_target;
4868         int cpt;
4869
4870         LASSERT(!getmsg->msg_target_is_router);
4871         LASSERT(!getmsg->msg_routing);
4872
4873         if (msg == NULL) {
4874                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
4875                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
4876                 goto drop;
4877         }
4878
4879         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
4880         lnet_res_lock(cpt);
4881
4882         LASSERT(getmd->md_refcount > 0);
4883
4884         if (getmd->md_threshold == 0) {
4885                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
4886                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
4887                         getmd);
4888                 lnet_res_unlock(cpt);
4889                 goto drop;
4890         }
4891
4892         LASSERT(getmd->md_offset == 0);
4893
4894         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
4895                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
4896
4897         /* setup information for lnet_build_msg_event */
4898         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
4899         msg->msg_from = peer_id.nid;
4900         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
4901         msg->msg_hdr.src_nid = peer_id.nid;
4902         msg->msg_hdr.payload_length = getmd->md_length;
4903         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
4904
4905         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
4906         lnet_res_unlock(cpt);
4907
4908         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4909
4910         lnet_net_lock(cpt);
4911         lnet_msg_commit(msg, cpt);
4912         lnet_net_unlock(cpt);
4913
4914         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4915
4916         return msg;
4917
4918  drop:
4919         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4920
4921         lnet_net_lock(cpt);
4922         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
4923         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4924         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
4925                 getmd->md_length;
4926         lnet_net_unlock(cpt);
4927
4928         if (msg != NULL)
4929                 lnet_msg_free(msg);
4930
4931         return NULL;
4932 }
4933 EXPORT_SYMBOL(lnet_create_reply_msg);
4934
4935 void
4936 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
4937                        unsigned int len)
4938 {
4939         /* Set the REPLY length, now the RDMA that elides the REPLY message has
4940          * completed and I know it. */
4941         LASSERT(reply != NULL);
4942         LASSERT(reply->msg_type == LNET_MSG_GET);
4943         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
4944
4945         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
4946          * the end of my buffer, I might as well be dead. */
4947         LASSERT(len <= reply->msg_ev.mlength);
4948
4949         reply->msg_ev.mlength = len;
4950 }
4951 EXPORT_SYMBOL(lnet_set_reply_msg_len);
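
/* Illustrative only: a compiled-out sketch of the calling pattern expected
 * from an LND that RDMAs GET data straight into the sink MD, so that no
 * REPLY message crosses the wire.  "struct mylnd_tx" and its fields are
 * hypothetical. */
#if 0
static int
mylnd_launch_get(struct lnet_ni *ni, struct lnet_msg *getmsg,
                 struct mylnd_tx *tx)
{
        /* must be created before getmsg is finalized */
        tx->tx_reply = lnet_create_reply_msg(ni, getmsg);
        if (tx->tx_reply == NULL)
                return -EIO;

        /* ... post the GET; the peer RDMAs into the sink MD ... */
        return 0;
}

static void
mylnd_get_completed(struct lnet_ni *ni, struct mylnd_tx *tx, int status,
                    unsigned int nob)
{
        if (status == 0)
                /* nob is how many bytes the peer actually wrote; it must
                 * not exceed the sink MD length */
                lnet_set_reply_msg_len(ni, tx->tx_reply, nob);

        /* sink data has been received (or the transfer failed): complete
         * the REPLY so the LNET_EVENT_REPLY is delivered */
        lnet_finalize(tx->tx_reply, status);
}
#endif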
4952
4953 /**
4954  * Initiate an asynchronous GET operation.
4955  *
4956  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
4957  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
4958  * the target node in the REPLY has been written to local MD.
4959  *
4960  * On the target node, an LNET_EVENT_GET is logged when the GET request
4961  * arrives and is accepted into a MD.
4962  *
4963  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
4964  * \param mdh A handle for the MD that describes the memory into which the
4965  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
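 * \param recovery Mark this GET as an LNet recovery message (it sets
 * msg_recovery on the message); ordinary callers pass false.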
4966  *
4967  * \retval  0      Success, and only in this case events will be generated
4968  * and logged to EQ (if it exists) of the MD.
4969  * \retval -EIO    Simulated failure.
4970  * \retval -ENOMEM Memory allocation failure.
4971  * \retval -ENOENT Invalid MD object.
4972  */
4973 int
4974 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
4975         struct lnet_process_id target, unsigned int portal,
4976         __u64 match_bits, unsigned int offset, bool recovery)
4977 {
4978         struct lnet_msg *msg;
4979         struct lnet_libmd *md;
4980         struct lnet_rsp_tracker *rspt;
4981         int cpt;
4982         int rc;
4983
4984         LASSERT(the_lnet.ln_refcount > 0);
4985
4986         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4987             fail_peer(target.nid, 1))                   /* shall we now? */
4988         {
4989                 CERROR("Dropping GET to %s: simulated failure\n",
4990                        libcfs_id2str(target));
4991                 return -EIO;
4992         }
4993
4994         msg = lnet_msg_alloc();
4995         if (!msg) {
4996                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
4997                        libcfs_id2str(target));
4998                 return -ENOMEM;
4999         }
5000
5001         cpt = lnet_cpt_of_cookie(mdh.cookie);
5002
5003         rspt = lnet_rspt_alloc(cpt);
5004         if (!rspt) {
5005                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5006                        libcfs_id2str(target));
                lnet_msg_free(msg); /* don't leak the msg allocated above */
5007                 return -ENOMEM;
5008         }
5009         INIT_LIST_HEAD(&rspt->rspt_on_list);
5010
5011         msg->msg_recovery = recovery;
5012
5013         lnet_res_lock(cpt);
5014
5015         md = lnet_handle2md(&mdh);
5016         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5017                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5018                        match_bits, portal, libcfs_id2str(target),
5019                        md == NULL ? -1 : md->md_threshold);
5020                 if (md != NULL && md->md_me != NULL)
5021                         CERROR("REPLY MD also attached to portal %d\n",
5022                                md->md_me->me_portal);
5023
5024                 lnet_res_unlock(cpt);
5025
5026                 lnet_msg_free(msg);
5027                 lnet_rspt_free(rspt, cpt);
5028                 return -ENOENT;
5029         }
5030
5031         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5032
5033         lnet_msg_attach_md(msg, md, 0, 0);
5034
5035         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5036
5037         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5038         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5039         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5040         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5041
5042         /* NB handles only looked up by creator (no flips) */
5043         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5044                 the_lnet.ln_interface_cookie;
5045         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5046                 md->md_lh.lh_cookie;
5047
5048         lnet_res_unlock(cpt);
5049
5050         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5051
5052         lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5053
5054         rc = lnet_send(self, msg, LNET_NID_ANY);
5055         if (rc < 0) {
5056                 CNETERR("Error sending GET to %s: %d\n",
5057                         libcfs_id2str(target), rc);
5058                 msg->msg_no_resend = true;
5059                 lnet_finalize(msg, rc);
5060         }
5061
5062         /* completion will be signalled by an event */
5063         return 0;
5064 }
5065 EXPORT_SYMBOL(LNetGet);
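
/* Illustrative only: a compiled-out sketch of issuing a GET.  The portal
 * number (12) and match bits (0xcafe) are assumptions of this example and
 * must correspond to an ME/MD set up on the target with LNET_MD_OP_GET. */
#if 0
static int
example_get(struct lnet_handle_eq eqh, struct lnet_process_id target,
            void *sink, unsigned int len)
{
        struct lnet_md umd = {
                .start     = sink,
                .length    = len,
                .threshold = 2,         /* one SEND plus one REPLY event */
                .options   = 0,
                .user_ptr  = NULL,
                .eq_handle = eqh,
        };
        struct lnet_handle_md mdh;
        int rc;

        /* the MD must be "free floating", i.e. bound with LNetMDBind() */
        rc = LNetMDBind(umd, LNET_UNLINK, &mdh);
        if (rc != 0)
                return rc;

        /* not an LNet recovery ping, so recovery == false */
        return LNetGet(LNET_NID_ANY, mdh, target, 12, 0xcafe,
                       0 /* offset */, false);
}
#endif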
5066
5067 /**
5068  * Calculate distance to node at \a dstnid.
5069  *
5070  * \param dstnid Target NID.
5071  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5072  * is saved here.
5073  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5074  * here.
5075  *
5076  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5077  * local_nid_dist_zero is set, which is the default.
5078  * \retval positives Distance to target NID, i.e. number of hops plus one.
5079  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5080  */
5081 int
5082 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5083 {
5084         struct list_head        *e;
5085         struct lnet_ni *ni = NULL;
5086         struct lnet_remotenet *rnet;
5087         __u32                   dstnet = LNET_NIDNET(dstnid);
5088         int                     hops;
5089         int                     cpt;
5090         __u32                   order = 2;
5091         struct list_head        *rn_list;
5092
5093         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5094          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5095          * keep order 0 free for 0@lo and order 1 free for a local NID
5096          * match */
5097
5098         LASSERT(the_lnet.ln_refcount > 0);
5099
5100         cpt = lnet_net_lock_current();
5101
5102         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5103                 if (ni->ni_nid == dstnid) {
5104                         if (srcnidp != NULL)
5105                                 *srcnidp = dstnid;
5106                         if (orderp != NULL) {
5107                                 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
5108                                         *orderp = 0;
5109                                 else
5110                                         *orderp = 1;
5111                         }
5112                         lnet_net_unlock(cpt);
5113
5114                         return local_nid_dist_zero ? 0 : 1;
5115                 }
5116
5117                 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
5118                         /* Check if this ni was originally created in the
5119                          * current net namespace.
5120                          * If not, bump its order above 0xffff0000 so that
5121                          * this ni is not preferred. */
5122                         if (current->nsproxy &&
5123                             !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
5124                                 order += 0xffff0000;
5125                         if (srcnidp != NULL)
5126                                 *srcnidp = ni->ni_nid;
5127                         if (orderp != NULL)
5128                                 *orderp = order;
5129                         lnet_net_unlock(cpt);
5130                         return 1;
5131                 }
5132
5133                 order++;
5134         }
5135
5136         rn_list = lnet_net2rnethash(dstnet);
5137         list_for_each(e, rn_list) {
5138                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5139
5140                 if (rnet->lrn_net == dstnet) {
5141                         struct lnet_route *route;
5142                         struct lnet_route *shortest = NULL;
5143                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5144                         __u32 route_hops;
5145
5146                         LASSERT(!list_empty(&rnet->lrn_routes));
5147
5148                         list_for_each_entry(route, &rnet->lrn_routes,
5149                                             lr_list) {
5150                                 route_hops = route->lr_hops;
5151                                 if (route_hops == LNET_UNDEFINED_HOPS)
5152                                         route_hops = 1;
5153                                 if (shortest == NULL ||
5154                                     route_hops < shortest_hops) {
5155                                         shortest = route;
5156                                         shortest_hops = route_hops;
5157                                 }
5158                         }
5159
5160                         LASSERT(shortest != NULL);
5161                         hops = shortest_hops;
5162                         if (srcnidp != NULL) {
5163                                 struct lnet_net *net;
5164                                 net = lnet_get_net_locked(shortest->lr_lnet);
5165                                 LASSERT(net);
5166                                 ni = lnet_get_next_ni_locked(net, NULL);
5167                                 *srcnidp = ni->ni_nid;
5168                         }
5169                         if (orderp != NULL)
5170                                 *orderp = order;
5171                         lnet_net_unlock(cpt);
5172                         return hops + 1;
5173                 }
5174                 order++;
5175         }
5176
5177         lnet_net_unlock(cpt);
5178         return -EHOSTUNREACH;
5179 }
5180 EXPORT_SYMBOL(LNetDist);
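
/* Illustrative only: a compiled-out sketch of querying reachability of a
 * peer NID with LNetDist() above. */
#if 0
static void
example_dist(lnet_nid_t dstnid)
{
        lnet_nid_t srcnid = LNET_NID_ANY;
        __u32 order = 0;
        int dist;

        dist = LNetDist(dstnid, &srcnid, &order);
        if (dist < 0)
                CERROR("%s is unreachable: %d\n",
                       libcfs_nid2str(dstnid), dist);
        else
                CDEBUG(D_NET, "%s: distance %d via %s, order %u\n",
                       libcfs_nid2str(dstnid), dist,
                       libcfs_nid2str(srcnid), order);
}
#endif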