1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/lnet/lib-move.c
33  *
34  * Data movement routines
35  */
36
37 #define DEBUG_SUBSYSTEM S_LNET
38
39 #include <linux/pagemap.h>
40
41 #include <lnet/lib-lnet.h>
42 #include <linux/nsproxy.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
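/*
 * Working state for selecting a send pathway: the chosen local NI and
 * destination peer NI, the gateway (peer and peer NI) when routing, the
 * destination/source/router NIDs, the CPTs involved and the send-case
 * bitmask used by the selection logic below.
 */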
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         lnet_nid_t sd_dst_nid;
59         lnet_nid_t sd_src_nid;
60         lnet_nid_t sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline struct lnet_comm_count *
67 get_stats_counts(struct lnet_element_stats *stats,
68                  enum lnet_stats_type stats_type)
69 {
70         switch (stats_type) {
71         case LNET_STATS_TYPE_SEND:
72                 return &stats->el_send_stats;
73         case LNET_STATS_TYPE_RECV:
74                 return &stats->el_recv_stats;
75         case LNET_STATS_TYPE_DROP:
76                 return &stats->el_drop_stats;
77         default:
78                 CERROR("Unknown stats type\n");
79         }
80
81         return NULL;
82 }
83
84 void lnet_incr_stats(struct lnet_element_stats *stats,
85                      enum lnet_msg_type msg_type,
86                      enum lnet_stats_type stats_type)
87 {
88         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
89         if (!counts)
90                 return;
91
92         switch (msg_type) {
93         case LNET_MSG_ACK:
94                 atomic_inc(&counts->co_ack_count);
95                 break;
96         case LNET_MSG_PUT:
97                 atomic_inc(&counts->co_put_count);
98                 break;
99         case LNET_MSG_GET:
100                 atomic_inc(&counts->co_get_count);
101                 break;
102         case LNET_MSG_REPLY:
103                 atomic_inc(&counts->co_reply_count);
104                 break;
105         case LNET_MSG_HELLO:
106                 atomic_inc(&counts->co_hello_count);
107                 break;
108         default:
109                 CERROR("There is a BUG in the code. Unknown message type\n");
110                 break;
111         }
112 }
113
114 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
115                      enum lnet_stats_type stats_type)
116 {
117         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
118         if (!counts)
119                 return 0;
120
121         return (atomic_read(&counts->co_ack_count) +
122                 atomic_read(&counts->co_put_count) +
123                 atomic_read(&counts->co_get_count) +
124                 atomic_read(&counts->co_reply_count) +
125                 atomic_read(&counts->co_hello_count));
126 }
127
128 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
129                                 struct lnet_comm_count *counts)
130 {
131         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
132         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
133         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
134         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
135         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
136 }
137
138 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
139                               struct lnet_element_stats *stats)
140 {
141         struct lnet_comm_count *counts;
142
143         LASSERT(msg_stats);
144         LASSERT(stats);
145
146         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
147         if (!counts)
148                 return;
149         assign_stats(&msg_stats->im_send_stats, counts);
150
151         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
152         if (!counts)
153                 return;
154         assign_stats(&msg_stats->im_recv_stats, counts);
155
156         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
157         if (!counts)
158                 return;
159         assign_stats(&msg_stats->im_drop_stats, counts);
160 }
161
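/*
 * Fault-injection control: a non-zero @threshold adds @nid to the
 * test-peer list; a zero @threshold removes matching entries (every
 * entry when @nid is LNET_NID_ANY, plus any already-expired entries).
 */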
162 int
163 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
164 {
165         struct lnet_test_peer *tp;
166         struct list_head *el;
167         struct list_head *next;
168         struct list_head  cull;
169
170         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
171         if (threshold != 0) {
172                 /* Adding a new entry */
173                 LIBCFS_ALLOC(tp, sizeof(*tp));
174                 if (tp == NULL)
175                         return -ENOMEM;
176
177                 tp->tp_nid = nid;
178                 tp->tp_threshold = threshold;
179
180                 lnet_net_lock(0);
181                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
182                 lnet_net_unlock(0);
183                 return 0;
184         }
185
186         /* removing entries */
187         INIT_LIST_HEAD(&cull);
188
189         lnet_net_lock(0);
190
191         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
192                 tp = list_entry(el, struct lnet_test_peer, tp_list);
193
194                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
195                     nid == LNET_NID_ANY ||      /* removing all entries */
196                     tp->tp_nid == nid) {        /* matched this one */
197                         list_del(&tp->tp_list);
198                         list_add(&tp->tp_list, &cull);
199                 }
200         }
201
202         lnet_net_unlock(0);
203
204         while (!list_empty(&cull)) {
205                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
206
207                 list_del(&tp->tp_list);
208                 LIBCFS_FREE(tp, sizeof(*tp));
209         }
210         return 0;
211 }
212
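/*
 * Check @nid against the test-peer list; returns 1 if the message
 * should be dropped (decrementing the entry's remaining threshold),
 * 0 otherwise.  Expired entries are only culled on outgoing tests.
 */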
213 static int
214 fail_peer (lnet_nid_t nid, int outgoing)
215 {
216         struct lnet_test_peer *tp;
217         struct list_head *el;
218         struct list_head *next;
219         struct list_head  cull;
220         int               fail = 0;
221
222         INIT_LIST_HEAD(&cull);
223
224         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
225         lnet_net_lock(0);
226
227         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
228                 tp = list_entry(el, struct lnet_test_peer, tp_list);
229
230                 if (tp->tp_threshold == 0) {
231                         /* zombie entry */
232                         if (outgoing) {
233                                 /* only cull zombies on outgoing tests,
234                                  * since we may be at interrupt priority on
235                                  * incoming messages. */
236                                 list_del(&tp->tp_list);
237                                 list_add(&tp->tp_list, &cull);
238                         }
239                         continue;
240                 }
241
242                 if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
243                     nid == tp->tp_nid) {                /* fail this peer */
244                         fail = 1;
245
246                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
247                                 tp->tp_threshold--;
248                                 if (outgoing &&
249                                     tp->tp_threshold == 0) {
250                                         /* see above */
251                                         list_del(&tp->tp_list);
252                                         list_add(&tp->tp_list, &cull);
253                                 }
254                         }
255                         break;
256                 }
257         }
258
259         lnet_net_unlock(0);
260
261         while (!list_empty(&cull)) {
262                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
263                 list_del(&tp->tp_list);
264
265                 LIBCFS_FREE(tp, sizeof(*tp));
266         }
267
268         return fail;
269 }
270
271 unsigned int
272 lnet_iov_nob(unsigned int niov, struct kvec *iov)
273 {
274         unsigned int nob = 0;
275
276         LASSERT(niov == 0 || iov != NULL);
277         while (niov-- > 0)
278                 nob += (iov++)->iov_len;
279
280         return (nob);
281 }
282 EXPORT_SYMBOL(lnet_iov_nob);
283
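/*
 * Copy @nob bytes between two kvec arrays: skip the whole fragments that
 * precede the source and destination offsets, then copy in chunks of
 * MIN(remaining destination fragment, remaining source fragment, nob),
 * advancing each side to its next fragment as it fills or empties.
 */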
284 void
285 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
286                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
287                   unsigned int nob)
288 {
289         /* NB diov, siov are READ-ONLY */
290         unsigned int  this_nob;
291
292         if (nob == 0)
293                 return;
294
295         /* skip complete frags before 'doffset' */
296         LASSERT(ndiov > 0);
297         while (doffset >= diov->iov_len) {
298                 doffset -= diov->iov_len;
299                 diov++;
300                 ndiov--;
301                 LASSERT(ndiov > 0);
302         }
303
304         /* skip complete frags before 'soffset' */
305         LASSERT(nsiov > 0);
306         while (soffset >= siov->iov_len) {
307                 soffset -= siov->iov_len;
308                 siov++;
309                 nsiov--;
310                 LASSERT(nsiov > 0);
311         }
312
313         do {
314                 LASSERT(ndiov > 0);
315                 LASSERT(nsiov > 0);
316                 this_nob = MIN(diov->iov_len - doffset,
317                                siov->iov_len - soffset);
318                 this_nob = MIN(this_nob, nob);
319
320                 memcpy((char *)diov->iov_base + doffset,
321                        (char *)siov->iov_base + soffset, this_nob);
322                 nob -= this_nob;
323
324                 if (diov->iov_len > doffset + this_nob) {
325                         doffset += this_nob;
326                 } else {
327                         diov++;
328                         ndiov--;
329                         doffset = 0;
330                 }
331
332                 if (siov->iov_len > soffset + this_nob) {
333                         soffset += this_nob;
334                 } else {
335                         siov++;
336                         nsiov--;
337                         soffset = 0;
338                 }
339         } while (nob > 0);
340 }
341 EXPORT_SYMBOL(lnet_copy_iov2iov);
342
343 int
344 lnet_extract_iov(int dst_niov, struct kvec *dst,
345                  int src_niov, struct kvec *src,
346                  unsigned int offset, unsigned int len)
347 {
348         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
349          * for exactly 'len' bytes, and return the number of entries.
350          * NB not destructive to 'src' */
351         unsigned int    frag_len;
352         unsigned int    niov;
353
354         if (len == 0)                           /* no data => */
355                 return (0);                     /* no frags */
356
357         LASSERT(src_niov > 0);
358         while (offset >= src->iov_len) {      /* skip initial frags */
359                 offset -= src->iov_len;
360                 src_niov--;
361                 src++;
362                 LASSERT(src_niov > 0);
363         }
364
365         niov = 1;
366         for (;;) {
367                 LASSERT(src_niov > 0);
368                 LASSERT((int)niov <= dst_niov);
369
370                 frag_len = src->iov_len - offset;
371                 dst->iov_base = ((char *)src->iov_base) + offset;
372
373                 if (len <= frag_len) {
374                         dst->iov_len = len;
375                         return (niov);
376                 }
377
378                 dst->iov_len = frag_len;
379
380                 len -= frag_len;
381                 dst++;
382                 src++;
383                 niov++;
384                 src_niov--;
385                 offset = 0;
386         }
387 }
388 EXPORT_SYMBOL(lnet_extract_iov);
389
390
391 unsigned int
392 lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
393 {
394         unsigned int  nob = 0;
395
396         LASSERT(niov == 0 || kiov != NULL);
397         while (niov-- > 0)
398                 nob += (kiov++)->kiov_len;
399
400         return (nob);
401 }
402 EXPORT_SYMBOL(lnet_kiov_nob);
403
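/*
 * Same fragment-walking copy as lnet_copy_iov2iov(), but over page (kiov)
 * fragments: each fragment is kmap()'d on first use and kunmap()'d once
 * exhausted, so this must not be called from interrupt context.
 */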
404 void
405 lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
406                     unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
407                     unsigned int nob)
408 {
409         /* NB diov, siov are READ-ONLY */
410         unsigned int    this_nob;
411         char           *daddr = NULL;
412         char           *saddr = NULL;
413
414         if (nob == 0)
415                 return;
416
417         LASSERT (!in_interrupt ());
418
419         LASSERT (ndiov > 0);
420         while (doffset >= diov->kiov_len) {
421                 doffset -= diov->kiov_len;
422                 diov++;
423                 ndiov--;
424                 LASSERT(ndiov > 0);
425         }
426
427         LASSERT(nsiov > 0);
428         while (soffset >= siov->kiov_len) {
429                 soffset -= siov->kiov_len;
430                 siov++;
431                 nsiov--;
432                 LASSERT(nsiov > 0);
433         }
434
435         do {
436                 LASSERT(ndiov > 0);
437                 LASSERT(nsiov > 0);
438                 this_nob = MIN(diov->kiov_len - doffset,
439                                siov->kiov_len - soffset);
440                 this_nob = MIN(this_nob, nob);
441
442                 if (daddr == NULL)
443                         daddr = ((char *)kmap(diov->kiov_page)) +
444                                 diov->kiov_offset + doffset;
445                 if (saddr == NULL)
446                         saddr = ((char *)kmap(siov->kiov_page)) +
447                                 siov->kiov_offset + soffset;
448
449                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
450                  * However, in practice at least one of the kiovs will consist of
451                  * already-mapped kernel pages, so the map/unmap calls are no-ops */
452
453                 memcpy (daddr, saddr, this_nob);
454                 nob -= this_nob;
455
456                 if (diov->kiov_len > doffset + this_nob) {
457                         daddr += this_nob;
458                         doffset += this_nob;
459                 } else {
460                         kunmap(diov->kiov_page);
461                         daddr = NULL;
462                         diov++;
463                         ndiov--;
464                         doffset = 0;
465                 }
466
467                 if (siov->kiov_len > soffset + this_nob) {
468                         saddr += this_nob;
469                         soffset += this_nob;
470                 } else {
471                         kunmap(siov->kiov_page);
472                         saddr = NULL;
473                         siov++;
474                         nsiov--;
475                         soffset = 0;
476                 }
477         } while (nob > 0);
478
479         if (daddr != NULL)
480                 kunmap(diov->kiov_page);
481         if (saddr != NULL)
482                 kunmap(siov->kiov_page);
483 }
484 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
485
486 void
487 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
488                     unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
489                     unsigned int nob)
490 {
491         /* NB iov, kiov are READ-ONLY */
492         unsigned int    this_nob;
493         char           *addr = NULL;
494
495         if (nob == 0)
496                 return;
497
498         LASSERT (!in_interrupt ());
499
500         LASSERT (niov > 0);
501         while (iovoffset >= iov->iov_len) {
502                 iovoffset -= iov->iov_len;
503                 iov++;
504                 niov--;
505                 LASSERT(niov > 0);
506         }
507
508         LASSERT(nkiov > 0);
509         while (kiovoffset >= kiov->kiov_len) {
510                 kiovoffset -= kiov->kiov_len;
511                 kiov++;
512                 nkiov--;
513                 LASSERT(nkiov > 0);
514         }
515
516         do {
517                 LASSERT(niov > 0);
518                 LASSERT(nkiov > 0);
519                 this_nob = MIN(iov->iov_len - iovoffset,
520                                kiov->kiov_len - kiovoffset);
521                 this_nob = MIN(this_nob, nob);
522
523                 if (addr == NULL)
524                         addr = ((char *)kmap(kiov->kiov_page)) +
525                                 kiov->kiov_offset + kiovoffset;
526
527                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
528                 nob -= this_nob;
529
530                 if (iov->iov_len > iovoffset + this_nob) {
531                         iovoffset += this_nob;
532                 } else {
533                         iov++;
534                         niov--;
535                         iovoffset = 0;
536                 }
537
538                 if (kiov->kiov_len > kiovoffset + this_nob) {
539                         addr += this_nob;
540                         kiovoffset += this_nob;
541                 } else {
542                         kunmap(kiov->kiov_page);
543                         addr = NULL;
544                         kiov++;
545                         nkiov--;
546                         kiovoffset = 0;
547                 }
548
549         } while (nob > 0);
550
551         if (addr != NULL)
552                 kunmap(kiov->kiov_page);
553 }
554 EXPORT_SYMBOL(lnet_copy_kiov2iov);
555
556 void
557 lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
558                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
559                    unsigned int nob)
560 {
561         /* NB kiov, iov are READ-ONLY */
562         unsigned int    this_nob;
563         char           *addr = NULL;
564
565         if (nob == 0)
566                 return;
567
568         LASSERT (!in_interrupt ());
569
570         LASSERT (nkiov > 0);
571         while (kiovoffset >= kiov->kiov_len) {
572                 kiovoffset -= kiov->kiov_len;
573                 kiov++;
574                 nkiov--;
575                 LASSERT(nkiov > 0);
576         }
577
578         LASSERT(niov > 0);
579         while (iovoffset >= iov->iov_len) {
580                 iovoffset -= iov->iov_len;
581                 iov++;
582                 niov--;
583                 LASSERT(niov > 0);
584         }
585
586         do {
587                 LASSERT(nkiov > 0);
588                 LASSERT(niov > 0);
589                 this_nob = MIN(kiov->kiov_len - kiovoffset,
590                                iov->iov_len - iovoffset);
591                 this_nob = MIN(this_nob, nob);
592
593                 if (addr == NULL)
594                         addr = ((char *)kmap(kiov->kiov_page)) +
595                                 kiov->kiov_offset + kiovoffset;
596
597                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
598                 nob -= this_nob;
599
600                 if (kiov->kiov_len > kiovoffset + this_nob) {
601                         addr += this_nob;
602                         kiovoffset += this_nob;
603                 } else {
604                         kunmap(kiov->kiov_page);
605                         addr = NULL;
606                         kiov++;
607                         nkiov--;
608                         kiovoffset = 0;
609                 }
610
611                 if (iov->iov_len > iovoffset + this_nob) {
612                         iovoffset += this_nob;
613                 } else {
614                         iov++;
615                         niov--;
616                         iovoffset = 0;
617                 }
618         } while (nob > 0);
619
620         if (addr != NULL)
621                 kunmap(kiov->kiov_page);
622 }
623 EXPORT_SYMBOL(lnet_copy_iov2kiov);
624
625 int
626 lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
627                   int src_niov, lnet_kiov_t *src,
628                   unsigned int offset, unsigned int len)
629 {
630         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
631          * for exactly 'len' bytes, and return the number of entries.
632          * NB not destructive to 'src' */
633         unsigned int    frag_len;
634         unsigned int    niov;
635
636         if (len == 0)                           /* no data => */
637                 return (0);                     /* no frags */
638
639         LASSERT(src_niov > 0);
640         while (offset >= src->kiov_len) {      /* skip initial frags */
641                 offset -= src->kiov_len;
642                 src_niov--;
643                 src++;
644                 LASSERT(src_niov > 0);
645         }
646
647         niov = 1;
648         for (;;) {
649                 LASSERT(src_niov > 0);
650                 LASSERT((int)niov <= dst_niov);
651
652                 frag_len = src->kiov_len - offset;
653                 dst->kiov_page = src->kiov_page;
654                 dst->kiov_offset = src->kiov_offset + offset;
655
656                 if (len <= frag_len) {
657                         dst->kiov_len = len;
658                         LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
659                         return niov;
660                 }
661
662                 dst->kiov_len = frag_len;
663                 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
664
665                 len -= frag_len;
666                 dst++;
667                 src++;
668                 niov++;
669                 src_niov--;
670                 offset = 0;
671         }
672 }
673 EXPORT_SYMBOL(lnet_extract_kiov);
674
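/*
 * Hand a message to the NI's LND via lnd_recv(): receive @mlen of the
 * @rlen-byte payload into the message's iov/kiov at @offset.  A negative
 * return from the LND finalizes the message with that error.
 */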
675 void
676 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
677              int delayed, unsigned int offset, unsigned int mlen,
678              unsigned int rlen)
679 {
680         unsigned int  niov = 0;
681         struct kvec *iov = NULL;
682         lnet_kiov_t  *kiov = NULL;
683         int           rc;
684
685         LASSERT (!in_interrupt ());
686         LASSERT (mlen == 0 || msg != NULL);
687
688         if (msg != NULL) {
689                 LASSERT(msg->msg_receiving);
690                 LASSERT(!msg->msg_sending);
691                 LASSERT(rlen == msg->msg_len);
692                 LASSERT(mlen <= msg->msg_len);
693                 LASSERT(msg->msg_offset == offset);
694                 LASSERT(msg->msg_wanted == mlen);
695
696                 msg->msg_receiving = 0;
697
698                 if (mlen != 0) {
699                         niov = msg->msg_niov;
700                         iov  = msg->msg_iov;
701                         kiov = msg->msg_kiov;
702
703                         LASSERT (niov > 0);
704                         LASSERT ((iov == NULL) != (kiov == NULL));
705                 }
706         }
707
708         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
709                                              niov, iov, kiov, offset, mlen,
710                                              rlen);
711         if (rc < 0)
712                 lnet_finalize(msg, rc);
713 }
714
715 static void
716 lnet_setpayloadbuffer(struct lnet_msg *msg)
717 {
718         struct lnet_libmd *md = msg->msg_md;
719
720         LASSERT(msg->msg_len > 0);
721         LASSERT(!msg->msg_routing);
722         LASSERT(md != NULL);
723         LASSERT(msg->msg_niov == 0);
724         LASSERT(msg->msg_iov == NULL);
725         LASSERT(msg->msg_kiov == NULL);
726
727         msg->msg_niov = md->md_niov;
728         if ((md->md_options & LNET_MD_KIOV) != 0)
729                 msg->msg_kiov = md->md_iov.kiov;
730         else
731                 msg->msg_iov = md->md_iov.iov;
732 }
733
734 void
735 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
736                unsigned int offset, unsigned int len)
737 {
738         msg->msg_type = type;
739         msg->msg_target = target;
740         msg->msg_len = len;
741         msg->msg_offset = offset;
742
743         if (len != 0)
744                 lnet_setpayloadbuffer(msg);
745
746         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
747         msg->msg_hdr.type           = cpu_to_le32(type);
748         /* dest_nid will be overwritten by lnet_select_pathway() */
749         msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
750         msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
751         /* src_nid will be set later */
752         msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
753         msg->msg_hdr.payload_length = cpu_to_le32(len);
754 }
755
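/*
 * Pass a message that holds its tx credits (or is going out over the
 * loopback NI) to the LND via lnd_send(); on failure mark it
 * non-resendable and finalize it with the error.
 */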
756 static void
757 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
758 {
759         void   *priv = msg->msg_private;
760         int rc;
761
762         LASSERT (!in_interrupt ());
763         LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
764                  (msg->msg_txcredit && msg->msg_peertxcredit));
765
766         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
767         if (rc < 0) {
768                 msg->msg_no_resend = true;
769                 lnet_finalize(msg, rc);
770         }
771 }
772
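/*
 * Ask the LND to pre-stage the receive via lnd_eager_recv() so the
 * message can be queued and delivered later; callers rely on a negative
 * return code on failure.
 */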
773 static int
774 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
775 {
776         int     rc;
777
778         LASSERT(!msg->msg_sending);
779         LASSERT(msg->msg_receiving);
780         LASSERT(!msg->msg_rx_ready_delay);
781         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
782
783         msg->msg_rx_ready_delay = 1;
784         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
785                                                   &msg->msg_private);
786         if (rc != 0) {
787                 CERROR("recv from %s / send to %s aborted: "
788                        "eager_recv failed %d\n",
789                        libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
790                        libcfs_id2str(msg->msg_target), rc);
791                 LASSERT(rc < 0); /* required by my callers */
792         }
793
794         return rc;
795 }
796
797 static bool
798 lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
799 {
800         time64_t deadline;
801
802         deadline = lpni->lpni_last_alive +
803                    lpni->lpni_net->net_tunables.lct_peer_timeout;
804
805         /*
806          * assume peer_ni is alive as long as we're within the configured
807          * peer timeout
808          */
809         if (deadline > now)
810                 return false;
811
812         return true;
813 }
814
815 /* NB: returns 1 when alive, 0 when dead, negative when error;
816  *     may drop the lnet_net_lock */
817 static int
818 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
819                        struct lnet_msg *msg)
820 {
821         time64_t now = ktime_get_seconds();
822
823         if (!lnet_peer_aliveness_enabled(lpni))
824                 return -ENODEV;
825
826         /*
827          * If we're resending a message, let's attempt to send it even if
828          * the peer is down to fulfill our resend quota on the message
829          */
830         if (msg->msg_retry_count > 0)
831                 return 1;
832
833         /* try to send recovery messages regardless */
834         if (msg->msg_recovery)
835                 return 1;
836
837         /* always send any responses */
838         if (msg->msg_type == LNET_MSG_ACK ||
839             msg->msg_type == LNET_MSG_REPLY)
840                 return 1;
841
842         if (!lnet_is_peer_deadline_passed(lpni, now))
843                 return 1;
844
845         return lnet_is_peer_ni_alive(lpni);
846 }
847
848 /**
849  * \param msg The message to be sent.
850  * \param do_send True if lnet_ni_send() should be called in this function.
851  *        lnet_send() is going to lnet_net_unlock immediately after this, so
852  *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
853  *
854  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
855  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
856  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
857  * \retval -ECANCELED If the MD of the message has been unlinked.
858  */
859 static int
860 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
861 {
862         struct lnet_peer_ni     *lp = msg->msg_txpeer;
863         struct lnet_ni          *ni = msg->msg_txni;
864         int                     cpt = msg->msg_tx_cpt;
865         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
866
867         /* non-lnet_send() callers have checked before */
868         LASSERT(!do_send || msg->msg_tx_delayed);
869         LASSERT(!msg->msg_receiving);
870         LASSERT(msg->msg_tx_committed);
871         /* can't get here if we're sending to the loopback interface */
872         LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
873
874         /* NB 'lp' is always the next hop */
875         if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
876             lnet_peer_alive_locked(ni, lp, msg) == 0) {
877                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
878                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
879                         msg->msg_len;
880                 lnet_net_unlock(cpt);
881                 if (msg->msg_txpeer)
882                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
883                                         msg->msg_type,
884                                         LNET_STATS_TYPE_DROP);
885                 if (msg->msg_txni)
886                         lnet_incr_stats(&msg->msg_txni->ni_stats,
887                                         msg->msg_type,
888                                         LNET_STATS_TYPE_DROP);
889
890                 CNETERR("Dropping message for %s: peer not alive\n",
891                         libcfs_id2str(msg->msg_target));
892                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
893                 if (do_send)
894                         lnet_finalize(msg, -EHOSTUNREACH);
895
896                 lnet_net_lock(cpt);
897                 return -EHOSTUNREACH;
898         }
899
900         if (msg->msg_md != NULL &&
901             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
902                 lnet_net_unlock(cpt);
903
904                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
905                         "called on the MD/ME.\n",
906                         libcfs_id2str(msg->msg_target));
907                 if (do_send) {
908                         msg->msg_no_resend = true;
909                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
910                                msg, libcfs_id2str(msg->msg_target));
911                         lnet_finalize(msg, -ECANCELED);
912                 }
913
914                 lnet_net_lock(cpt);
915                 return -ECANCELED;
916         }
917
918         if (!msg->msg_peertxcredit) {
919                 spin_lock(&lp->lpni_lock);
920                 LASSERT((lp->lpni_txcredits < 0) ==
921                         !list_empty(&lp->lpni_txq));
922
923                 msg->msg_peertxcredit = 1;
924                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
925                 lp->lpni_txcredits--;
926
927                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
928                         lp->lpni_mintxcredits = lp->lpni_txcredits;
929
930                 if (lp->lpni_txcredits < 0) {
931                         msg->msg_tx_delayed = 1;
932                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
933                         spin_unlock(&lp->lpni_lock);
934                         return LNET_CREDIT_WAIT;
935                 }
936                 spin_unlock(&lp->lpni_lock);
937         }
938
939         if (!msg->msg_txcredit) {
940                 LASSERT((tq->tq_credits < 0) ==
941                         !list_empty(&tq->tq_delayed));
942
943                 msg->msg_txcredit = 1;
944                 tq->tq_credits--;
945                 atomic_dec(&ni->ni_tx_credits);
946
947                 if (tq->tq_credits < tq->tq_credits_min)
948                         tq->tq_credits_min = tq->tq_credits;
949
950                 if (tq->tq_credits < 0) {
951                         msg->msg_tx_delayed = 1;
952                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
953                         return LNET_CREDIT_WAIT;
954                 }
955         }
956
957         /* unset the tx_delay flag as we're going to send it now */
958         msg->msg_tx_delayed = 0;
959
960         if (do_send) {
961                 lnet_net_unlock(cpt);
962                 lnet_ni_send(ni, msg);
963                 lnet_net_lock(cpt);
964         }
965         return LNET_CREDIT_OK;
966 }
967
968
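/*
 * Pick the router buffer pool for this CPT whose buffers are the smallest
 * that can still hold the message payload.
 */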
969 static struct lnet_rtrbufpool *
970 lnet_msg2bufpool(struct lnet_msg *msg)
971 {
972         struct lnet_rtrbufpool  *rbp;
973         int                     cpt;
974
975         LASSERT(msg->msg_rx_committed);
976
977         cpt = msg->msg_rx_cpt;
978         rbp = &the_lnet.ln_rtrpools[cpt][0];
979
980         LASSERT(msg->msg_len <= LNET_MTU);
981         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
982                 rbp++;
983                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
984         }
985
986         return rbp;
987 }
988
989 static int
990 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
991 {
992         /* lnet_parse is going to lnet_net_unlock immediately after this, so it
993          * sets do_recv FALSE to skip the unlock/recv/lock sequence.
994          * Return LNET_CREDIT_WAIT if the msg blocked and LNET_CREDIT_OK if it
995          * was received or is OK to receive */
996         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
997         struct lnet_peer *lp;
998         struct lnet_rtrbufpool *rbp;
999         struct lnet_rtrbuf *rb;
1000
1001         LASSERT(msg->msg_iov == NULL);
1002         LASSERT(msg->msg_kiov == NULL);
1003         LASSERT(msg->msg_niov == 0);
1004         LASSERT(msg->msg_routing);
1005         LASSERT(msg->msg_receiving);
1006         LASSERT(!msg->msg_sending);
1007         LASSERT(lpni->lpni_peer_net);
1008         LASSERT(lpni->lpni_peer_net->lpn_peer);
1009
1010         lp = lpni->lpni_peer_net->lpn_peer;
1011
1012         /* non-lnet_parse callers only receive delayed messages */
1013         LASSERT(!do_recv || msg->msg_rx_delayed);
1014
1015         if (!msg->msg_peerrtrcredit) {
1016                 /* lpni_lock protects the credit manipulation */
1017                 spin_lock(&lpni->lpni_lock);
1018                 /* lp_lock protects the lp_rtrq */
1019                 spin_lock(&lp->lp_lock);
1020
1021                 msg->msg_peerrtrcredit = 1;
1022                 lpni->lpni_rtrcredits--;
1023                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
1024                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
1025
1026                 if (lpni->lpni_rtrcredits < 0) {
1027                         /* must have checked eager_recv before here */
1028                         LASSERT(msg->msg_rx_ready_delay);
1029                         msg->msg_rx_delayed = 1;
1030                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1031                         spin_unlock(&lp->lp_lock);
1032                         spin_unlock(&lpni->lpni_lock);
1033                         return LNET_CREDIT_WAIT;
1034                 }
1035                 spin_unlock(&lp->lp_lock);
1036                 spin_unlock(&lpni->lpni_lock);
1037         }
1038
1039         rbp = lnet_msg2bufpool(msg);
1040
1041         if (!msg->msg_rtrcredit) {
1042                 msg->msg_rtrcredit = 1;
1043                 rbp->rbp_credits--;
1044                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1045                         rbp->rbp_mincredits = rbp->rbp_credits;
1046
1047                 if (rbp->rbp_credits < 0) {
1048                         /* must have checked eager_recv before here */
1049                         LASSERT(msg->msg_rx_ready_delay);
1050                         msg->msg_rx_delayed = 1;
1051                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1052                         return LNET_CREDIT_WAIT;
1053                 }
1054         }
1055
1056         LASSERT(!list_empty(&rbp->rbp_bufs));
1057         rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
1058         list_del(&rb->rb_list);
1059
1060         msg->msg_niov = rbp->rbp_npages;
1061         msg->msg_kiov = &rb->rb_kiov[0];
1062
1063         /* unset the msg_rx_delayed flag since we're receiving the message */
1064         msg->msg_rx_delayed = 0;
1065
1066         if (do_recv) {
1067                 int cpt = msg->msg_rx_cpt;
1068
1069                 lnet_net_unlock(cpt);
1070                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1071                              0, msg->msg_len, msg->msg_len);
1072                 lnet_net_lock(cpt);
1073         }
1074         return LNET_CREDIT_OK;
1075 }
1076
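/*
 * Give back the NI and peer tx credits a message consumed and, if other
 * messages are queued waiting on those credits, post the next one.  Also
 * drops the references on the tx NI and tx peer NI.
 */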
1077 void
1078 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1079 {
1080         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1081         struct lnet_ni          *txni = msg->msg_txni;
1082         struct lnet_msg         *msg2;
1083
1084         if (msg->msg_txcredit) {
1085                 struct lnet_ni       *ni = msg->msg_txni;
1086                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1087
1088                 /* give back NI txcredits */
1089                 msg->msg_txcredit = 0;
1090
1091                 LASSERT((tq->tq_credits < 0) ==
1092                         !list_empty(&tq->tq_delayed));
1093
1094                 tq->tq_credits++;
1095                 atomic_inc(&ni->ni_tx_credits);
1096                 if (tq->tq_credits <= 0) {
1097                         msg2 = list_entry(tq->tq_delayed.next,
1098                                           struct lnet_msg, msg_list);
1099                         list_del(&msg2->msg_list);
1100
1101                         LASSERT(msg2->msg_txni == ni);
1102                         LASSERT(msg2->msg_tx_delayed);
1103                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1104
1105                         (void) lnet_post_send_locked(msg2, 1);
1106                 }
1107         }
1108
1109         if (msg->msg_peertxcredit) {
1110                 /* give back peer txcredits */
1111                 msg->msg_peertxcredit = 0;
1112
1113                 spin_lock(&txpeer->lpni_lock);
1114                 LASSERT((txpeer->lpni_txcredits < 0) ==
1115                         !list_empty(&txpeer->lpni_txq));
1116
1117                 txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
1118                 LASSERT(txpeer->lpni_txqnob >= 0);
1119
1120                 txpeer->lpni_txcredits++;
1121                 if (txpeer->lpni_txcredits <= 0) {
1122                         int msg2_cpt;
1123
1124                         msg2 = list_entry(txpeer->lpni_txq.next,
1125                                               struct lnet_msg, msg_list);
1126                         list_del(&msg2->msg_list);
1127                         spin_unlock(&txpeer->lpni_lock);
1128
1129                         LASSERT(msg2->msg_txpeer == txpeer);
1130                         LASSERT(msg2->msg_tx_delayed);
1131
1132                         msg2_cpt = msg2->msg_tx_cpt;
1133
1134                         /*
1135                          * The msg_cpt can be different from the msg2_cpt
1136                          * so we need to make sure we lock the correct cpt
1137                          * for msg2.
1138                          * Once we call lnet_post_send_locked() it is no
1139                          * longer safe to access msg2, since it could've
1140                          * been freed by lnet_finalize(), but we still
1141                          * need to relock the correct cpt, so we cache the
1142                          * msg2_cpt for the purpose of the check that
1143                          * follows the call to lnet_post_send_locked().
1144                          */
1145                         if (msg2_cpt != msg->msg_tx_cpt) {
1146                                 lnet_net_unlock(msg->msg_tx_cpt);
1147                                 lnet_net_lock(msg2_cpt);
1148                         }
1149                         (void) lnet_post_send_locked(msg2, 1);
1150                         if (msg2_cpt != msg->msg_tx_cpt) {
1151                                 lnet_net_unlock(msg2_cpt);
1152                                 lnet_net_lock(msg->msg_tx_cpt);
1153                         }
1154                 } else {
1155                         spin_unlock(&txpeer->lpni_lock);
1156                 }
1157         }
1158
1159         if (txni != NULL) {
1160                 msg->msg_txni = NULL;
1161                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1162         }
1163
1164         if (txpeer != NULL) {
1165                 msg->msg_txpeer = NULL;
1166                 lnet_peer_ni_decref_locked(txpeer);
1167         }
1168 }
1169
1170 void
1171 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1172 {
1173         struct lnet_msg *msg;
1174
1175         if (list_empty(&rbp->rbp_msgs))
1176                 return;
1177         msg = list_entry(rbp->rbp_msgs.next,
1178                          struct lnet_msg, msg_list);
1179         list_del(&msg->msg_list);
1180
1181         (void)lnet_post_routed_recv_locked(msg, 1);
1182 }
1183
1184 void
1185 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1186 {
1187         struct lnet_msg *msg;
1188         struct lnet_msg *tmp;
1189
1190         lnet_net_unlock(cpt);
1191
1192         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1193                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1194                              0, 0, 0, msg->msg_hdr.payload_length);
1195                 list_del_init(&msg->msg_list);
1196                 msg->msg_no_resend = true;
1197                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1198                 lnet_finalize(msg, -ECANCELED);
1199         }
1200
1201         lnet_net_lock(cpt);
1202 }
1203
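/*
 * Give back the router buffer and peer router credits a routed message
 * consumed: the buffer is returned to (or discarded from) its pool and
 * the next blocked message, if any, is scheduled.  Also drops the
 * references on the rx NI and rx peer NI.
 */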
1204 void
1205 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1206 {
1207         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1208         struct lnet_peer *lp;
1209         struct lnet_ni *rxni = msg->msg_rxni;
1210         struct lnet_msg *msg2;
1211
1212         if (msg->msg_rtrcredit) {
1213                 /* give back global router credits */
1214                 struct lnet_rtrbuf *rb;
1215                 struct lnet_rtrbufpool *rbp;
1216
1217                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1218                  * there until it gets one allocated, or aborts the wait
1219                  * itself */
1220                 LASSERT(msg->msg_kiov != NULL);
1221
1222                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1223                 rbp = rb->rb_pool;
1224
1225                 msg->msg_kiov = NULL;
1226                 msg->msg_rtrcredit = 0;
1227
1228                 LASSERT(rbp == lnet_msg2bufpool(msg));
1229
1230                 LASSERT((rbp->rbp_credits > 0) ==
1231                         !list_empty(&rbp->rbp_bufs));
1232
1233                 /* If routing is now turned off, we just drop this buffer and
1234                  * don't bother trying to return credits.  */
1235                 if (!the_lnet.ln_routing) {
1236                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1237                         goto routing_off;
1238                 }
1239
1240                 /* It is possible that a user has lowered the desired number of
1241                  * buffers in this pool.  Make sure we never put back
1242                  * more buffers than the stated number. */
1243                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1244                         /* Discard this buffer so we don't have too
1245                          * many. */
1246                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1247                         rbp->rbp_nbuffers--;
1248                 } else {
1249                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1250                         rbp->rbp_credits++;
1251                         if (rbp->rbp_credits <= 0)
1252                                 lnet_schedule_blocked_locked(rbp);
1253                 }
1254         }
1255
1256 routing_off:
1257         if (msg->msg_peerrtrcredit) {
1258                 LASSERT(rxpeerni);
1259                 LASSERT(rxpeerni->lpni_peer_net);
1260                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1261
1262                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1263
1264                 /* give back peer router credits */
1265                 msg->msg_peerrtrcredit = 0;
1266
1267                 spin_lock(&rxpeerni->lpni_lock);
1268                 spin_lock(&lp->lp_lock);
1269
1270                 rxpeerni->lpni_rtrcredits++;
1271
1272                 /* drop all messages which are queued to be routed on that
1273                  * peer. */
1274                 if (!the_lnet.ln_routing) {
1275                         struct list_head drop;
1276                         INIT_LIST_HEAD(&drop);
1277                         list_splice_init(&lp->lp_rtrq, &drop);
1278                         spin_unlock(&lp->lp_lock);
1279                         spin_unlock(&rxpeerni->lpni_lock);
1280                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1281                 } else if (!list_empty(&lp->lp_rtrq)) {
1282                         int msg2_cpt;
1283
1284                         msg2 = list_entry(lp->lp_rtrq.next,
1285                                           struct lnet_msg, msg_list);
1286                         list_del(&msg2->msg_list);
1287                         msg2_cpt = msg2->msg_rx_cpt;
1288                         spin_unlock(&lp->lp_lock);
1289                         spin_unlock(&rxpeerni->lpni_lock);
1290                         /*
1291                          * messages on the lp_rtrq can be from any NID in
1292                          * the peer, which means they might have different
1293                          * cpts. We need to make sure we lock the right
1294                          * one.
1295                          */
1296                         if (msg2_cpt != msg->msg_rx_cpt) {
1297                                 lnet_net_unlock(msg->msg_rx_cpt);
1298                                 lnet_net_lock(msg2_cpt);
1299                         }
1300                         (void) lnet_post_routed_recv_locked(msg2, 1);
1301                         if (msg2_cpt != msg->msg_rx_cpt) {
1302                                 lnet_net_unlock(msg2_cpt);
1303                                 lnet_net_lock(msg->msg_rx_cpt);
1304                         }
1305                 } else {
1306                         spin_unlock(&lp->lp_lock);
1307                         spin_unlock(&rxpeerni->lpni_lock);
1308                 }
1309         }
1310         if (rxni != NULL) {
1311                 msg->msg_rxni = NULL;
1312                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1313         }
1314         if (rxpeerni != NULL) {
1315                 msg->msg_rxpeer = NULL;
1316                 lnet_peer_ni_decref_locked(rxpeerni);
1317         }
1318 }
1319
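/*
 * Prefer the peer NI with the smaller tx queue backlog, then the one with
 * more tx credits: returns 1 if @p1 is preferable, -1 if @p2 is, and 0 if
 * they are equivalent.
 */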
1320 static int
1321 lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
1322 {
1323         if (p1->lpni_txqnob < p2->lpni_txqnob)
1324                 return 1;
1325
1326         if (p1->lpni_txqnob > p2->lpni_txqnob)
1327                 return -1;
1328
1329         if (p1->lpni_txcredits > p2->lpni_txcredits)
1330                 return 1;
1331
1332         if (p1->lpni_txcredits < p2->lpni_txcredits)
1333                 return -1;
1334
1335         return 0;
1336 }
1337
1338 static struct lnet_peer_ni *
1339 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1340                     struct lnet_peer *peer,
1341                     struct lnet_peer_net *peer_net)
1342 {
1343         /*
1344          * Look at the peer NIs for the destination peer that connect
1345          * to the chosen net. If a peer_ni is preferred when using the
1346          * best_ni to communicate, we use that one. If there is no
1347          * preferred peer_ni, or there are multiple preferred peer_ni,
1348          * the available transmit credits are used. If the transmit
1349          * credits are equal, we round-robin over the peer_ni.
1350          */
1351         struct lnet_peer_ni *lpni = NULL;
1352         struct lnet_peer_ni *best_lpni = NULL;
1353         int best_lpni_credits = INT_MIN;
1354         bool preferred = false;
1355         bool ni_is_pref;
1356         int best_lpni_healthv = 0;
1357         int lpni_healthv;
1358
1359         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1360                 /*
1361                  * if the best_ni we've chosen already has this lpni
1362                  * preferred, then let's use it
1363                  */
1364                 if (best_ni) {
1365                         ni_is_pref = lnet_peer_is_pref_nid_locked(lpni,
1366                                                                 best_ni->ni_nid);
1367                         CDEBUG(D_NET, "%s ni_is_pref = %d\n",
1368                                libcfs_nid2str(best_ni->ni_nid), ni_is_pref);
1369                 } else {
1370                         ni_is_pref = false;
1371                 }
1372
1373                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1374
1375                 if (best_lpni)
1376                         CDEBUG(D_NET, "%s c:[%d, %d], s:[%d, %d]\n",
1377                                 libcfs_nid2str(lpni->lpni_nid),
1378                                 lpni->lpni_txcredits, best_lpni_credits,
1379                                 lpni->lpni_seq, best_lpni->lpni_seq);
1380
1381                 /* pick the healthiest peer ni */
1382                 if (lpni_healthv < best_lpni_healthv) {
1383                         continue;
1384                 } else if (lpni_healthv > best_lpni_healthv) {
1385                         best_lpni_healthv = lpni_healthv;
1386                 /* if this is a preferred peer use it */
1387                 } else if (!preferred && ni_is_pref) {
1388                         preferred = true;
1389                 } else if (preferred && !ni_is_pref) {
1390                         /*
1391                          * this is not the preferred peer so let's ignore
1392                          * it.
1393                          */
1394                         continue;
1395                 } else if (lpni->lpni_txcredits < best_lpni_credits) {
1396                         /*
1397                          * We already have a peer that has more credits
1398                          * available than this one. No need to consider
1399                          * this peer further.
1400                          */
1401                         continue;
1402                 } else if (lpni->lpni_txcredits == best_lpni_credits) {
1403                         /*
1404                          * The best peer found so far and the current peer
1405                          * have the same number of available credits, so let's
1406                          * make sure to select between them using Round
1407                          * Robin
1408                          */
1409                         if (best_lpni) {
1410                                 if (best_lpni->lpni_seq <= lpni->lpni_seq)
1411                                         continue;
1412                         }
1413                 }
1414
1415                 best_lpni = lpni;
1416                 best_lpni_credits = lpni->lpni_txcredits;
1417         }
1418
1419         /* if we still can't find a peer ni then we can't reach it */
1420         if (!best_lpni) {
1421                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1422                         LNET_NIDNET(dst_nid);
1423                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1424                                 libcfs_net2str(net_id));
1425                 return NULL;
1426         }
1427
1428         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1429                libcfs_nid2str(best_lpni->lpni_nid));
1430
1431         return best_lpni;
1432 }
1433
1434 /*
1435  * Prerequisite: the best_ni should already be set in the sd
1436  */
1437 static inline struct lnet_peer_ni *
1438 lnet_find_best_lpni_on_net(struct lnet_send_data *sd, struct lnet_peer *peer,
1439                            __u32 net_id)
1440 {
1441         struct lnet_peer_net *peer_net;
1442
1443         /*
1444          * The gateway is Multi-Rail capable so now we must select the
1445          * proper peer_ni
1446          */
1447         peer_net = lnet_peer_get_net_locked(peer, net_id);
1448
1449         if (!peer_net) {
1450                 CERROR("gateway peer %s has no NI on net %s\n",
1451                        libcfs_nid2str(peer->lp_primary_nid),
1452                        libcfs_net2str(net_id));
1453                 return NULL;
1454         }
1455
1456         return lnet_select_peer_ni(sd->sd_best_ni, sd->sd_dst_nid,
1457                                    peer, peer_net);
1458 }
1459
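/*
 * Rank two routes: the lower priority value wins, then fewer hops, then
 * the better gateway peer NI (lnet_compare_peers()), then the lower
 * sequence number for round-robin.  Returns 1 if @r1 is preferable and -1
 * if @r2 is, and reports the chosen gateway's best peer NI.
 */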
1460 static int
1461 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2,
1462                     struct lnet_peer_ni **best_lpni)
1463 {
1464         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1465         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1466         struct lnet_peer *lp1 = r1->lr_gateway;
1467         struct lnet_peer *lp2 = r2->lr_gateway;
1468         struct lnet_peer_ni *lpni1;
1469         struct lnet_peer_ni *lpni2;
1470         struct lnet_send_data sd;
1471         int rc;
1472
1473         sd.sd_best_ni = NULL;
1474         sd.sd_dst_nid = LNET_NID_ANY;
1475         lpni1 = lnet_find_best_lpni_on_net(&sd, lp1, r1->lr_lnet);
1476         lpni2 = lnet_find_best_lpni_on_net(&sd, lp2, r2->lr_lnet);
1477         LASSERT(lpni1 && lpni2);
1478
1479         if (r1->lr_priority < r2->lr_priority) {
1480                 *best_lpni = lpni1;
1481                 return 1;
1482         }
1483
1484         if (r1->lr_priority > r2->lr_priority) {
1485                 *best_lpni = lpni2;
1486                 return -1;
1487         }
1488
1489         if (r1_hops < r2_hops) {
1490                 *best_lpni = lpni1;
1491                 return 1;
1492         }
1493
1494         if (r1_hops > r2_hops) {
1495                 *best_lpni = lpni2;
1496                 return -1;
1497         }
1498
1499         rc = lnet_compare_peers(lpni1, lpni2);
1500         if (rc == 1) {
1501                 *best_lpni = lpni1;
1502                 return rc;
1503         } else if (rc == -1) {
1504                 *best_lpni = lpni2;
1505                 return rc;
1506         }
1507
1508         if (r1->lr_seq - r2->lr_seq <= 0) {
1509                 *best_lpni = lpni1;
1510                 return 1;
1511         }
1512
1513         *best_lpni = lpni2;
1514         return -1;
1515 }
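
/*
 * Summary of the comparison above: the preferred route is chosen by,
 * in order, lower route priority, fewer hops (LNET_UNDEFINED_HOPS counts
 * as one hop), lnet_compare_peers() on the gateways' selected peer NIs,
 * and finally lr_seq for Round Robin. A return value of 1 means r1 is
 * preferred (*best_lpni = lpni1); -1 means r2 is preferred.
 */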
1516
1517 static struct lnet_route *
1518 lnet_find_route_locked(struct lnet_net *net, __u32 remote_net,
1519                        struct lnet_route **prev_route,
1520                        struct lnet_peer_ni **gwni)
1521 {
1522         struct lnet_peer_ni *best_gw_ni = NULL;
1523         struct lnet_route *best_route;
1524         struct lnet_route *last_route;
1525         struct lnet_remotenet *rnet;
1526         struct lnet_peer *lp_best;
1527         struct lnet_route *route;
1528         struct lnet_peer *lp;
1529         int rc;
1530
1531         rnet = lnet_find_rnet_locked(remote_net);
1532         if (rnet == NULL)
1533                 return NULL;
1534
1535         lp_best = NULL;
1536         best_route = last_route = NULL;
1537         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1538                 lp = route->lr_gateway;
1539
1540                 if (!lnet_is_route_alive(route))
1541                         continue;
1542
1543                 if (lp_best == NULL) {
1544                         best_route = last_route = route;
1545                         lp_best = lp;
1546                 }
1547
1548                 /* no protection on the fields below, but it's harmless */
1549                 if (last_route->lr_seq - route->lr_seq < 0)
1550                         last_route = route;
1551
1552                 rc = lnet_compare_routes(route, best_route, &best_gw_ni);
1553                 if (rc < 0)
1554                         continue;
1555
1556                 best_route = route;
1557                 lp_best = lp;
1558         }
1559
1560         *prev_route = last_route;
1561         *gwni = best_gw_ni;
1562
1563         return best_route;
1564 }
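
/*
 * Note: best_route is the winner of lnet_compare_routes() among the
 * alive routes to remote_net, while *prev_route tracks the route with
 * the highest lr_seq, i.e. the most recently used one. The caller uses
 * the latter to assign the new best route's lr_seq so that the Round
 * Robin ordering stays monotonic.
 */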
1565
1566 static struct lnet_ni *
1567 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1568                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1569                  int md_cpt)
1570 {
1571         struct lnet_ni *ni = NULL;
1572         unsigned int shortest_distance;
1573         int best_credits;
1574         int best_healthv;
1575
1576         /*
1577          * If there is no peer_ni that we can send to on this network,
1578          * then there is no point in looking for a new best_ni here.
1579          */
1580         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1581                 return best_ni;
1582
1583         if (best_ni == NULL) {
1584                 shortest_distance = UINT_MAX;
1585                 best_credits = INT_MIN;
1586                 best_healthv = 0;
1587         } else {
1588                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1589                                                      best_ni->ni_dev_cpt);
1590                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1591                 best_healthv = atomic_read(&best_ni->ni_healthv);
1592         }
1593
1594         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1595                 unsigned int distance;
1596                 int ni_credits;
1597                 int ni_healthv;
1598                 int ni_fatal;
1599
1600                 ni_credits = atomic_read(&ni->ni_tx_credits);
1601                 ni_healthv = atomic_read(&ni->ni_healthv);
1602                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1603
1604                 /*
1605                  * calculate the distance from the CPT on which
1606                  * the message memory is allocated to the CPT of
1607                  * the NI's physical device
1608                  */
1609                 distance = cfs_cpt_distance(lnet_cpt_table(),
1610                                             md_cpt,
1611                                             ni->ni_dev_cpt);
1612
1613                 CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d] with best_ni %s [c:%d, d:%d, s:%d]\n",
1614                        libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1615                        ni->ni_seq, (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1616                         : "not selected", best_credits, shortest_distance,
1617                         (best_ni) ? best_ni->ni_seq : 0);
1618
1619                 /*
1620                  * All distances smaller than the NUMA range
1621                  * are treated equally.
1622                  */
1623                 if (distance < lnet_numa_range)
1624                         distance = lnet_numa_range;
1625
1626                 /*
1627                  * Select on health, shorter distance, available
1628                  * credits, then round-robin.
1629                  */
1630                 if (ni_fatal) {
1631                         continue;
1632                 } else if (ni_healthv < best_healthv) {
1633                         continue;
1634                 } else if (ni_healthv > best_healthv) {
1635                         best_healthv = ni_healthv;
1636                         /*
1637                          * If we're going to prefer this ni because it's
1638                          * the healthiest, then we should set the
1639                          * shortest_distance in the algorithm in case
1640                          * there are multiple NIs with the same health but
1641                          * different distances.
1642                          */
1643                         if (distance < shortest_distance)
1644                                 shortest_distance = distance;
1645                 } else if (distance > shortest_distance) {
1646                         continue;
1647                 } else if (distance < shortest_distance) {
1648                         shortest_distance = distance;
1649                 } else if (ni_credits < best_credits) {
1650                         continue;
1651                 } else if (ni_credits == best_credits) {
1652                         if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1653                                 continue;
1654                 }
1655                 best_ni = ni;
1656                 best_credits = ni_credits;
1657         }
1658
1659         CDEBUG(D_NET, "selected best_ni %s\n",
1660                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1661
1662         return best_ni;
1663 }
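
/*
 * Illustration of the selection order above (the values are examples
 * only): NIs with a fatal error are skipped; then higher health wins,
 * then shorter CPT distance, then more tx credits, then lower ni_seq
 * (Round Robin). Because distances below lnet_numa_range are clamped to
 * lnet_numa_range, with lnet_numa_range = 2 the distances 0, 1 and 2
 * all compare equal, while a distance of 3 loses to any of them.
 */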
1664
1665 /*
1666  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1667  * because such traffic is required to perform discovery. We therefore
1668  * exclude all GET and PUT on that portal. We also exclude all ACK and
1669  * REPLY traffic, but that is because the portal is not tracked in the
1670  * message structure for these message types. We could restrict this
1671  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1672  */
1673 static bool
1674 lnet_msg_discovery(struct lnet_msg *msg)
1675 {
1676         if (msg->msg_type == LNET_MSG_PUT) {
1677                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1678                         return true;
1679         } else if (msg->msg_type == LNET_MSG_GET) {
1680                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1681                         return true;
1682         }
1683         return false;
1684 }
1685
1686 #define SRC_SPEC        0x0001
1687 #define SRC_ANY         0x0002
1688 #define LOCAL_DST       0x0004
1689 #define REMOTE_DST      0x0008
1690 #define MR_DST          0x0010
1691 #define NMR_DST         0x0020
1692 #define SND_RESP        0x0040
1693
1694 /* The following two defines are used for return codes */
1695 #define REPEAT_SEND     0x1000
1696 #define PASS_THROUGH    0x2000
1697
1698 /* The different cases lnet_select pathway needs to handle */
1699 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1700 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1701 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1702 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1703 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1704 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1705 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1706 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
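
/*
 * Example of how the bits above compose (illustrative): a PUT with no
 * source NID specified, destined to a peer NID on a directly connected
 * net, where the peer is Multi-Rail, is classified as
 * SRC_ANY | LOCAL_DST | MR_DST == SRC_ANY_LOCAL_MR_DST and dispatched by
 * lnet_handle_send_case_locked() to lnet_handle_any_mr_dst().
 * REPEAT_SEND and PASS_THROUGH are internal return codes, not send-case
 * bits.
 */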
1707
1708 static int
1709 lnet_handle_lo_send(struct lnet_send_data *sd)
1710 {
1711         struct lnet_msg *msg = sd->sd_msg;
1712         int cpt = sd->sd_cpt;
1713
1714         /* No send credit hassles with LOLND */
1715         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1716         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1717         if (!msg->msg_routing)
1718                 msg->msg_hdr.src_nid =
1719                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1720         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1721         lnet_msg_commit(msg, cpt);
1722         msg->msg_txni = the_lnet.ln_loni;
1723
1724         return LNET_CREDIT_OK;
1725 }
1726
1727 static int
1728 lnet_handle_send(struct lnet_send_data *sd)
1729 {
1730         struct lnet_ni *best_ni = sd->sd_best_ni;
1731         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1732         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1733         struct lnet_msg *msg = sd->sd_msg;
1734         int cpt2;
1735         __u32 send_case = sd->sd_send_case;
1736         int rc;
1737         __u32 routing = send_case & REMOTE_DST;
1738         struct lnet_rsp_tracker *rspt;
1739
1740         /*
1741          * Increment sequence number of the selected peer so that we
1742          * pick the next one in Round Robin.
1743          */
1744         best_lpni->lpni_seq++;
1745
1746         /*
1747          * grab a reference on the peer_ni so it sticks around even if
1748          * we need to drop and relock the lnet_net_lock below.
1749          */
1750         lnet_peer_ni_addref_locked(best_lpni);
1751
1752         /*
1753          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1754          * message. This ensures that we get a CPT that is correct for
1755          * the NI when the NI has been restricted to a subset of all CPTs.
1756          * If the selected CPT differs from the one currently locked, we
1757          * must unlock and relock the lnet_net_lock(), and then check whether
1758          * the configuration has changed. We don't have a hold on the best_ni
1759          * yet, and it may have vanished.
1760          */
1761         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1762         if (sd->sd_cpt != cpt2) {
1763                 __u32 seq = lnet_get_dlc_seq_locked();
1764                 lnet_net_unlock(sd->sd_cpt);
1765                 sd->sd_cpt = cpt2;
1766                 lnet_net_lock(sd->sd_cpt);
1767                 if (seq != lnet_get_dlc_seq_locked()) {
1768                         lnet_peer_ni_decref_locked(best_lpni);
1769                         return REPEAT_SEND;
1770                 }
1771         }
1772
1773         /*
1774          * store the best_lpni in the message right away to avoid having
1775          * to do the same operation under different conditions
1776          */
1777         msg->msg_txpeer = best_lpni;
1778         msg->msg_txni = best_ni;
1779
1780         /*
1781          * grab a reference for the best_ni since now it's in use in this
1782          * send. The reference will be dropped in lnet_finalize()
1783          */
1784         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1785
1786         /*
1787          * Always set target.nid to the best peer picked: either one of
1788          * the peer NIDs selected by the algorithm, the NID originally
1789          * set in the target, or the NID of a router if this message
1790          * should be routed.
1791          */
1792         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1793
1794         /*
1795          * lnet_msg_commit assigns the correct cpt to the message, which
1796          * is used to decrement the correct refcount on the ni when it's
1797          * time to return the credits
1798          */
1799         lnet_msg_commit(msg, sd->sd_cpt);
1800
1801         /*
1802          * If we are routing the message then we keep the src_nid that was
1803          * set by the originator. If we are not routing then we are the
1804          * originator and set it here.
1805          */
1806         if (!msg->msg_routing)
1807                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1808
1809         if (routing) {
1810                 msg->msg_target_is_router = 1;
1811                 msg->msg_target.pid = LNET_PID_LUSTRE;
1812                 /*
1813                  * since we're routing we want to ensure that the
1814                  * msg_hdr.dest_nid is set to the final destination. When
1815                  * the router receives this message it knows how to route
1816                  * it.
1817                  *
1818                  * final_dst_lpni is set at the beginning of the
1819                  * lnet_select_pathway() function and is never changed.
1820                  * It's safe to use it here.
1821                  */
1822                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1823         } else {
1824                 /*
1825                  * if we're not routing set the dest_nid to the best peer
1826                  * ni NID that we picked earlier in the algorithm.
1827                  */
1828                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1829         }
1830
1831         /*
1832          * if we have a response tracker block, update it with the next
1833          * hop nid
1834          */
1835         if (msg->msg_md) {
1836                 rspt = msg->msg_md->md_rspt_ptr;
1837                 if (rspt) {
1838                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1839                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1840                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1841                 }
1842         }
1843
1844         rc = lnet_post_send_locked(msg, 0);
1845
1846         if (!rc)
1847                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1848                        libcfs_nid2str(msg->msg_hdr.src_nid),
1849                        libcfs_nid2str(msg->msg_txni->ni_nid),
1850                        libcfs_nid2str(sd->sd_src_nid),
1851                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1852                        libcfs_nid2str(sd->sd_dst_nid),
1853                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1854                        libcfs_nid2str(sd->sd_rtr_nid),
1855                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1856
1857         return rc;
1858 }
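
/*
 * Return values of lnet_handle_send(): the result of
 * lnet_post_send_locked() (LNET_CREDIT_OK, LNET_CREDIT_WAIT or a
 * negative errno), or REPEAT_SEND when the commit CPT differed from the
 * locked one and the DLC sequence changed while relocking, in which
 * case lnet_select_pathway() restarts the selection from scratch.
 */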
1859
1860 static inline void
1861 lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
1862 {
1863         if (sd->sd_send_case & NMR_DST &&
1864             sd->sd_msg->msg_type != LNET_MSG_REPLY &&
1865             sd->sd_msg->msg_type != LNET_MSG_ACK &&
1866             sd->sd_best_lpni->lpni_pref_nnids == 0) {
1867                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1868                        libcfs_nid2str(sd->sd_best_ni->ni_nid),
1869                        libcfs_nid2str(sd->sd_best_lpni->lpni_nid));
1870                 lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni,
1871                                                  sd->sd_best_ni->ni_nid);
1872         }
1873 }
1874
1875 /*
1876  * Source Specified
1877  * Local Destination
1878  * non-mr peer
1879  *
1880  * use the source and destination NIDs as the pathway
1881  */
1882 static int
1883 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1884 {
1885         /* the destination lpni is set before we get here. */
1886
1887         /* find local NI */
1888         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1889         if (!sd->sd_best_ni) {
1890                 CERROR("Can't send to %s: src %s is not a "
1891                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1892                                 libcfs_nid2str(sd->sd_src_nid));
1893                 return -EINVAL;
1894         }
1895
1896         /*
1897          * the preferred NID will only be set for NMR peers
1898          */
1899         lnet_set_non_mr_pref_nid(sd);
1900
1901         return lnet_handle_send(sd);
1902 }
1903
1904 /*
1905  * Source Specified
1906  * Local Destination
1907  * MR Peer
1908  *
1909  * Don't run the selection algorithm on the peer NIs. By specifying the
1910  * local NID, we're also saying that we should always use the destination NID
1911  * provided. This handles the case where we should be using the same
1912  * destination NID for all the messages which belong to the same RPC
1913  * request.
1914  */
1915 static int
1916 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
1917 {
1918         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1919         if (!sd->sd_best_ni) {
1920                 CERROR("Can't send to %s: src %s is not a "
1921                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
1922                                 libcfs_nid2str(sd->sd_src_nid));
1923                 return -EINVAL;
1924         }
1925
1926         if (sd->sd_best_lpni &&
1927             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
1928                 return lnet_handle_lo_send(sd);
1929         else if (sd->sd_best_lpni)
1930                 return lnet_handle_send(sd);
1931
1932         CERROR("can't send to %s. no NI on %s\n",
1933                libcfs_nid2str(sd->sd_dst_nid),
1934                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
1935
1936         return -EHOSTUNREACH;
1937 }
1938
1939 struct lnet_ni *
1940 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
1941                               struct lnet_peer *peer,
1942                               struct lnet_peer_net *peer_net,
1943                               int cpt,
1944                               bool incr_seq)
1945 {
1946         struct lnet_net *local_net;
1947         struct lnet_ni *best_ni;
1948
1949         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
1950         if (!local_net)
1951                 return NULL;
1952
1953         /*
1954          * Iterate through the NIs in this local Net and select
1955          * the NI to send from. The selection is determined by
1956          * these criteria in the following priority:
1957          *      1. NI health, then NUMA distance
1958          *      2. NI available credits
1959          *      3. Round Robin
1960          */
1961         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
1962                                    peer, peer_net, cpt);
1963
1964         if (incr_seq && best_ni)
1965                 best_ni->ni_seq++;
1966
1967         return best_ni;
1968 }
1969
1970 static int
1971 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
1972                              struct lnet_msg *msg, lnet_nid_t rtr_nid,
1973                              int cpt)
1974 {
1975         struct lnet_peer *peer;
1976         lnet_nid_t primary_nid;
1977         int rc;
1978
1979         lnet_peer_ni_addref_locked(lpni);
1980
1981         peer = lpni->lpni_peer_net->lpn_peer;
1982
1983         if (lnet_peer_gw_discovery(peer)) {
1984                 lnet_peer_ni_decref_locked(lpni);
1985                 return 0;
1986         }
1987
1988         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
1989                 lnet_peer_ni_decref_locked(lpni);
1990                 return 0;
1991         }
1992
1993         rc = lnet_discover_peer_locked(lpni, cpt, false);
1994         if (rc) {
1995                 lnet_peer_ni_decref_locked(lpni);
1996                 return rc;
1997         }
1998         /* The peer may have changed. */
1999         peer = lpni->lpni_peer_net->lpn_peer;
2000         spin_lock(&peer->lp_lock);
2001         if (lnet_peer_is_uptodate_locked(peer)) {
2002                 spin_unlock(&peer->lp_lock);
2003                 lnet_peer_ni_decref_locked(lpni);
2004                 return 0;
2005         }
2006         /* queue message and return */
2007         msg->msg_rtr_nid_param = rtr_nid;
2008         msg->msg_sending = 0;
2009         msg->msg_txpeer = NULL;
2010         list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
2011         primary_nid = peer->lp_primary_nid;
2012         spin_unlock(&peer->lp_lock);
2013
2014         lnet_peer_ni_decref_locked(lpni);
2015
2016         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2017                 msg, libcfs_nid2str(primary_nid));
2018
2019         return LNET_DC_WAIT;
2020 }
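
/*
 * Return values of lnet_initiate_peer_discovery(): 0 when no discovery
 * is required (the peer is a gateway already being discovered, the
 * message must not trigger discovery, or the peer is already up to
 * date), a negative errno if lnet_discover_peer_locked() fails, or
 * LNET_DC_WAIT after the message has been queued on lp_dc_pendq to wait
 * for discovery to complete.
 */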
2021
2022 static int
2023 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2024                              lnet_nid_t dst_nid,
2025                              struct lnet_peer_ni **gw_lpni,
2026                              struct lnet_peer **gw_peer)
2027 {
2028         int rc;
2029         __u32 local_lnet;
2030         struct lnet_peer *gw;
2031         struct lnet_peer *lp;
2032         struct lnet_peer_net *lpn;
2033         struct lnet_peer_net *best_lpn = NULL;
2034         struct lnet_remotenet *rnet;
2035         struct lnet_route *best_route = NULL;
2036         struct lnet_route *last_route = NULL;
2037         struct lnet_peer_ni *lpni = NULL;
2038         struct lnet_peer_ni *gwni = NULL;
2039         lnet_nid_t src_nid = sd->sd_src_nid;
2040
2041         /* If a router nid was specified then we are replying to a GET or
2042          * sending an ACK. In this case we use the gateway associated with the
2043          * specified router nid.
2044          */
2045         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2046                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2047                 if (!gwni) {
2048                         CERROR("No peer NI for gateway %s\n",
2049                                libcfs_nid2str(sd->sd_rtr_nid));
2050                         return -EHOSTUNREACH;
2051                 }
2052                 gw = gwni->lpni_peer_net->lpn_peer;
2053                 lnet_peer_ni_decref_locked(gwni);
2054                 local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
2055         } else {
2056                 /* we've already looked up the initial lpni using dst_nid */
2057                 lpni = sd->sd_best_lpni;
2058                 /* the peer tree must already exist */
2059                 LASSERT(lpni && lpni->lpni_peer_net &&
2060                         lpni->lpni_peer_net->lpn_peer);
2061                 lp = lpni->lpni_peer_net->lpn_peer;
2062
2063                 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2064                         /* is this remote network reachable?  */
2065                         rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2066                         if (!rnet)
2067                                 continue;
2068
2069                         if (!best_lpn)
2070                                 best_lpn = lpn;
2071
2072                         if (best_lpn->lpn_seq <= lpn->lpn_seq)
2073                                 continue;
2074
2075                         best_lpn = lpn;
2076                 }
2077
2078                 if (!best_lpn) {
2079                         CERROR("peer %s has no available nets\n",
2080                                libcfs_nid2str(sd->sd_dst_nid));
2081                         return -EHOSTUNREACH;
2082                 }
2083
2084                 sd->sd_best_lpni = lnet_find_best_lpni_on_net(sd, lp, best_lpn->lpn_net_id);
2085                 if (!sd->sd_best_lpni) {
2086                         CERROR("peer %s down\n",
2087                                libcfs_nid2str(sd->sd_dst_nid));
2088                         return -EHOSTUNREACH;
2089                 }
2090
2091                 best_route = lnet_find_route_locked(NULL, best_lpn->lpn_net_id,
2092                                                     &last_route, &gwni);
2093                 if (!best_route) {
2094                         CERROR("no route to %s from %s\n",
2095                                libcfs_nid2str(dst_nid),
2096                                libcfs_nid2str(src_nid));
2097                         return -EHOSTUNREACH;
2098                 }
2099
2100                 if (!gwni) {
2101                         CERROR("Internal Error. Route expected to %s from %s\n",
2102                                libcfs_nid2str(dst_nid),
2103                                libcfs_nid2str(src_nid));
2104                         return -EFAULT;
2105                 }
2106
2107                 gw = best_route->lr_gateway;
2108                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2109                 local_lnet = best_route->lr_lnet;
2110
2111         }
2112
2113         /*
2114          * Discover this gateway if it hasn't already been discovered.
2115          * This means we might delay the message until discovery has
2116          * completed
2117          */
2118         sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
2119         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_rtr_nid,
2120                                           sd->sd_cpt);
2121         if (rc)
2122                 return rc;
2123
2124         if (!sd->sd_best_ni)
2125                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
2126                                         lnet_peer_get_net_locked(gw,
2127                                                                  local_lnet),
2128                                         sd->sd_md_cpt,
2129                                         true);
2130
2131         if (!sd->sd_best_ni) {
2132                 CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2133                        libcfs_net2str(local_lnet),
2134                        libcfs_nid2str(sd->sd_src_nid));
2135                 return -EFAULT;
2136         }
2137
2138         *gw_lpni = gwni;
2139         *gw_peer = gw;
2140
2141         /*
2142          * increment the sequence numbers since now we're sure we're
2143          * going to use this path
2144          */
2145         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2146                 LASSERT(best_route && last_route);
2147                 best_route->lr_seq = last_route->lr_seq + 1;
2148                 best_lpn->lpn_seq++;
2149         }
2150
2151         return 0;
2152 }
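
/*
 * On success the selected gateway is returned through *gw_lpni and
 * *gw_peer, and sd->sd_best_ni is set (if not already) to a local NI on
 * the gateway's local net. When sd_rtr_nid is specified (ACK/REPLY) the
 * gateway is taken from it directly; otherwise the best reachable peer
 * net and route are selected and their sequence numbers are bumped for
 * Round Robin.
 */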
2153
2154 /*
2155  * Handle two cases:
2156  *
2157  * Case 1:
2158  *  Source specified
2159  *  Remote destination
2160  *  Non-MR destination
2161  *
2162  * Case 2:
2163  *  Source specified
2164  *  Remote destination
2165  *  MR destination
2166  *
2167  * The handling of these two cases is similar. Even though the destination
2168  * can be MR or non-MR, we'll deal directly with the router.
2169  */
2170 static int
2171 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2172 {
2173         int rc;
2174         struct lnet_peer_ni *gw_lpni = NULL;
2175         struct lnet_peer *gw_peer = NULL;
2176
2177         /* find local NI */
2178         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2179         if (!sd->sd_best_ni) {
2180                 CERROR("Can't send to %s: src %s is not a "
2181                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2182                                 libcfs_nid2str(sd->sd_src_nid));
2183                 return -EINVAL;
2184         }
2185
2186         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2187                                      &gw_peer);
2188         if (rc)
2189                 return rc;
2190
2191         if (sd->sd_send_case & NMR_DST)
2192                 /*
2193                  * since the final destination is non-MR let's set its preferred
2194                  * NID before we send
2195                  */
2196                 lnet_set_non_mr_pref_nid(sd);
2197
2198         /*
2199          * We're going to send to the gw found so let's set its
2200          * info
2201          */
2202         sd->sd_peer = gw_peer;
2203         sd->sd_best_lpni = gw_lpni;
2204
2205         return lnet_handle_send(sd);
2206 }
2207
2208 struct lnet_ni *
2209 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2210                                bool discovery)
2211 {
2212         struct lnet_peer_net *peer_net = NULL;
2213         struct lnet_ni *best_ni = NULL;
2214
2215         /*
2216          * The peer can have multiple interfaces, some of them can be on
2217          * the local network and others on a routed network. We should
2218          * prefer the local network. However if the local network is not
2219          * available then we need to try the routed network
2220          */
2221
2222         /* go through all the peer nets and find the best_ni */
2223         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
2224                 /*
2225                  * The peer's list of nets can contain non-local nets. We
2226                  * want to only examine the local ones.
2227                  */
2228                 if (!lnet_get_net_locked(peer_net->lpn_net_id))
2229                         continue;
2230                 best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
2231                                                    peer_net, md_cpt, false);
2232
2233                 /*
2234                  * if this is a discovery message and lp_disc_net_id is
2235                  * specified then use that net to send the discovery on.
2236                  */
2237                 if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
2238                     discovery)
2239                         break;
2240         }
2241
2242         if (best_ni)
2243                 /* increment sequence number so we can round robin */
2244                 best_ni->ni_seq++;
2245
2246         return best_ni;
2247 }
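
/*
 * The scan above visits every one of the peer's nets that is also
 * configured locally and keeps the best local NI across them (see
 * lnet_get_best_ni() for the criteria). For discovery messages the scan
 * stops once lp_disc_net_id has been considered so that discovery is
 * sent over that net when possible. The chosen NI's ni_seq is bumped
 * for Round Robin.
 */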
2248
2249 static struct lnet_ni *
2250 lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd)
2251 {
2252         struct lnet_ni *best_ni = NULL;
2253         struct lnet_peer_net *peer_net;
2254         struct lnet_peer *peer = sd->sd_peer;
2255         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2256         struct lnet_peer_ni *lpni;
2257         int cpt = sd->sd_cpt;
2258
2259         /*
2260          * We must use a consistent source address when sending to a
2261          * non-MR peer. However, a non-MR peer can have multiple NIDs
2262          * on multiple networks, and we may even need to talk to this
2263          * peer on multiple networks -- certain types of
2264          * load-balancing configuration do this.
2265          *
2266          * So we need to pick the NI the peer prefers for this
2267          * particular network.
2268          */
2269
2270         /* Get the target peer_ni */
2271         peer_net = lnet_peer_get_net_locked(peer,
2272                         LNET_NIDNET(best_lpni->lpni_nid));
2273         LASSERT(peer_net != NULL);
2274         list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
2275                                 lpni_peer_nis) {
2276                 if (lpni->lpni_pref_nnids == 0)
2277                         continue;
2278                 LASSERT(lpni->lpni_pref_nnids == 1);
2279                 best_ni = lnet_nid2ni_locked(
2280                                 lpni->lpni_pref.nid, cpt);
2281                 break;
2282         }
2283
2284         return best_ni;
2285 }
2286
2287 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2288 static int
2289 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2290 {
2291         struct lnet_ni *best_ni = NULL;
2292         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2293
2294         /*
2295          * We must use a consistent source address when sending to a
2296          * non-MR peer. However, a non-MR peer can have multiple NIDs
2297          * on multiple networks, and we may even need to talk to this
2298          * peer on multiple networks -- certain types of
2299          * load-balancing configuration do this.
2300          *
2301          * So we need to pick the NI the peer prefers for this
2302          * particular network.
2303          */
2304
2305         best_ni = lnet_find_existing_preferred_best_ni(sd);
2306
2307         /* if best_ni is still not set just pick one */
2308         if (!best_ni) {
2309                 best_ni =
2310                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2311                                                 sd->sd_best_lpni->lpni_peer_net,
2312                                                 sd->sd_md_cpt, true);
2313                 /* If there is no best_ni we don't have a route */
2314                 if (!best_ni) {
2315                         CERROR("no path to %s from net %s\n",
2316                                 libcfs_nid2str(best_lpni->lpni_nid),
2317                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2318                         return -EHOSTUNREACH;
2319                 }
2320         }
2321
2322         sd->sd_best_ni = best_ni;
2323
2324         /* Set preferred NI if necessary. */
2325         lnet_set_non_mr_pref_nid(sd);
2326
2327         return 0;
2328 }
2329
2330
2331 /*
2332  * Source not specified
2333  * Local destination
2334  * Non-MR Peer
2335  *
2336  * always use the same source NID for NMR peers
2337  * If we've talked to that peer before then we already have a preferred
2338  * source NI associated with it. Otherwise, we select a preferred local NI
2339  * and store it in the peer
2340  */
2341 static int
2342 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2343 {
2344         int rc;
2345
2346         /* sd->sd_best_lpni is already set to the final destination */
2347
2348         /*
2349          * At this point we should've created the peer ni and peer. If we
2350          * can't find it, then something went wrong. Instead of asserting,
2351          * output a relevant message and fail the send
2352          */
2353         if (!sd->sd_best_lpni) {
2354                 CERROR("Internal fault. Unable to send msg %s to %s. "
2355                        "NID not known\n",
2356                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2357                        libcfs_nid2str(sd->sd_dst_nid));
2358                 return -EFAULT;
2359         }
2360
2361         rc = lnet_select_preferred_best_ni(sd);
2362         if (!rc)
2363                 rc = lnet_handle_send(sd);
2364
2365         return rc;
2366 }
2367
2368 static int
2369 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2370 {
2371         /*
2372          * NOTE we've already handled the remote peer case. So we only
2373          * need to worry about the local case here.
2374          *
2375          * if we're sending a response, ACK or reply, we need to send it
2376          * to the destination NID given to us. At this point we already
2377          * have the peer_ni we're supposed to send to, so just find the
2378          * best_ni on the peer net and use that. Since we're sending to an
2379          * MR peer then we can just run the selection algorithm on our
2380          * local NIs and pick the best one.
2381          */
2382         if (sd->sd_send_case & SND_RESP) {
2383                 sd->sd_best_ni =
2384                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2385                                                 sd->sd_best_lpni->lpni_peer_net,
2386                                                 sd->sd_md_cpt, true);
2387
2388                 if (!sd->sd_best_ni) {
2389                         /*
2390                          * We're not going to deal with not able to send
2391                          * a response to the provided final destination
2392                          */
2393                         CERROR("Can't send response to %s. "
2394                                "No local NI available\n",
2395                                 libcfs_nid2str(sd->sd_dst_nid));
2396                         return -EHOSTUNREACH;
2397                 }
2398
2399                 return lnet_handle_send(sd);
2400         }
2401
2402         /*
2403          * If we get here that means we're sending a fresh request, PUT or
2404          * GET, so we need to run our standard selection algorithm.
2405          * First find the best local interface that's on any of the peer's
2406          * networks.
2407          */
2408         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2409                                         sd->sd_md_cpt,
2410                                         lnet_msg_discovery(sd->sd_msg));
2411         if (sd->sd_best_ni) {
2412                 sd->sd_best_lpni =
2413                   lnet_find_best_lpni_on_net(sd, sd->sd_peer,
2414                                              sd->sd_best_ni->ni_net->net_id);
2415
2416                 /*
2417                  * if we're successful in selecting a peer_ni on the local
2418                  * network, then send to it. Otherwise fall through and
2419                  * try and see if we can reach it over another routed
2420                  * network
2421                  */
2422                 if (sd->sd_best_lpni &&
2423                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2424                         /*
2425                          * in case we initially started with a routed
2426                          * destination, let's reset to local
2427                          */
2428                         sd->sd_send_case &= ~REMOTE_DST;
2429                         sd->sd_send_case |= LOCAL_DST;
2430                         return lnet_handle_lo_send(sd);
2431                 } else if (sd->sd_best_lpni) {
2432                         /*
2433                          * in case we initially started with a routed
2434                          * destination, let's reset to local
2435                          */
2436                         sd->sd_send_case &= ~REMOTE_DST;
2437                         sd->sd_send_case |= LOCAL_DST;
2438                         return lnet_handle_send(sd);
2439                 }
2440
2441                 CERROR("Internal Error. Expected to have a best_lpni: "
2442                        "%s -> %s\n",
2443                        libcfs_nid2str(sd->sd_src_nid),
2444                        libcfs_nid2str(sd->sd_dst_nid));
2445
2446                 return -EFAULT;
2447         }
2448
2449         /*
2450          * Peer doesn't have a local network. Let's see if there is
2451          * a remote network we can reach it on.
2452          */
2453         return PASS_THROUGH;
2454 }
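
/*
 * PASS_THROUGH above means the peer has no NI on any of our local
 * networks; lnet_handle_any_mr_dst() then falls back to selecting a
 * routed path to one of the peer's remote networks.
 */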
2455
2456 /*
2457  * Case 1:
2458  *      Source NID not specified
2459  *      Local destination
2460  *      MR peer
2461  *
2462  * Case 2:
2463  *      Source NID not specified
2464  *      Remote destination
2465  *      MR peer
2466  *
2467  * In both of these cases if we're sending a response, ACK or REPLY, then
2468  * we need to send to the destination NID provided.
2469  *
2470  * In the remote case let's deal with MR routers.
2471  *
2472  */
2473
2474 static int
2475 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2476 {
2477         int rc = 0;
2478         struct lnet_peer *gw_peer = NULL;
2479         struct lnet_peer_ni *gw_lpni = NULL;
2480
2481         /*
2482          * handle sending a response to a remote peer here so we don't
2483          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2484          */
2485         if (sd->sd_send_case & REMOTE_DST &&
2486             sd->sd_send_case & SND_RESP) {
2487                 struct lnet_peer_ni *gw;
2488                 struct lnet_peer *gw_peer;
2489
2490                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2491                                                   &gw_peer);
2492                 if (rc < 0) {
2493                         CERROR("Can't send response to %s. "
2494                                "No route available\n",
2495                                 libcfs_nid2str(sd->sd_dst_nid));
2496                         return -EHOSTUNREACH;
2497                 } else if (rc > 0) {
2498                         return rc;
2499                 }
2500
2501                 sd->sd_best_lpni = gw;
2502                 sd->sd_peer = gw_peer;
2503
2504                 return lnet_handle_send(sd);
2505         }
2506
2507         /*
2508          * Even though the NID for the peer might not be on a local network,
2509          * since the peer is MR there could be other interfaces on the
2510          * local network. In that case we'd still like to prefer the local
2511          * network over the routed network. If we're unable to do that
2512          * then we select the best router among the different routed networks,
2513          * and if the router is MR then we can deal with it as such.
2514          */
2515         rc = lnet_handle_any_mr_dsta(sd);
2516         if (rc != PASS_THROUGH)
2517                 return rc;
2518
2519         /*
2520          * Now that we must route to the destination, we must consider the
2521          * MR case, where the destination has multiple interfaces, some of
2522          * which we can route to and others we do not. For this reason we
2523          * need to select the destination which we can route to and if
2524          * there are multiple, we need to round robin.
2525          */
2526         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2527                                           &gw_peer);
2528         if (rc)
2529                 return rc;
2530
2531         sd->sd_send_case &= ~LOCAL_DST;
2532         sd->sd_send_case |= REMOTE_DST;
2533
2534         sd->sd_peer = gw_peer;
2535         sd->sd_best_lpni = gw_lpni;
2536
2537         return lnet_handle_send(sd);
2538 }
2539
2540 /*
2541  * Source not specified
2542  * Remote destination
2543  * Non-MR peer
2544  *
2545  * Must send to the specified peer NID using the same source NID that
2546  * we've used before. If it's the first time to talk to that peer then
2547  * find the source NI and assign it as preferred to that peer
2548  */
2549 static int
2550 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2551 {
2552         int rc;
2553         struct lnet_peer_ni *gw_lpni = NULL;
2554         struct lnet_peer *gw_peer = NULL;
2555
2556         /*
2557          * Let's set if we have a preferred NI to talk to this NMR peer
2558          */
2559         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd);
2560
2561         /*
2562          * find the router and that'll find the best NI if we didn't find
2563          * it already.
2564          */
2565         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2566                                           &gw_peer);
2567         if (rc)
2568                 return rc;
2569
2570         /*
2571          * set the best_ni we've chosen as the preferred one for
2572          * this peer
2573          */
2574         lnet_set_non_mr_pref_nid(sd);
2575
2576         /* we'll be sending to the gw */
2577         sd->sd_best_lpni = gw_lpni;
2578         sd->sd_peer = gw_peer;
2579
2580         return lnet_handle_send(sd);
2581 }
2582
2583 static int
2584 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2585 {
2586         /*
2587          * turn off the SND_RESP bit.
2588          * It will be checked in the case handling
2589          */
2590         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2591
2592         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2593                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2594                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2595                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2596                 libcfs_nid2str(sd->sd_dst_nid),
2597                 (send_case & LOCAL_DST) ? "local" : "routed");
2598
2599         switch (send_case) {
2600         /*
2601          * For all cases where the source is specified, we should always
2602          * use the destination NID, whether it's an MR destination or not,
2603          * since we're continuing a series of related messages for the
2604          * same RPC
2605          */
2606         case SRC_SPEC_LOCAL_NMR_DST:
2607                 return lnet_handle_spec_local_nmr_dst(sd);
2608         case SRC_SPEC_LOCAL_MR_DST:
2609                 return lnet_handle_spec_local_mr_dst(sd);
2610         case SRC_SPEC_ROUTER_NMR_DST:
2611         case SRC_SPEC_ROUTER_MR_DST:
2612                 return lnet_handle_spec_router_dst(sd);
2613         case SRC_ANY_LOCAL_NMR_DST:
2614                 return lnet_handle_any_local_nmr_dst(sd);
2615         case SRC_ANY_LOCAL_MR_DST:
2616         case SRC_ANY_ROUTER_MR_DST:
2617                 return lnet_handle_any_mr_dst(sd);
2618         case SRC_ANY_ROUTER_NMR_DST:
2619                 return lnet_handle_any_router_nmr_dst(sd);
2620         default:
2621                 CERROR("Unknown send case\n");
2622                 return -1;
2623         }
2624 }
2625
2626 static int
2627 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2628                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2629 {
2630         struct lnet_peer_ni     *lpni;
2631         struct lnet_peer        *peer;
2632         struct lnet_send_data   send_data;
2633         int                     cpt, rc;
2634         int                     md_cpt;
2635         __u32                   send_case = 0;
2636
2637         memset(&send_data, 0, sizeof(send_data));
2638
2639         /*
2640          * get an initial CPT to use for locking. The idea here is not to
2641          * serialize the calls to select_pathway, so that as many
2642          * operations can run concurrently as possible. To do that we use
2643          * the CPT where this call is being executed. Later on when we
2644          * determine the CPT to use in lnet_message_commit, we switch the
2645          * lock and check if there was any configuration change.  If none,
2646          * then we proceed, if there is, then we restart the operation.
2647          */
2648         cpt = lnet_net_lock_current();
2649
2650         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2651         if (md_cpt == CFS_CPT_ANY)
2652                 md_cpt = cpt;
2653
2654 again:
2655
2656         /*
2657          * If we're sending to ourselves then there is no need to go through
2658          * any selection. We can shortcut the entire process and send over
2659          * lolnd.
2660          *
2661          * However, we make two exceptions to this rule:
2662          * 1. If the src_nid is specified then our API defines that we must send
2663          *    via that interface.
2664          * 2. Recovery messages must be sent to the lnet_ni that is being
2665          *    recovered.
2666          */
2667         send_data.sd_msg = msg;
2668         send_data.sd_cpt = cpt;
2669         if (src_nid == LNET_NID_ANY && !msg->msg_recovery &&
2670             lnet_nid2ni_locked(dst_nid, cpt)) {
2671                 rc = lnet_handle_lo_send(&send_data);
2672                 lnet_net_unlock(cpt);
2673                 return rc;
2674         }
2675
2676         /*
2677          * find an existing peer_ni, or create one and mark it as having been
2678          * created due to network traffic. This call will create the
2679          * peer->peer_net->peer_ni tree.
2680          */
2681         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2682         if (IS_ERR(lpni)) {
2683                 lnet_net_unlock(cpt);
2684                 return PTR_ERR(lpni);
2685         }
2686
2687         /*
2688          * Cache the original src_nid. If we need to resend the message
2689          * then we'll need to know whether the src_nid was originally
2690          * specified for this message. If it was originally specified,
2691          * then we need to keep using the same src_nid since it's
2692          * continuing the same sequence of messages.
2693          */
2694         msg->msg_src_nid_param = src_nid;
2695
2696         /*
2697          * If necessary, perform discovery on the peer that owns this peer_ni.
2698          * Note, this can result in the ownership of this peer_ni changing
2699          * to another peer object.
2700          */
2701         rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
2702         if (rc) {
2703                 lnet_peer_ni_decref_locked(lpni);
2704                 lnet_net_unlock(cpt);
2705                 return rc;
2706         }
2707         lnet_peer_ni_decref_locked(lpni);
2708
2709         peer = lpni->lpni_peer_net->lpn_peer;
2710
2711         /*
2712          * Identify the different send cases
2713          */
2714         if (src_nid == LNET_NID_ANY)
2715                 send_case |= SRC_ANY;
2716         else
2717                 send_case |= SRC_SPEC;
2718
2719         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2720                 send_case |= LOCAL_DST;
2721         else
2722                 send_case |= REMOTE_DST;
2723
2724         /*
2725          * if this is a non-MR peer or if we're recovering a peer ni then
2726          * let's consider this an NMR case so we can hit the destination
2727          * NID.
2728          */
2729         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery)
2730                 send_case |= NMR_DST;
2731         else
2732                 send_case |= MR_DST;
2733
2734         if (msg->msg_type == LNET_MSG_REPLY ||
2735             msg->msg_type == LNET_MSG_ACK)
2736                 send_case |= SND_RESP;
2737
2738         /* assign parameters to the send_data */
2739         send_data.sd_rtr_nid = rtr_nid;
2740         send_data.sd_src_nid = src_nid;
2741         send_data.sd_dst_nid = dst_nid;
2742         send_data.sd_best_lpni = lpni;
2743         /*
2744          * keep a pointer to the final destination in case we're going to
2745          * route, so we'll need to access it later
2746          */
2747         send_data.sd_final_dst_lpni = lpni;
2748         send_data.sd_peer = peer;
2749         send_data.sd_md_cpt = md_cpt;
2750         send_data.sd_send_case = send_case;
2751
2752         rc = lnet_handle_send_case_locked(&send_data);
2753
2754         /*
2755          * Update the local cpt since send_data.sd_cpt might've been
2756          * updated as a result of calling lnet_handle_send_case_locked().
2757          */
2758         cpt = send_data.sd_cpt;
2759
2760         if (rc == REPEAT_SEND)
2761                 goto again;
2762
2763         lnet_net_unlock(cpt);
2764
2765         return rc;
2766 }
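
/*
 * Overall flow of lnet_select_pathway(): lock the current CPT, shortcut
 * to lolnd when the destination NID is one of our own and neither a
 * specified source NID nor recovery pins the interface, look up (or
 * create) the destination peer_ni, initiate peer discovery if needed,
 * classify the send by source (SRC_SPEC or SRC_ANY), destination
 * locality (LOCAL_DST or REMOTE_DST) and peer type (MR_DST or NMR_DST),
 * plus SND_RESP for ACK/REPLY, then dispatch to the matching handler.
 * A REPEAT_SEND return restarts the selection because the configuration
 * changed while relocking.
 */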
2767
2768 int
2769 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
2770 {
2771         lnet_nid_t              dst_nid = msg->msg_target.nid;
2772         int                     rc;
2773
2774         /*
2775          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
2776          * but we might want to use pre-determined router for ACK/REPLY
2777          * in the future
2778          */
2779         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
2780         LASSERT(msg->msg_txpeer == NULL);
2781         LASSERT(msg->msg_txni == NULL);
2782         LASSERT(!msg->msg_sending);
2783         LASSERT(!msg->msg_target_is_router);
2784         LASSERT(!msg->msg_receiving);
2785
2786         msg->msg_sending = 1;
2787
2788         LASSERT(!msg->msg_tx_committed);
2789
2790         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
2791         if (rc < 0) {
2792                 if (rc == -EHOSTUNREACH)
2793                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
2794                 else
2795                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
2796                 return rc;
2797         }
2798
2799         if (rc == LNET_CREDIT_OK)
2800                 lnet_ni_send(msg->msg_txni, msg);
2801
2802         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
2803         return 0;
2804 }
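
/*
 * Note: a negative return from lnet_send() means path selection failed
 * and msg_health_status has been set accordingly; otherwise 0 is
 * returned and the message was either sent immediately (LNET_CREDIT_OK),
 * queued for credits (LNET_CREDIT_WAIT) or queued pending peer discovery
 * (LNET_DC_WAIT).
 */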
2805
2806 enum lnet_mt_event_type {
2807         MT_TYPE_LOCAL_NI = 0,
2808         MT_TYPE_PEER_NI
2809 };
2810
2811 struct lnet_mt_event_info {
2812         enum lnet_mt_event_type mt_type;
2813         lnet_nid_t mt_nid;
2814 };
2815
2816 /* called with res_lock held */
2817 void
2818 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
2819 {
2820         struct lnet_rsp_tracker *rspt;
2821
2822         /*
2823          * msg has a refcount on the MD so the MD is not going away.
2824          * The rspt queue for the cpt is protected by
2825          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
2826          */
2827         if (!md->md_rspt_ptr)
2828                 return;
2829
2830         rspt = md->md_rspt_ptr;
2831
2832         /* debug code */
2833         LASSERT(rspt->rspt_cpt == cpt);
2834
2835         md->md_rspt_ptr = NULL;
2836
2837         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
2838                 /*
2839                  * The monitor thread has invalidated this handle because the
2840                  * response timed out, but it failed to lookup the MD. That
2841                  * means this response tracker is on the zombie list. We can
2842                  * safely remove it under the resource lock (held by caller) and
2843                  * free the response tracker block.
2844                  */
2845                 list_del(&rspt->rspt_on_list);
2846                 lnet_rspt_free(rspt, cpt);
2847         } else {
2848                 /*
2849                  * invalidate the handle to indicate that a response has been
2850                  * received, which will then lead the monitor thread to clean up
2851                  * the rspt block.
2852                  */
2853                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
2854         }
2855 }
2856
2857 void
2858 lnet_clean_zombie_rstqs(void)
2859 {
2860         struct lnet_rsp_tracker *rspt, *tmp;
2861         int i;
2862
2863         cfs_cpt_for_each(i, lnet_cpt_table()) {
2864                 list_for_each_entry_safe(rspt, tmp,
2865                                          the_lnet.ln_mt_zombie_rstqs[i],
2866                                          rspt_on_list) {
2867                         list_del(&rspt->rspt_on_list);
2868                         lnet_rspt_free(rspt, i);
2869                 }
2870         }
2871
2872         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
2873 }
2874
2875 static void
2876 lnet_finalize_expired_responses(void)
2877 {
2878         struct lnet_libmd *md;
2879         struct list_head local_queue;
2880         struct lnet_rsp_tracker *rspt, *tmp;
2881         ktime_t now;
2882         int i;
2883
2884         if (the_lnet.ln_mt_rstq == NULL)
2885                 return;
2886
2887         cfs_cpt_for_each(i, lnet_cpt_table()) {
2888                 INIT_LIST_HEAD(&local_queue);
2889
2890                 lnet_net_lock(i);
2891                 if (!the_lnet.ln_mt_rstq[i]) {
2892                         lnet_net_unlock(i);
2893                         continue;
2894                 }
2895                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
2896                 lnet_net_unlock(i);
2897
2898                 now = ktime_get();
2899
2900                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
2901                         /*
2902                          * The rspt mdh will be invalidated when a response
2903                          * is received or whenever we want to discard the
2904                          * block. The monitor thread will walk the queue
2905                          * and clean up any rspts with an invalid mdh.
2906                          * The monitor thread will walk the queue until
2907                          * the first unexpired rspt block. This means that
2908                          * some rspt blocks which received their
2909                          * corresponding responses will linger in the
2910                          * queue until they are cleaned up eventually.
2911                          */
2912                         lnet_res_lock(i);
2913                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
2914                                 lnet_res_unlock(i);
2915                                 list_del(&rspt->rspt_on_list);
2916                                 lnet_rspt_free(rspt, i);
2917                                 continue;
2918                         }
2919
2920                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
2921                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
2922                                 struct lnet_peer_ni *lpni;
2923                                 lnet_nid_t nid;
2924
2925                                 md = lnet_handle2md(&rspt->rspt_mdh);
2926                                 if (!md) {
2927                                         /* MD has been queued for unlink, but
2928                                          * rspt hasn't been detached (Note we've
2929                                          * checked above that the rspt_mdh is
2930                                          * valid). Since we cannot lookup the MD
2931                                          * we're unable to detach the rspt
2932                                          * ourselves. Thus, move the rspt to the
2933                                          * zombie list where we'll wait for
2934                                          * either:
2935                                          *   1. The remaining operations on the
2936                                          *   MD to complete. In this case the
2937                                          *   final operation will result in
2938                                          *   lnet_msg_detach_md()->
2939                                          *   lnet_detach_rsp_tracker() where
2940                                          *   we will clean up this response
2941                                          *   tracker.
2942                                          *   2. LNet to shutdown. In this case
2943                                          *   we'll wait until after all LND Nets
2944                                          *   have shutdown and then we can
2945                                          *   safely free any remaining response
2946                                          *   tracker blocks on the zombie list.
2947                                          * Note: We need to hold the resource
2948                                          * lock when adding to the zombie list
2949                                          * because we may have concurrent access
2950                                          * with lnet_detach_rsp_tracker().
2951                                          */
2952                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
2953                                         list_move(&rspt->rspt_on_list,
2954                                                   the_lnet.ln_mt_zombie_rstqs[i]);
2955                                         lnet_res_unlock(i);
2956                                         continue;
2957                                 }
2958                                 LASSERT(md->md_rspt_ptr == rspt);
2959                                 md->md_rspt_ptr = NULL;
2960                                 lnet_res_unlock(i);
2961
2962                                 LNetMDUnlink(rspt->rspt_mdh);
2963
2964                                 nid = rspt->rspt_next_hop_nid;
2965
2966                                 list_del(&rspt->rspt_on_list);
2967                                 lnet_rspt_free(rspt, i);
2968
2969                                 /* If we're shutting down we just want to clean
2970                                  * up the rspt blocks
2971                                  */
2972                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
2973                                         continue;
2974
2975                                 lnet_net_lock(i);
2976                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
2977                                 lnet_net_unlock(i);
2978
2979                                 CDEBUG(D_NET,
2980                                        "Response timeout: md = %p: nid = %s\n",
2981                                        md, libcfs_nid2str(nid));
2982
2983                                 /*
2984                                  * If there is a timeout on the response
2985                                  * from the next hop decrement its health
2986                                  * value so that we don't use it
2987                                  */
2988                                 lnet_net_lock(0);
2989                                 lpni = lnet_find_peer_ni_locked(nid);
2990                                 if (lpni) {
2991                                         lnet_handle_remote_failure_locked(lpni);
2992                                         lnet_peer_ni_decref_locked(lpni);
2993                                 }
2994                                 lnet_net_unlock(0);
2995                         } else {
2996                                 lnet_res_unlock(i);
2997                                 break;
2998                         }
2999                 }
3000
3001                 if (!list_empty(&local_queue)) {
3002                         lnet_net_lock(i);
3003                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3004                         lnet_net_unlock(i);
3005                 }
3006         }
3007 }
3008
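/* Drain the given resend queue, re-issuing each message via
 * lnet_send(). Called with lnet_net_lock(cpt) held; the lock is
 * dropped around the actual send and re-taken afterwards.
 */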
3009 static void
3010 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3011 {
3012         struct lnet_msg *msg;
3013
3014         while (!list_empty(resendq)) {
3015                 struct lnet_peer_ni *lpni;
3016
3017                 msg = list_entry(resendq->next, struct lnet_msg,
3018                                  msg_list);
3019
3020                 list_del_init(&msg->msg_list);
3021
3022                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3023                 if (!lpni) {
3024                         lnet_net_unlock(cpt);
3025                         CERROR("Expected that a peer is already created for %s\n",
3026                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3027                         msg->msg_no_resend = true;
3028                         lnet_finalize(msg, -EFAULT);
3029                         lnet_net_lock(cpt);
3030                 } else {
3031                         struct lnet_peer *peer;
3032                         int rc;
3033                         lnet_nid_t src_nid = LNET_NID_ANY;
3034
3035                         /*
3036                          * if this message is not being routed and the
3037                          * peer is non-MR then we must use the same
3038                          * src_nid that was used in the original send.
3039                          * Otherwise if we're routing the message (i.e.
3040                          * we're a router) then we can use any of our
3041                          * local interfaces. It doesn't matter to the
3042                          * final destination.
3043                          */
3044                         peer = lpni->lpni_peer_net->lpn_peer;
3045                         if (!msg->msg_routing &&
3046                             !lnet_peer_is_multi_rail(peer))
3047                                 src_nid = le64_to_cpu(msg->msg_hdr.src_nid);
3048
3049                         /*
3050                          * If we originally specified a src NID, then we
3051                          * must attempt to reuse it in the resend as well.
3052                          */
3053                         if (msg->msg_src_nid_param != LNET_NID_ANY)
3054                                 src_nid = msg->msg_src_nid_param;
3055                         lnet_peer_ni_decref_locked(lpni);
3056
3057                         lnet_net_unlock(cpt);
3058                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3059                                libcfs_nid2str(src_nid),
3060                                libcfs_id2str(msg->msg_target),
3061                                lnet_msgtyp2str(msg->msg_type),
3062                                msg->msg_recovery,
3063                                msg->msg_retry_count);
3064                         rc = lnet_send(src_nid, msg, LNET_NID_ANY);
3065                         if (rc) {
3066                                 CERROR("Error sending %s to %s: %d\n",
3067                                        lnet_msgtyp2str(msg->msg_type),
3068                                        libcfs_id2str(msg->msg_target), rc);
3069                                 msg->msg_no_resend = true;
3070                                 lnet_finalize(msg, rc);
3071                         }
3072                         lnet_net_lock(cpt);
3073                         if (!rc)
3074                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3075                 }
3076         }
3077 }
3078
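/* Resend any messages queued on the per-CPT resend queues. */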
3079 static void
3080 lnet_resend_pending_msgs(void)
3081 {
3082         int i;
3083
3084         cfs_cpt_for_each(i, lnet_cpt_table()) {
3085                 lnet_net_lock(i);
3086                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3087                 lnet_net_unlock(i);
3088         }
3089 }
3090
3091 /* called with cpt and ni_lock held */
3092 static void
3093 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3094 {
3095         struct lnet_handle_md recovery_mdh;
3096
3097         LNetInvalidateMDHandle(&recovery_mdh);
3098
3099         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3100             force) {
3101                 recovery_mdh = ni->ni_ping_mdh;
3102                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3103         }
3104         lnet_ni_unlock(ni);
3105         lnet_net_unlock(cpt);
3106         if (!LNetMDHandleIsInvalid(recovery_mdh))
3107                 LNetMDUnlink(recovery_mdh);
3108         lnet_net_lock(cpt);
3109         lnet_ni_lock(ni);
3110 }
3111
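/* Attempt to recover unhealthy local NIs by pinging each NI queued on
 * ln_mt_localNIRecovq. NIs that have become healthy or are being
 * deleted are dropped from the queue; the rest are put back so the
 * next monitor pass can retry them.
 */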
3112 static void
3113 lnet_recover_local_nis(void)
3114 {
3115         struct lnet_mt_event_info *ev_info;
3116         struct list_head processed_list;
3117         struct list_head local_queue;
3118         struct lnet_handle_md mdh;
3119         struct lnet_ni *tmp;
3120         struct lnet_ni *ni;
3121         lnet_nid_t nid;
3122         int healthv;
3123         int rc;
3124
3125         INIT_LIST_HEAD(&local_queue);
3126         INIT_LIST_HEAD(&processed_list);
3127
3128         /*
3129          * splice the recovery queue on a local queue. We will iterate
3130          * through the local queue and update it as needed. Once we're
3131          * done with the traversal, we'll splice the local queue back on
3132          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3133          * will be traversed in the next iteration.
3134          */
3135         lnet_net_lock(0);
3136         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3137                          &local_queue);
3138         lnet_net_unlock(0);
3139
3140         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3141                 /*
3142                  * if an NI is being deleted or it is now healthy, there
3143                  * is no need to keep it around in the recovery queue.
3144                  * The monitor thread is the only thread responsible for
3145                  * removing the NI from the recovery queue.
3146                  * Multiple threads can be adding NIs to the recovery
3147                  * queue.
3148                  */
3149                 healthv = atomic_read(&ni->ni_healthv);
3150
3151                 lnet_net_lock(0);
3152                 lnet_ni_lock(ni);
3153                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3154                     healthv == LNET_MAX_HEALTH_VALUE) {
3155                         list_del_init(&ni->ni_recovery);
3156                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3157                         lnet_ni_unlock(ni);
3158                         lnet_ni_decref_locked(ni, 0);
3159                         lnet_net_unlock(0);
3160                         continue;
3161                 }
3162
3163                 /*
3164                  * if the local NI failed recovery we must unlink the md.
3165                  * But we want to keep the local_ni on the recovery queue
3166                  * so we can continue the attempts to recover it.
3167                  */
3168                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3169                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3170                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3171                 }
3172
3173                 lnet_ni_unlock(ni);
3174                 lnet_net_unlock(0);
3175
3176
3177                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3178                        libcfs_nid2str(ni->ni_nid));
3179
3180                 lnet_ni_lock(ni);
3181                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3182                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3183                         lnet_ni_unlock(ni);
3184
3185                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3186                         if (!ev_info) {
3187                                 CERROR("out of memory. Can't recover %s\n",
3188                                        libcfs_nid2str(ni->ni_nid));
3189                                 lnet_ni_lock(ni);
3190                                 ni->ni_recovery_state &=
3191                                   ~LNET_NI_RECOVERY_PENDING;
3192                                 lnet_ni_unlock(ni);
3193                                 continue;
3194                         }
3195
3196                         mdh = ni->ni_ping_mdh;
3197                         /*
3198                          * Invalidate the ni mdh in case it's deleted.
3199                          * We'll unlink the mdh in this case below.
3200                          */
3201                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3202                         nid = ni->ni_nid;
3203
3204                         /*
3205                          * remove the NI from the local queue and drop the
3206                          * reference count to it while we're recovering
3207                          * it. The reason is that the NI could be
3208                          * deleted and, the way the code is structured,
3209                          * if we don't drop the NI the deletion
3210                          * code will enter a loop waiting for the
3211                          * reference count to be removed while holding the
3212                          * ln_mutex_lock(). When we look up the peer to
3213                          * send to in lnet_select_pathway() we will try to
3214                          * lock the ln_mutex_lock() as well, leading to
3215                          * a deadlock. By dropping the refcount and
3216                          * removing it from the list, we allow for the NI
3217                          * to be removed, then we use the cached NID to
3218                          * look it up again. If it's gone, then we just
3219                          * continue examining the rest of the queue.
3220                          */
3221                         lnet_net_lock(0);
3222                         list_del_init(&ni->ni_recovery);
3223                         lnet_ni_decref_locked(ni, 0);
3224                         lnet_net_unlock(0);
3225
3226                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3227                         ev_info->mt_nid = nid;
3228                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3229                                             ev_info, the_lnet.ln_mt_eqh, true);
3230                         /* lookup the nid again */
3231                         lnet_net_lock(0);
3232                         ni = lnet_nid2ni_locked(nid, 0);
3233                         if (!ni) {
3234                                 /*
3235                                  * the NI has been deleted when we dropped
3236                                  * the ref count
3237                                  */
3238                                 lnet_net_unlock(0);
3239                                 LNetMDUnlink(mdh);
3240                                 continue;
3241                         }
3242                         /*
3243                          * Same note as in lnet_recover_peer_nis(). When
3244                          * we're sending the ping, the NI is free to be
3245                          * deleted or manipulated. By this point it
3246                          * could've been added back on the recovery queue,
3247                          * and a refcount taken on it.
3248                          * So we can't just add it blindly again or we'll
3249                          * corrupt the queue. We must check under lock if
3250                          * it's not on any list and if not then add it
3251                          * to the processed list, which will eventually be
3252                          * spliced back on to the recovery queue.
3253                          */
3254                         ni->ni_ping_mdh = mdh;
3255                         if (list_empty(&ni->ni_recovery)) {
3256                                 list_add_tail(&ni->ni_recovery, &processed_list);
3257                                 lnet_ni_addref_locked(ni, 0);
3258                         }
3259                         lnet_net_unlock(0);
3260
3261                         lnet_ni_lock(ni);
3262                         if (rc)
3263                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3264                 }
3265                 lnet_ni_unlock(ni);
3266         }
3267
3268         /*
3269          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3270          * reexamined in the next iteration.
3271          */
3272         list_splice_init(&processed_list, &local_queue);
3273         lnet_net_lock(0);
3274         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3275         lnet_net_unlock(0);
3276 }
3277
3278 static int
3279 lnet_resendqs_create(void)
3280 {
3281         struct list_head **resendqs;
3282         resendqs = lnet_create_array_of_queues();
3283
3284         if (!resendqs)
3285                 return -ENOMEM;
3286
3287         lnet_net_lock(LNET_LOCK_EX);
3288         the_lnet.ln_mt_resendqs = resendqs;
3289         lnet_net_unlock(LNET_LOCK_EX);
3290
3291         return 0;
3292 }
3293
3294 static void
3295 lnet_clean_local_ni_recoveryq(void)
3296 {
3297         struct lnet_ni *ni;
3298
3299         /* This is only called when the monitor thread has stopped */
3300         lnet_net_lock(0);
3301
3302         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3303                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3304                                 struct lnet_ni, ni_recovery);
3305                 list_del_init(&ni->ni_recovery);
3306                 lnet_ni_lock(ni);
3307                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3308                 lnet_ni_unlock(ni);
3309                 lnet_ni_decref_locked(ni, 0);
3310         }
3311
3312         lnet_net_unlock(0);
3313 }
3314
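/* called with cpt lock and lpni_lock held */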
3315 static void
3316 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3317                                      bool force)
3318 {
3319         struct lnet_handle_md recovery_mdh;
3320
3321         LNetInvalidateMDHandle(&recovery_mdh);
3322
3323         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3324                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3325                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3326         }
3327         spin_unlock(&lpni->lpni_lock);
3328         lnet_net_unlock(cpt);
3329         if (!LNetMDHandleIsInvalid(recovery_mdh))
3330                 LNetMDUnlink(recovery_mdh);
3331         lnet_net_lock(cpt);
3332         spin_lock(&lpni->lpni_lock);
3333 }
3334
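/* Empty ln_mt_peerNIRecovq, unlinking any in-flight recovery ping MDs
 * and dropping the queue's reference on each peer NI. Only called once
 * the monitor thread has stopped.
 */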
3335 static void
3336 lnet_clean_peer_ni_recoveryq(void)
3337 {
3338         struct lnet_peer_ni *lpni, *tmp;
3339
3340         lnet_net_lock(LNET_LOCK_EX);
3341
3342         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3343                                  lpni_recovery) {
3344                 list_del_init(&lpni->lpni_recovery);
3345                 spin_lock(&lpni->lpni_lock);
3346                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3347                 spin_unlock(&lpni->lpni_lock);
3348                 lnet_peer_ni_decref_locked(lpni);
3349         }
3350
3351         lnet_net_unlock(LNET_LOCK_EX);
3352 }
3353
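/* Finalize, with -ESHUTDOWN, any messages still sitting on the resend
 * queues and free the queue array.
 */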
3354 static void
3355 lnet_clean_resendqs(void)
3356 {
3357         struct lnet_msg *msg, *tmp;
3358         struct list_head msgs;
3359         int i;
3360
3361         INIT_LIST_HEAD(&msgs);
3362
3363         cfs_cpt_for_each(i, lnet_cpt_table()) {
3364                 lnet_net_lock(i);
3365                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3366                 lnet_net_unlock(i);
3367                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3368                         list_del_init(&msg->msg_list);
3369                         msg->msg_no_resend = true;
3370                         lnet_finalize(msg, -ESHUTDOWN);
3371                 }
3372         }
3373
3374         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3375 }
3376
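/* Remote counterpart of lnet_recover_local_nis(): ping each unhealthy
 * peer NI queued on ln_mt_peerNIRecovq and requeue those that still
 * need recovering.
 */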
3377 static void
3378 lnet_recover_peer_nis(void)
3379 {
3380         struct lnet_mt_event_info *ev_info;
3381         struct list_head processed_list;
3382         struct list_head local_queue;
3383         struct lnet_handle_md mdh;
3384         struct lnet_peer_ni *lpni;
3385         struct lnet_peer_ni *tmp;
3386         lnet_nid_t nid;
3387         int healthv;
3388         int rc;
3389
3390         INIT_LIST_HEAD(&local_queue);
3391         INIT_LIST_HEAD(&processed_list);
3392
3393         /*
3394          * Always use cpt 0 for locking across all interactions with
3395          * ln_mt_peerNIRecovq
3396          */
3397         lnet_net_lock(0);
3398         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3399                          &local_queue);
3400         lnet_net_unlock(0);
3401
3402         list_for_each_entry_safe(lpni, tmp, &local_queue,
3403                                  lpni_recovery) {
3404                 /*
3405                  * The same protection strategy is used here as is in the
3406                  * local recovery case.
3407                  */
3408                 lnet_net_lock(0);
3409                 healthv = atomic_read(&lpni->lpni_healthv);
3410                 spin_lock(&lpni->lpni_lock);
3411                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3412                     healthv == LNET_MAX_HEALTH_VALUE) {
3413                         list_del_init(&lpni->lpni_recovery);
3414                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3415                         spin_unlock(&lpni->lpni_lock);
3416                         lnet_peer_ni_decref_locked(lpni);
3417                         lnet_net_unlock(0);
3418                         continue;
3419                 }
3420
3421                 /*
3422                  * If the peer NI has failed recovery we must unlink the
3423                  * md. But we want to keep the peer ni on the recovery
3424                  * queue so we can try to continue recovering it
3425                  */
3426                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3427                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3428                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3429                 }
3430
3431                 spin_unlock(&lpni->lpni_lock);
3432                 lnet_net_unlock(0);
3433
3434                 /*
3435                  * NOTE: we're racing with peer deletion from user space.
3436                  * It's possible that a peer is deleted after we check its
3437                  * state. In this case the recovery can create a new peer
3438                  */
3439                 spin_lock(&lpni->lpni_lock);
3440                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3441                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3442                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3443                         spin_unlock(&lpni->lpni_lock);
3444
3445                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3446                         if (!ev_info) {
3447                                 CERROR("out of memory. Can't recover %s\n",
3448                                        libcfs_nid2str(lpni->lpni_nid));
3449                                 spin_lock(&lpni->lpni_lock);
3450                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3451                                 spin_unlock(&lpni->lpni_lock);
3452                                 continue;
3453                         }
3454
3455                         /* look at the comments in lnet_recover_local_nis() */
3456                         mdh = lpni->lpni_recovery_ping_mdh;
3457                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3458                         nid = lpni->lpni_nid;
3459                         lnet_net_lock(0);
3460                         list_del_init(&lpni->lpni_recovery);
3461                         lnet_peer_ni_decref_locked(lpni);
3462                         lnet_net_unlock(0);
3463
3464                         ev_info->mt_type = MT_TYPE_PEER_NI;
3465                         ev_info->mt_nid = nid;
3466                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3467                                             ev_info, the_lnet.ln_mt_eqh, true);
3468                         lnet_net_lock(0);
3469                         /*
3470                          * lnet_find_peer_ni_locked() grabs a refcount for
3471                          * us. No need to take it explicitly.
3472                          */
3473                         lpni = lnet_find_peer_ni_locked(nid);
3474                         if (!lpni) {
3475                                 lnet_net_unlock(0);
3476                                 LNetMDUnlink(mdh);
3477                                 continue;
3478                         }
3479
3480                         lpni->lpni_recovery_ping_mdh = mdh;
3481                         /*
3482                          * While we're unlocked the lpni could've been
3483                          * readded on the recovery queue. In this case we
3484                          * don't need to add it to the local queue, since
3485                          * it's already on there and the thread that added
3486                          * it would've incremented the refcount on the
3487                          * peer, which means we need to decref the refcount
3488                          * that was implicitly grabbed by find_peer_ni_locked.
3489                          * Otherwise, if the lpni is still not on
3490                          * the recovery queue, then we'll add it to the
3491                          * processed list.
3492                          */
3493                         if (list_empty(&lpni->lpni_recovery))
3494                                 list_add_tail(&lpni->lpni_recovery, &processed_list);
3495                         else
3496                                 lnet_peer_ni_decref_locked(lpni);
3497                         lnet_net_unlock(0);
3498
3499                         spin_lock(&lpni->lpni_lock);
3500                         if (rc)
3501                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3502                 }
3503                 spin_unlock(&lpni->lpni_lock);
3504         }
3505
3506         list_splice_init(&processed_list, &local_queue);
3507         lnet_net_lock(0);
3508         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3509         lnet_net_unlock(0);
3510 }
3511
3512 static int
3513 lnet_monitor_thread(void *arg)
3514 {
3515         time64_t recovery_timeout = 0;
3516         time64_t rsp_timeout = 0;
3517         int interval;
3518         time64_t now;
3519
3520         wait_for_completion(&the_lnet.ln_started);
3521         /*
3522          * The monitor thread takes care of the following:
3523          *  1. Checks the aliveness of routers
3524          *  2. Checks if there are messages on the resend queue to resend
3525          *     them.
3526          *  3. Check if there are any NIs on the local recovery queue and
3527          *     pings them
3528          *  4. Checks if there are any NIs on the remote recovery queue
3529          *     and pings them.
3530          */
3531         cfs_block_allsigs();
3532
3533         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3534                 now = ktime_get_real_seconds();
3535
3536                 if (lnet_router_checker_active())
3537                         lnet_check_routers();
3538
3539                 lnet_resend_pending_msgs();
3540
3541                 if (now >= rsp_timeout) {
3542                         lnet_finalize_expired_responses();
3543                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3544                 }
3545
3546                 if (now >= recovery_timeout) {
3547                         lnet_recover_local_nis();
3548                         lnet_recover_peer_nis();
3549                         recovery_timeout = now + lnet_recovery_interval;
3550                 }
3551
3552                 /*
3553                  * TODO do we need to check if we should sleep without
3554                  * timeout?  Technically, an active system will always
3555                  * have messages in flight so this check will always
3556                  * evaluate to false. And on an idle system do we care
3557                  * if we wake up every 1 second? That said, we have
3558                  * seen complaints that an idle thread wakes up
3559                  * unnecessarily.
3560                  *
3561                  * Take into account the current net_count when you wake
3562                  * up for alive router checking, since we need to check
3563                  * possibly as many networks as we have configured.
3564                  */
3565                 interval = min(lnet_recovery_interval,
3566                                min((unsigned int) alive_router_check_interval /
3567                                         lnet_current_net_count,
3568                                    lnet_transaction_timeout / 2));
3569                 wait_for_completion_interruptible_timeout(
3570                         &the_lnet.ln_mt_wait_complete,
3571                         cfs_time_seconds(interval));
3572                 /* Must re-init the completion before testing anything,
3573                  * including ln_mt_state.
3574                  */
3575                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3576         }
3577
3578         /* Shutting down */
3579         lnet_net_lock(LNET_LOCK_EX);
3580         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3581         lnet_net_unlock(LNET_LOCK_EX);
3582
3583         /* signal that the monitor thread is exiting */
3584         up(&the_lnet.ln_mt_signal);
3585
3586         return 0;
3587 }
3588
3589 /*
3590  * lnet_send_ping
3591  * Sends a ping.
3592  * Returns == 0 if success
3593  * Returns > 0 if LNetMDBind or prior fails
3594  * Returns < 0 if LNetGet fails
3595  */
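 *
 * The recovery paths above use it, for example, as:
 *	rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
 *			    ev_info, the_lnet.ln_mt_eqh, true);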
3596 int
3597 lnet_send_ping(lnet_nid_t dest_nid,
3598                struct lnet_handle_md *mdh, int nnis,
3599                void *user_data, struct lnet_handle_eq eqh, bool recovery)
3600 {
3601         struct lnet_md md = { NULL };
3602         struct lnet_process_id id;
3603         struct lnet_ping_buffer *pbuf;
3604         int rc;
3605
3606         if (dest_nid == LNET_NID_ANY) {
3607                 rc = -EHOSTUNREACH;
3608                 goto fail_error;
3609         }
3610
3611         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3612         if (!pbuf) {
3613                 rc = ENOMEM;
3614                 goto fail_error;
3615         }
3616
3617         /* initialize md content */
3618         md.start     = &pbuf->pb_info;
3619         md.length    = LNET_PING_INFO_SIZE(nnis);
3620         md.threshold = 2; /* GET/REPLY */
3621         md.max_size  = 0;
3622         md.options   = LNET_MD_TRUNCATE;
3623         md.user_ptr  = user_data;
3624         md.eq_handle = eqh;
3625
3626         rc = LNetMDBind(md, LNET_UNLINK, mdh);
3627         if (rc) {
3628                 lnet_ping_buffer_decref(pbuf);
3629                 CERROR("Can't bind MD: %d\n", rc);
3630                 rc = -rc; /* change the rc to positive */
3631                 goto fail_error;
3632         }
3633         id.pid = LNET_PID_LUSTRE;
3634         id.nid = dest_nid;
3635
3636         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3637                      LNET_RESERVED_PORTAL,
3638                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3639
3640         if (rc)
3641                 goto fail_unlink_md;
3642
3643         return 0;
3644
3645 fail_unlink_md:
3646         LNetMDUnlink(*mdh);
3647         LNetInvalidateMDHandle(mdh);
3648 fail_error:
3649         return rc;
3650 }
3651
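/* Handle the result of a recovery ping: clear the RECOVERY_PENDING
 * flag on the local NI or peer NI named by ev_info and, on failure,
 * set RECOVERY_FAILED so the next monitor pass unlinks the ping MD.
 * On success for a local NI (and not an unlink event) the health
 * value is incremented here directly.
 */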
3652 static void
3653 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3654                            int status, bool unlink_event)
3655 {
3656         lnet_nid_t nid = ev_info->mt_nid;
3657
3658         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3659                 struct lnet_ni *ni;
3660
3661                 lnet_net_lock(0);
3662                 ni = lnet_nid2ni_locked(nid, 0);
3663                 if (!ni) {
3664                         lnet_net_unlock(0);
3665                         return;
3666                 }
3667                 lnet_ni_lock(ni);
3668                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3669                 if (status)
3670                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3671                 lnet_ni_unlock(ni);
3672                 lnet_net_unlock(0);
3673
3674                 if (status != 0) {
3675                         CERROR("local NI (%s) recovery failed with %d\n",
3676                                libcfs_nid2str(nid), status);
3677                         return;
3678                 }
3679                 /*
3680                  * need to increment healthv for the ni here, because in
3681                  * the lnet_finalize() path we don't have access to this
3682                  * NI. And in order to get access to it, we'll need to
3683                  * carry forward too much information.
3684                  * In the peer case, it'll naturally be incremented
3685                  */
3686                 if (!unlink_event)
3687                         lnet_inc_healthv(&ni->ni_healthv);
3688         } else {
3689                 struct lnet_peer_ni *lpni;
3690                 int cpt;
3691
3692                 cpt = lnet_net_lock_current();
3693                 lpni = lnet_find_peer_ni_locked(nid);
3694                 if (!lpni) {
3695                         lnet_net_unlock(cpt);
3696                         return;
3697                 }
3698                 spin_lock(&lpni->lpni_lock);
3699                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3700                 if (status)
3701                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3702                 spin_unlock(&lpni->lpni_lock);
3703                 lnet_peer_ni_decref_locked(lpni);
3704                 lnet_net_unlock(cpt);
3705
3706                 if (status != 0)
3707                         CERROR("peer NI (%s) recovery failed with %d\n",
3708                                libcfs_nid2str(nid), status);
3709         }
3710 }
3711
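/* Event handler for the recovery pings issued by the monitor thread.
 * REPLY and UNLINK events feed lnet_handle_recovery_reply(); once the
 * MD has been unlinked the event info and ping buffer are freed.
 */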
3712 void
3713 lnet_mt_event_handler(struct lnet_event *event)
3714 {
3715         struct lnet_mt_event_info *ev_info = event->md.user_ptr;
3716         struct lnet_ping_buffer *pbuf;
3717
3718         /* TODO: remove assert */
3719         LASSERT(event->type == LNET_EVENT_REPLY ||
3720                 event->type == LNET_EVENT_SEND ||
3721                 event->type == LNET_EVENT_UNLINK);
3722
3723         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3724                event->status);
3725
3726         switch (event->type) {
3727         case LNET_EVENT_UNLINK:
3728                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3729                        libcfs_nid2str(ev_info->mt_nid));
3730                 /* fallthrough */
3731         case LNET_EVENT_REPLY:
3732                 lnet_handle_recovery_reply(ev_info, event->status,
3733                                            event->type == LNET_EVENT_UNLINK);
3734                 break;
3735         case LNET_EVENT_SEND:
3736                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3737                                libcfs_nid2str(ev_info->mt_nid),
3738                                (event->status) ? "unsuccessfully" :
3739                                "successfully", event->status);
3740                 break;
3741         default:
3742                 CERROR("Unexpected event: %d\n", event->type);
3743                 break;
3744         }
3745         if (event->unlinked) {
3746                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3747                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
3748                 lnet_ping_buffer_decref(pbuf);
3749         }
3750 }
3751
3752 static int
3753 lnet_rsp_tracker_create(void)
3754 {
3755         struct list_head **rstqs;
3756         rstqs = lnet_create_array_of_queues();
3757
3758         if (!rstqs)
3759                 return -ENOMEM;
3760
3761         the_lnet.ln_mt_rstq = rstqs;
3762
3763         return 0;
3764 }
3765
3766 static void
3767 lnet_rsp_tracker_clean(void)
3768 {
3769         lnet_finalize_expired_responses();
3770
3771         cfs_percpt_free(the_lnet.ln_mt_rstq);
3772         the_lnet.ln_mt_rstq = NULL;
3773 }
3774
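/* Create the resend queues and response tracker array, then start the
 * monitor thread. Returns -EALREADY if the thread is already running;
 * on any other failure everything set up here is torn down again.
 */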
3775 int lnet_monitor_thr_start(void)
3776 {
3777         int rc = 0;
3778         struct task_struct *task;
3779
3780         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
3781                 return -EALREADY;
3782
3783         rc = lnet_resendqs_create();
3784         if (rc)
3785                 return rc;
3786
3787         rc = lnet_rsp_tracker_create();
3788         if (rc)
3789                 goto clean_queues;
3790
3791         sema_init(&the_lnet.ln_mt_signal, 0);
3792
3793         lnet_net_lock(LNET_LOCK_EX);
3794         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
3795         lnet_net_unlock(LNET_LOCK_EX);
3796         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
3797         if (IS_ERR(task)) {
3798                 rc = PTR_ERR(task);
3799                 CERROR("Can't start monitor thread: %d\n", rc);
3800                 goto clean_thread;
3801         }
3802
3803         return 0;
3804
3805 clean_thread:
3806         lnet_net_lock(LNET_LOCK_EX);
3807         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3808         lnet_net_unlock(LNET_LOCK_EX);
3809         /* block until the monitor thread signals exit */
3810         down(&the_lnet.ln_mt_signal);
3811         /* clean up */
3812         lnet_net_lock(LNET_LOCK_EX);
3813         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3814         lnet_net_unlock(LNET_LOCK_EX);
3815         lnet_rsp_tracker_clean();
3816         lnet_clean_local_ni_recoveryq();
3817         lnet_clean_peer_ni_recoveryq();
3818         lnet_clean_resendqs();
3819         LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
3820         return rc;
3821 clean_queues:
3822         lnet_rsp_tracker_clean();
3823         lnet_clean_local_ni_recoveryq();
3824         lnet_clean_peer_ni_recoveryq();
3825         lnet_clean_resendqs();
3826         return rc;
3827 }
3828
3829 void lnet_monitor_thr_stop(void)
3830 {
3831         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3832                 return;
3833
3834         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
3835         lnet_net_lock(LNET_LOCK_EX);
3836         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
3837         lnet_net_unlock(LNET_LOCK_EX);
3838
3839         /* tell the monitor thread that we're shutting down */
3840         complete(&the_lnet.ln_mt_wait_complete);
3841
3842         /* block until monitor thread signals that it's done */
3843         down(&the_lnet.ln_mt_signal);
3844         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
3845
3846         /* perform cleanup tasks */
3847         lnet_rsp_tracker_clean();
3848         lnet_clean_local_ni_recoveryq();
3849         lnet_clean_peer_ni_recoveryq();
3850         lnet_clean_resendqs();
3851 }
3852
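/* Account a dropped message against the NI and per-CPT drop counters,
 * then have the LND discard the payload.
 */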
3853 void
3854 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
3855                   __u32 msg_type)
3856 {
3857         lnet_net_lock(cpt);
3858         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
3859         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
3860         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
3861         lnet_net_unlock(cpt);
3862
3863         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
3864 }
3865
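/* Deliver a matched PUT to its MD; msg_ack is set if the sender asked
 * for an ACK and the MD doesn't have ACKs disabled.
 */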
3866 static void
3867 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
3868 {
3869         struct lnet_hdr *hdr = &msg->msg_hdr;
3870
3871         if (msg->msg_wanted != 0)
3872                 lnet_setpayloadbuffer(msg);
3873
3874         lnet_build_msg_event(msg, LNET_EVENT_PUT);
3875
3876         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
3877          * it back into the ACK during lnet_finalize() */
3878         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
3879                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
3880
3881         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
3882                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
3883 }
3884
3885 static int
3886 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
3887 {
3888         struct lnet_hdr         *hdr = &msg->msg_hdr;
3889         struct lnet_match_info  info;
3890         int                     rc;
3891         bool                    ready_delay;
3892
3893         /* Convert put fields to host byte order */
3894         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
3895         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
3896         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
3897
3898         /* Primary peer NID. */
3899         info.mi_id.nid  = msg->msg_initiator;
3900         info.mi_id.pid  = hdr->src_pid;
3901         info.mi_opc     = LNET_MD_OP_PUT;
3902         info.mi_portal  = hdr->msg.put.ptl_index;
3903         info.mi_rlength = hdr->payload_length;
3904         info.mi_roffset = hdr->msg.put.offset;
3905         info.mi_mbits   = hdr->msg.put.match_bits;
3906         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
3907
3908         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
3909         ready_delay = msg->msg_rx_ready_delay;
3910
3911  again:
3912         rc = lnet_ptl_match_md(&info, msg);
3913         switch (rc) {
3914         default:
3915                 LBUG();
3916
3917         case LNET_MATCHMD_OK:
3918                 lnet_recv_put(ni, msg);
3919                 return 0;
3920
3921         case LNET_MATCHMD_NONE:
3922                 if (ready_delay)
3923                         /* no eager_recv or has already called it, should
3924                          * have been attached on delayed list */
3925                         return 0;
3926
3927                 rc = lnet_ni_eager_recv(ni, msg);
3928                 if (rc == 0) {
3929                         ready_delay = true;
3930                         goto again;
3931                 }
3932                 /* fall through */
3933
3934         case LNET_MATCHMD_DROP:
3935                 CNETERR("Dropping PUT from %s portal %d match %llu"
3936                         " offset %d length %d: %d\n",
3937                         libcfs_id2str(info.mi_id), info.mi_portal,
3938                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
3939
3940                 return -ENOENT; /* -ve: OK but no match */
3941         }
3942 }
3943
3944 static int
3945 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
3946 {
3947         struct lnet_match_info info;
3948         struct lnet_hdr *hdr = &msg->msg_hdr;
3949         struct lnet_process_id source_id;
3950         struct lnet_handle_wire reply_wmd;
3951         int rc;
3952
3953         /* Convert get fields to host byte order */
3954         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
3955         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
3956         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
3957         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
3958
3959         source_id.nid = hdr->src_nid;
3960         source_id.pid = hdr->src_pid;
3961         /* Primary peer NID */
3962         info.mi_id.nid  = msg->msg_initiator;
3963         info.mi_id.pid  = hdr->src_pid;
3964         info.mi_opc     = LNET_MD_OP_GET;
3965         info.mi_portal  = hdr->msg.get.ptl_index;
3966         info.mi_rlength = hdr->msg.get.sink_length;
3967         info.mi_roffset = hdr->msg.get.src_offset;
3968         info.mi_mbits   = hdr->msg.get.match_bits;
3969         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
3970
3971         rc = lnet_ptl_match_md(&info, msg);
3972         if (rc == LNET_MATCHMD_DROP) {
3973                 CNETERR("Dropping GET from %s portal %d match %llu"
3974                         " offset %d length %d\n",
3975                         libcfs_id2str(info.mi_id), info.mi_portal,
3976                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
3977                 return -ENOENT; /* -ve: OK but no match */
3978         }
3979
3980         LASSERT(rc == LNET_MATCHMD_OK);
3981
3982         lnet_build_msg_event(msg, LNET_EVENT_GET);
3983
3984         reply_wmd = hdr->msg.get.return_wmd;
3985
3986         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
3987                        msg->msg_offset, msg->msg_wanted);
3988
3989         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
3990
3991         if (rdma_get) {
3992                 /* The LND completes the REPLY from its recv procedure */
3993                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
3994                              msg->msg_offset, msg->msg_len, msg->msg_len);
3995                 return 0;
3996         }
3997
3998         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
3999         msg->msg_receiving = 0;
4000
4001         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
4002         if (rc < 0) {
4003                 /* didn't get as far as lnet_ni_send() */
4004                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4005                        libcfs_nid2str(ni->ni_nid),
4006                        libcfs_id2str(info.mi_id), rc);
4007
4008                 lnet_finalize(msg, rc);
4009         }
4010
4011         return 0;
4012 }
4013
4014 static int
4015 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4016 {
4017         void *private = msg->msg_private;
4018         struct lnet_hdr *hdr = &msg->msg_hdr;
4019         struct lnet_process_id src = {0};
4020         struct lnet_libmd *md;
4021         int rlength;
4022         int mlength;
4023         int cpt;
4024
4025         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4026         lnet_res_lock(cpt);
4027
4028         src.nid = hdr->src_nid;
4029         src.pid = hdr->src_pid;
4030
4031         /* NB handles only looked up by creator (no flips) */
4032         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4033         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4034                 CNETERR("%s: Dropping REPLY from %s for %s "
4035                         "MD %#llx.%#llx\n",
4036                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4037                         (md == NULL) ? "invalid" : "inactive",
4038                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4039                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4040                 if (md != NULL && md->md_me != NULL)
4041                         CERROR("REPLY MD also attached to portal %d\n",
4042                                md->md_me->me_portal);
4043
4044                 lnet_res_unlock(cpt);
4045                 return -ENOENT; /* -ve: OK but no match */
4046         }
4047
4048         LASSERT(md->md_offset == 0);
4049
4050         rlength = hdr->payload_length;
4051         mlength = MIN(rlength, (int)md->md_length);
4052
4053         if (mlength < rlength &&
4054             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4055                 CNETERR("%s: Dropping REPLY from %s length %d "
4056                         "for MD %#llx would overflow (%d)\n",
4057                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4058                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4059                         mlength);
4060                 lnet_res_unlock(cpt);
4061                 return -ENOENT; /* -ve: OK but no match */
4062         }
4063
4064         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4065                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4066                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4067
4068         lnet_msg_attach_md(msg, md, 0, mlength);
4069
4070         if (mlength != 0)
4071                 lnet_setpayloadbuffer(msg);
4072
4073         lnet_res_unlock(cpt);
4074
4075         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4076
4077         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4078         return 0;
4079 }
4080
4081 static int
4082 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4083 {
4084         struct lnet_hdr *hdr = &msg->msg_hdr;
4085         struct lnet_process_id src = {0};
4086         struct lnet_libmd *md;
4087         int cpt;
4088
4089         src.nid = hdr->src_nid;
4090         src.pid = hdr->src_pid;
4091
4092         /* Convert ack fields to host byte order */
4093         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4094         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4095
4096         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4097         lnet_res_lock(cpt);
4098
4099         /* NB handles only looked up by creator (no flips) */
4100         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4101         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4102                 /* Don't moan; this is expected */
4103                 CDEBUG(D_NET,
4104                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4105                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4106                        (md == NULL) ? "invalid" : "inactive",
4107                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4108                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4109                 if (md != NULL && md->md_me != NULL)
4110                         CERROR("Source MD also attached to portal %d\n",
4111                                md->md_me->me_portal);
4112
4113                 lnet_res_unlock(cpt);
4114                 return -ENOENT;                  /* -ve! */
4115         }
4116
4117         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4118                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4119                hdr->msg.ack.dst_wmd.wh_object_cookie);
4120
4121         lnet_msg_attach_md(msg, md, 0, 0);
4122
4123         lnet_res_unlock(cpt);
4124
4125         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4126
4127         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4128         return 0;
4129 }
4130
4131 /**
4132  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4133  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4134  * \retval -ve                  error code
4135  */
4136 int
4137 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4138 {
4139         int     rc = 0;
4140
4141         if (!the_lnet.ln_routing)
4142                 return -ECANCELED;
4143
4144         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4145             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4146                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4147                         msg->msg_rx_ready_delay = 1;
4148                 } else {
4149                         lnet_net_unlock(msg->msg_rx_cpt);
4150                         rc = lnet_ni_eager_recv(ni, msg);
4151                         lnet_net_lock(msg->msg_rx_cpt);
4152                 }
4153         }
4154
4155         if (rc == 0)
4156                 rc = lnet_post_routed_recv_locked(msg, 0);
4157         return rc;
4158 }
4159
4160 int
4161 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4162 {
4163         int     rc;
4164
4165         switch (msg->msg_type) {
4166         case LNET_MSG_ACK:
4167                 rc = lnet_parse_ack(ni, msg);
4168                 break;
4169         case LNET_MSG_PUT:
4170                 rc = lnet_parse_put(ni, msg);
4171                 break;
4172         case LNET_MSG_GET:
4173                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4174                 break;
4175         case LNET_MSG_REPLY:
4176                 rc = lnet_parse_reply(ni, msg);
4177                 break;
4178         default: /* prevent an unused label if !kernel */
4179                 LASSERT(0);
4180                 return -EPROTO;
4181         }
4182
4183         LASSERT(rc == 0 || rc == -ENOENT);
4184         return rc;
4185 }
4186
4187 char *
4188 lnet_msgtyp2str (int type)
4189 {
4190         switch (type) {
4191         case LNET_MSG_ACK:
4192                 return ("ACK");
4193         case LNET_MSG_PUT:
4194                 return ("PUT");
4195         case LNET_MSG_GET:
4196                 return ("GET");
4197         case LNET_MSG_REPLY:
4198                 return ("REPLY");
4199         case LNET_MSG_HELLO:
4200                 return ("HELLO");
4201         default:
4202                 return ("<UNKNOWN>");
4203         }
4204 }
4205
4206 void
4207 lnet_print_hdr(struct lnet_hdr *hdr)
4208 {
4209         struct lnet_process_id src = {
4210                 .nid = hdr->src_nid,
4211                 .pid = hdr->src_pid,
4212         };
4213         struct lnet_process_id dst = {
4214                 .nid = hdr->dest_nid,
4215                 .pid = hdr->dest_pid,
4216         };
4217         char *type_str = lnet_msgtyp2str(hdr->type);
4218
4219         CWARN("P3 Header at %p of type %s\n", hdr, type_str);
4220         CWARN("    From %s\n", libcfs_id2str(src));
4221         CWARN("    To   %s\n", libcfs_id2str(dst));
4222
4223         switch (hdr->type) {
4224         default:
4225                 break;
4226
4227         case LNET_MSG_PUT:
4228                 CWARN("    Ptl index %d, ack md %#llx.%#llx, "
4229                       "match bits %llu\n",
4230                       hdr->msg.put.ptl_index,
4231                       hdr->msg.put.ack_wmd.wh_interface_cookie,
4232                       hdr->msg.put.ack_wmd.wh_object_cookie,
4233                       hdr->msg.put.match_bits);
4234                 CWARN("    Length %d, offset %d, hdr data %#llx\n",
4235                       hdr->payload_length, hdr->msg.put.offset,
4236                       hdr->msg.put.hdr_data);
4237                 break;
4238
4239         case LNET_MSG_GET:
4240                 CWARN("    Ptl index %d, return md %#llx.%#llx, "
4241                       "match bits %llu\n", hdr->msg.get.ptl_index,
4242                       hdr->msg.get.return_wmd.wh_interface_cookie,
4243                       hdr->msg.get.return_wmd.wh_object_cookie,
4244                       hdr->msg.get.match_bits);
4245                 CWARN("    Length %d, src offset %d\n",
4246                       hdr->msg.get.sink_length,
4247                       hdr->msg.get.src_offset);
4248                 break;
4249
4250         case LNET_MSG_ACK:
4251                 CWARN("    dst md %#llx.%#llx, "
4252                       "manipulated length %d\n",
4253                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
4254                       hdr->msg.ack.dst_wmd.wh_object_cookie,
4255                       hdr->msg.ack.mlength);
4256                 break;
4257
4258         case LNET_MSG_REPLY:
4259                 CWARN("    dst md %#llx.%#llx, "
4260                       "length %d\n",
4261                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
4262                       hdr->msg.reply.dst_wmd.wh_object_cookie,
4263                       hdr->payload_length);
4264         }
4265
4266 }
4267
4268 int
4269 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4270            void *private, int rdma_req)
4271 {
4272         struct lnet_peer_ni *lpni;
4273         struct lnet_msg *msg;
4274         __u32 payload_length;
4275         lnet_pid_t dest_pid;
4276         lnet_nid_t dest_nid;
4277         lnet_nid_t src_nid;
4278         bool push = false;
4279         int for_me;
4280         __u32 type;
4281         int rc = 0;
4282         int cpt;
4283
4284         LASSERT(!in_interrupt());
4285
4286         type = le32_to_cpu(hdr->type);
4287         src_nid = le64_to_cpu(hdr->src_nid);
4288         dest_nid = le64_to_cpu(hdr->dest_nid);
4289         dest_pid = le32_to_cpu(hdr->dest_pid);
4290         payload_length = le32_to_cpu(hdr->payload_length);
4291
4292         for_me = (ni->ni_nid == dest_nid);
4293         cpt = lnet_cpt_of_nid(from_nid, ni);
4294
4295         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4296                 libcfs_nid2str(dest_nid),
4297                 libcfs_nid2str(ni->ni_nid),
4298                 libcfs_nid2str(src_nid),
4299                 lnet_msgtyp2str(type),
4300                 (for_me) ? "for me" : "routed");
4301
4302         switch (type) {
4303         case LNET_MSG_ACK:
4304         case LNET_MSG_GET:
4305                 if (payload_length > 0) {
4306                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4307                                libcfs_nid2str(from_nid),
4308                                libcfs_nid2str(src_nid),
4309                                lnet_msgtyp2str(type), payload_length);
4310                         return -EPROTO;
4311                 }
4312                 break;
4313
4314         case LNET_MSG_PUT:
4315         case LNET_MSG_REPLY:
4316                 if (payload_length >
4317                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4318                         CERROR("%s, src %s: bad %s payload %d "
4319                                "(%d max expected)\n",
4320                                libcfs_nid2str(from_nid),
4321                                libcfs_nid2str(src_nid),
4322                                lnet_msgtyp2str(type),
4323                                payload_length,
4324                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4325                         return -EPROTO;
4326                 }
4327                 break;
4328
4329         default:
4330                 CERROR("%s, src %s: Bad message type 0x%x\n",
4331                        libcfs_nid2str(from_nid),
4332                        libcfs_nid2str(src_nid), type);
4333                 return -EPROTO;
4334         }
4335
4336         if (the_lnet.ln_routing &&
4337             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4338                 lnet_ni_lock(ni);
4339                 spin_lock(&ni->ni_net->net_lock);
4340                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4341                 spin_unlock(&ni->ni_net->net_lock);
4342                 if (ni->ni_status != NULL &&
4343                     ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) {
4344                         ni->ni_status->ns_status = LNET_NI_STATUS_UP;
4345                         push = true;
4346                 }
4347                 lnet_ni_unlock(ni);
4348         }
4349
4350         if (push)
4351                 lnet_push_update_to_peers(1);
4352
4353         /* Regard a bad destination NID as a protocol error.  Senders should
4354          * know what they're doing; if they don't they're misconfigured, buggy
4355          * or malicious so we chop them off at the knees :) */
4356
4357         if (!for_me) {
4358                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4359                         /* should have gone direct */
4360                         CERROR("%s, src %s: Bad dest nid %s "
4361                                "(should have been sent direct)\n",
4362                                 libcfs_nid2str(from_nid),
4363                                 libcfs_nid2str(src_nid),
4364                                 libcfs_nid2str(dest_nid));
4365                         return -EPROTO;
4366                 }
4367
4368                 if (lnet_islocalnid(dest_nid)) {
4369                         /* dest is another local NI; sender should have used
4370                          * this node's NID on its own network */
4371                         CERROR("%s, src %s: Bad dest nid %s "
4372                                "(it's my nid but on a different network)\n",
4373                                 libcfs_nid2str(from_nid),
4374                                 libcfs_nid2str(src_nid),
4375                                 libcfs_nid2str(dest_nid));
4376                         return -EPROTO;
4377                 }
4378
4379                 if (rdma_req && type == LNET_MSG_GET) {
4380                         CERROR("%s, src %s: Bad optimized GET for %s "
4381                                "(final destination must be me)\n",
4382                                 libcfs_nid2str(from_nid),
4383                                 libcfs_nid2str(src_nid),
4384                                 libcfs_nid2str(dest_nid));
4385                         return -EPROTO;
4386                 }
4387
4388                 if (!the_lnet.ln_routing) {
4389                         CERROR("%s, src %s: Dropping message for %s "
4390                                "(routing not enabled)\n",
4391                                 libcfs_nid2str(from_nid),
4392                                 libcfs_nid2str(src_nid),
4393                                 libcfs_nid2str(dest_nid));
4394                         goto drop;
4395                 }
4396         }
4397
4398         /* Message looks OK; we're not going to return an error, so we MUST
4399          * call back lnd_recv() come what may... */
4400
4401         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4402             fail_peer(src_nid, 0)) {                    /* shall we now? */
4403                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4404                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4405                        lnet_msgtyp2str(type));
4406                 goto drop;
4407         }
4408
4409         if (!list_empty(&the_lnet.ln_drop_rules) &&
4410             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4411                 CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate "
4412                               "silent message loss\n",
4413                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4414                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4415                 goto drop;
4416         }
4417
4418         if (lnet_drop_asym_route && for_me &&
4419             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4420                 struct lnet_net *net;
4421                 struct lnet_remotenet *rnet;
4422                 bool found = true;
4423
4424                 /* we are dealing with a routed message,
4425                  * so see if route to reach src_nid goes through from_nid
4426                  */
4427                 lnet_net_lock(cpt);
4428                 net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
4429                 if (!net) {
4430                         lnet_net_unlock(cpt);
4431                         CERROR("net %s not found\n",
4432                                libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
4433                         return -EPROTO;
4434                 }
4435
4436                 rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
4437                 if (rnet) {
4438                         struct lnet_peer *gw = NULL;
4439                         struct lnet_peer_ni *lpni = NULL;
4440                         struct lnet_route *route;
4441
4442                         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
4443                                 found = false;
4444                                 gw = route->lr_gateway;
4445                                 if (route->lr_lnet != net->net_id)
4446                                         continue;
4447                                 /*
4448                                  * if the nid is one of the gateway's NIDs
4449                                  * then this is a valid gateway
4450                                  */
4451                                 while ((lpni = lnet_get_next_peer_ni_locked(gw,
4452                                                 NULL, lpni)) != NULL) {
4453                                         if (lpni->lpni_nid == from_nid) {
4454                                                 found = true;
4455                                                 break;
4456                                         }
4457                                 }
                                     /* a matching gateway owns from_nid;
                                      * stop searching so a later route on
                                      * the same net cannot clear 'found' */
                                     if (found)
                                             break;
4458                         }
4459                 }
4460                 lnet_net_unlock(cpt);
4461                 if (!found) {
4462                         /* we would not use from_nid to route a message to
4463                          * src_nid
4464                          * => asymmetric routing detected but forbidden
4465                          */
4466                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4467                                libcfs_nid2str(from_nid),
4468                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4469                         goto drop;
4470                 }
4471         }
4472
4473         msg = lnet_msg_alloc();
4474         if (msg == NULL) {
4475                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4476                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4477                        lnet_msgtyp2str(type));
4478                 goto drop;
4479         }
4480
4481         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4482          * pointers NULL etc */
4483
4484         msg->msg_type = type;
4485         msg->msg_private = private;
4486         msg->msg_receiving = 1;
4487         msg->msg_rdma_get = rdma_req;
4488         msg->msg_len = msg->msg_wanted = payload_length;
4489         msg->msg_offset = 0;
4490         msg->msg_hdr = *hdr;
4491         /* for building message event */
4492         msg->msg_from = from_nid;
4493         if (!for_me) {
4494                 msg->msg_target.pid     = dest_pid;
4495                 msg->msg_target.nid     = dest_nid;
4496                 msg->msg_routing        = 1;
4497
4498         } else {
4499                 /* convert common msg->hdr fields to host byteorder */
4500                 msg->msg_hdr.type       = type;
4501                 msg->msg_hdr.src_nid    = src_nid;
4502                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4503                 msg->msg_hdr.dest_nid   = dest_nid;
4504                 msg->msg_hdr.dest_pid   = dest_pid;
4505                 msg->msg_hdr.payload_length = payload_length;
4506         }
4507
4508         lnet_net_lock(cpt);
4509         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4510         if (IS_ERR(lpni)) {
4511                 lnet_net_unlock(cpt);
4512                 CERROR("%s, src %s: Dropping %s "
4513                        "(error %ld looking up sender)\n",
4514                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4515                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4516                 lnet_msg_free(msg);
4517                 if (PTR_ERR(lpni) == -ESHUTDOWN)
4518                         /* We are shutting down.  Don't do anything more */
4519                         return 0;
4520                 goto drop;
4521         }
4522
4523         if (the_lnet.ln_routing)
4524                 lpni->lpni_last_alive = ktime_get_seconds();
4525
4526         msg->msg_rxpeer = lpni;
4527         msg->msg_rxni = ni;
4528         lnet_ni_addref_locked(ni, cpt);
4529         /* Multi-Rail: Primary NID of source. */
4530         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4531
4532         /*
4533          * mark the status of this lpni as UP since we received a message
4534          * from it. The ping response reports back the ns_status which is
4535          * marked on the remote as up or down and we cache it here.
4536          */
4537         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4538
4539         lnet_msg_commit(msg, cpt);
4540
4541         /* message delay simulation */
4542         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4543                      lnet_delay_rule_match_locked(hdr, msg))) {
4544                 lnet_net_unlock(cpt);
4545                 return 0;
4546         }
4547
4548         if (!for_me) {
4549                 rc = lnet_parse_forward_locked(ni, msg);
4550                 lnet_net_unlock(cpt);
4551
4552                 if (rc < 0)
4553                         goto free_drop;
4554
4555                 if (rc == LNET_CREDIT_OK) {
4556                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4557                                      0, payload_length, payload_length);
4558                 }
4559                 return 0;
4560         }
4561
4562         lnet_net_unlock(cpt);
4563
4564         rc = lnet_parse_local(ni, msg);
4565         if (rc != 0)
4566                 goto free_drop;
4567         return 0;
4568
4569  free_drop:
4570         LASSERT(msg->msg_md == NULL);
4571         lnet_finalize(msg, rc);
4572
4573  drop:
4574         lnet_drop_message(ni, cpt, private, payload_length, type);
4575         return 0;
4576 }
4577 EXPORT_SYMBOL(lnet_parse);
4578
4579 void
4580 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4581 {
4582         while (!list_empty(head)) {
4583                 struct lnet_process_id id = {0};
4584                 struct lnet_msg *msg;
4585
4586                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4587                 list_del(&msg->msg_list);
4588
4589                 id.nid = msg->msg_hdr.src_nid;
4590                 id.pid = msg->msg_hdr.src_pid;
4591
4592                 LASSERT(msg->msg_md == NULL);
4593                 LASSERT(msg->msg_rx_delayed);
4594                 LASSERT(msg->msg_rxpeer != NULL);
4595                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4596
4597                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4598                       " offset %d length %d: %s\n",
4599                       libcfs_id2str(id),
4600                       msg->msg_hdr.msg.put.ptl_index,
4601                       msg->msg_hdr.msg.put.match_bits,
4602                       msg->msg_hdr.msg.put.offset,
4603                       msg->msg_hdr.payload_length, reason);
4604
4605                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4606                  * called lnet_drop_message(), so I just hang onto msg as well
4607                  * until that's done */
4608
4609                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4610                                   msg->msg_private, msg->msg_len,
4611                                   msg->msg_type);
4612
4613                 msg->msg_no_resend = true;
4614                 /*
4615                  * NB: the message will not generate an event because it has no
4616                  * attached MD, but we still set an error code so lnet_msg_decommit()
4617                  * can skip counter updates and other checks.
4618                  */
4619                 lnet_finalize(msg, -ENOENT);
4620         }
4621 }
4622
4623 void
4624 lnet_recv_delayed_msg_list(struct list_head *head)
4625 {
4626         while (!list_empty(head)) {
4627                 struct lnet_msg *msg;
4628                 struct lnet_process_id id;
4629
4630                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4631                 list_del(&msg->msg_list);
4632
4633                 /* md won't disappear under me, since each msg
4634                  * holds a ref on it */
4635
4636                 id.nid = msg->msg_hdr.src_nid;
4637                 id.pid = msg->msg_hdr.src_pid;
4638
4639                 LASSERT(msg->msg_rx_delayed);
4640                 LASSERT(msg->msg_md != NULL);
4641                 LASSERT(msg->msg_rxpeer != NULL);
4642                 LASSERT(msg->msg_rxni != NULL);
4643                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4644
4645                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4646                        "match %llu offset %d length %d.\n",
4647                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4648                         msg->msg_hdr.msg.put.match_bits,
4649                         msg->msg_hdr.msg.put.offset,
4650                         msg->msg_hdr.payload_length);
4651
4652                 lnet_recv_put(msg->msg_rxni, msg);
4653         }
4654 }
4655
4656 static void
4657 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4658                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4659 {
4660         s64 timeout_ns;
4661         bool new_entry = true;
4662         struct lnet_rsp_tracker *local_rspt;
4663
4664         /*
4665          * MD has a refcount taken by message so it's not going away.
4666          * The MD however can be looked up. We need to secure the access
4667          * to the md_rspt_ptr by taking the res_lock.
4668          * The rspt can be accessed without protection up to when it gets
4669          * added to the list.
4670          */
4671
4672         lnet_res_lock(cpt);
4673         local_rspt = md->md_rspt_ptr;
4674         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4675         if (local_rspt != NULL) {
4676                 /*
4677                  * we already have an rspt attached to the md, so we'll
4678                  * update the deadline on that one.
4679                  */
4680                 LIBCFS_FREE(rspt, sizeof(*rspt));
4681                 new_entry = false;
4682         } else {
4683                 /* new md */
4684                 rspt->rspt_mdh = mdh;
4685                 rspt->rspt_cpt = cpt;
4686                 /* store the rspt so we can access it when we get the REPLY */
4687                 md->md_rspt_ptr = rspt;
4688                 local_rspt = rspt;
4689         }
4690         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4691
4692         /*
4693          * Add to the list of tracked responses. It's added to the tail of
4694          * the list so that older entries expire first.
4695          */
4696         lnet_net_lock(cpt);
4697         if (!new_entry && !list_empty(&local_rspt->rspt_on_list))
4698                 list_del_init(&local_rspt->rspt_on_list);
4699         list_add_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4700         lnet_net_unlock(cpt);
4701         lnet_res_unlock(cpt);
4702 }
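
/*
 * Illustrative timeline for the response tracker attached above.  The names
 * and values are for illustration only; the only fixed relationship is that
 * the deadline is lnet_transaction_timeout seconds after the tracker is
 * attached:
 *
 *	t0		a PUT with LNET_ACK_REQ (or a GET) is sent and the
 *			rspt is attached to its MD with
 *			rspt_deadline = t0 + lnet_transaction_timeout
 *	t0..deadline	the expected ACK/REPLY arrives and the tracker is
 *			released when the MD is detached
 *	deadline	no response was seen; the monitor thread finds the
 *			expired tracker, the MD is unlinked and the message
 *			is finalized with a timeout error
 */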
4703
4704 /**
4705  * Initiate an asynchronous PUT operation.
4706  *
4707  * There are several events associated with a PUT: completion of the send on
4708  * the initiator node (LNET_EVENT_SEND), and when the send completes
4709  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4710  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4711  * used at the target node to indicate the completion of incoming data
4712  * delivery.
4713  *
4714  * The local events will be logged in the EQ associated with the MD pointed to
4715  * by the \a mdh handle. Using an MD without an associated EQ results in these
4716  * events being discarded. In this case, the caller must have another
4717  * mechanism (e.g., a higher level protocol) for determining when it is safe
4718  * to modify the memory region associated with the MD.
4719  *
4720  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4721  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4722  *
4723  * \param self Indicates the NID of a local interface through which to send
4724  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4725  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4726  * must be "free floating" (See LNetMDBind()).
4727  * \param ack Controls whether an acknowledgment is requested.
4728  * Acknowledgments are only sent when they are requested by the initiating
4729  * process and the target MD enables them.
4730  * \param target A process identifier for the target process.
4731  * \param portal The index in the \a target's portal table.
4732  * \param match_bits The match bits to use for MD selection at the target
4733  * process.
4734  * \param offset The offset into the target MD (only used when the target
4735  * MD has the LNET_MD_MANAGE_REMOTE option set).
4736  * \param hdr_data 64 bits of user data that can be included in the message
4737  * header. This data is written to an event queue entry at the target if an
4738  * EQ is present on the matching MD.
4739  *
4740  * \retval  0      Success; only in this case will events be generated
4741  * and logged to the EQ (if it exists).
4742  * \retval -EIO    Simulated failure.
4743  * \retval -ENOMEM Memory allocation failure.
4744  * \retval -ENOENT Invalid MD object.
4745  *
4746  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4747  */
4748 int
4749 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4750         struct lnet_process_id target, unsigned int portal,
4751         __u64 match_bits, unsigned int offset,
4752         __u64 hdr_data)
4753 {
4754         struct lnet_msg *msg;
4755         struct lnet_libmd *md;
4756         int cpt;
4757         int rc;
4758         struct lnet_rsp_tracker *rspt = NULL;
4759
4760         LASSERT(the_lnet.ln_refcount > 0);
4761
4762         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4763             fail_peer(target.nid, 1)) {                 /* shall we now? */
4764                 CERROR("Dropping PUT to %s: simulated failure\n",
4765                        libcfs_id2str(target));
4766                 return -EIO;
4767         }
4768
4769         msg = lnet_msg_alloc();
4770         if (msg == NULL) {
4771                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4772                        libcfs_id2str(target));
4773                 return -ENOMEM;
4774         }
4775         msg->msg_vmflush = !!memory_pressure_get();
4776
4777         cpt = lnet_cpt_of_cookie(mdh.cookie);
4778
4779         if (ack == LNET_ACK_REQ) {
4780                 rspt = lnet_rspt_alloc(cpt);
4781                 if (!rspt) {
4782                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4783                                 libcfs_id2str(target));
                              lnet_msg_free(msg);
4784                         return -ENOMEM;
4785                 }
4786                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4787         }
4788
4789         lnet_res_lock(cpt);
4790
4791         md = lnet_handle2md(&mdh);
4792         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4793                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4794                        match_bits, portal, libcfs_id2str(target),
4795                        md == NULL ? -1 : md->md_threshold);
4796                 if (md != NULL && md->md_me != NULL)
4797                         CERROR("Source MD also attached to portal %d\n",
4798                                md->md_me->me_portal);
4799                 lnet_res_unlock(cpt);
4800
4801                 LIBCFS_FREE(rspt, sizeof(*rspt));
4802                 lnet_msg_free(msg);
4803                 return -ENOENT;
4804         }
4805
4806         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4807
4808         lnet_msg_attach_md(msg, md, 0, 0);
4809
4810         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4811
4812         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4813         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
4814         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
4815         msg->msg_hdr.msg.put.hdr_data = hdr_data;
4816
4817         /* NB handles only looked up by creator (no flips) */
4818         if (ack == LNET_ACK_REQ) {
4819                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4820                         the_lnet.ln_interface_cookie;
4821                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4822                         md->md_lh.lh_cookie;
4823         } else {
4824                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4825                         LNET_WIRE_HANDLE_COOKIE_NONE;
4826                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4827                         LNET_WIRE_HANDLE_COOKIE_NONE;
4828         }
4829
4830         lnet_res_unlock(cpt);
4831
4832         lnet_build_msg_event(msg, LNET_EVENT_SEND);
4833
4834         if (ack == LNET_ACK_REQ)
4835                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
4836
4837         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
4838                                  CFS_FAIL_ONCE))
4839                 rc = -EIO;
4840         else
4841                 rc = lnet_send(self, msg, LNET_NID_ANY);
4842
4843         if (rc != 0) {
4844                 CNETERR("Error sending PUT to %s: %d\n",
4845                         libcfs_id2str(target), rc);
4846                 msg->msg_no_resend = true;
4847                 lnet_finalize(msg, rc);
4848         }
4849
4850         /* completion will be signalled by an event */
4851         return 0;
4852 }
4853 EXPORT_SYMBOL(LNetPut);
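
/*
 * Usage sketch for LNetPut() (illustrative only, not compiled).  It assumes
 * the caller has already bound an EQ-backed, free-floating MD with
 * LNetMDBind() and holds its handle in 'mdh'; 'peer_nid', 'MY_PORTAL' and
 * 'MY_MATCH_BITS' are hypothetical stand-ins for caller-supplied values:
 *
 *	struct lnet_process_id target = {
 *		.nid = peer_nid,
 *		.pid = LNET_PID_LUSTRE,
 *	};
 *	int rc;
 *
 *	rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
 *		     MY_PORTAL, MY_MATCH_BITS, 0, 0);
 *	if (rc != 0)
 *		CERROR("PUT to %s failed: %d\n", libcfs_id2str(target), rc);
 *
 * On success, completion is signalled by an LNET_EVENT_SEND and, because
 * LNET_ACK_REQ was requested, an LNET_EVENT_ACK event on the MD's EQ.
 */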
4854
4855 /*
4856  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
4857  * returns a msg for the LND to pass to lnet_finalize() when the sink
4858  * data has been received.
4859  *
4860  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
4861  * lnet_finalize() is called on it, so the LND must call this first
4862  */
4863 struct lnet_msg *
4864 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
4865 {
4866         struct lnet_msg *msg = lnet_msg_alloc();
4867         struct lnet_libmd *getmd = getmsg->msg_md;
4868         struct lnet_process_id peer_id = getmsg->msg_target;
4869         int cpt;
4870
4871         LASSERT(!getmsg->msg_target_is_router);
4872         LASSERT(!getmsg->msg_routing);
4873
4874         if (msg == NULL) {
4875                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
4876                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
4877                 goto drop;
4878         }
4879
4880         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
4881         lnet_res_lock(cpt);
4882
4883         LASSERT(getmd->md_refcount > 0);
4884
4885         if (getmd->md_threshold == 0) {
4886                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
4887                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
4888                         getmd);
4889                 lnet_res_unlock(cpt);
4890                 goto drop;
4891         }
4892
4893         LASSERT(getmd->md_offset == 0);
4894
4895         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
4896                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
4897
4898         /* setup information for lnet_build_msg_event */
4899         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
4900         msg->msg_from = peer_id.nid;
4901         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
4902         msg->msg_hdr.src_nid = peer_id.nid;
4903         msg->msg_hdr.payload_length = getmd->md_length;
4904         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
4905
4906         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
4907         lnet_res_unlock(cpt);
4908
4909         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4910
4911         lnet_net_lock(cpt);
4912         lnet_msg_commit(msg, cpt);
4913         lnet_net_unlock(cpt);
4914
4915         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4916
4917         return msg;
4918
4919  drop:
4920         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
4921
4922         lnet_net_lock(cpt);
4923         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
4924         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4925         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
4926                 getmd->md_length;
4927         lnet_net_unlock(cpt);
4928
4929         if (msg != NULL)
4930                 lnet_msg_free(msg);
4931
4932         return NULL;
4933 }
4934 EXPORT_SYMBOL(lnet_create_reply_msg);
4935
4936 void
4937 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
4938                        unsigned int len)
4939 {
4940         /* Set the REPLY length, now that the RDMA which elides the REPLY
4941          * message has completed and I know it. */
4942         LASSERT(reply != NULL);
4943         LASSERT(reply->msg_type == LNET_MSG_GET);
4944         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
4945
4946         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
4947          * the end of my buffer, I might as well be dead. */
4948         LASSERT(len <= reply->msg_ev.mlength);
4949
4950         reply->msg_ev.mlength = len;
4951 }
4952 EXPORT_SYMBOL(lnet_set_reply_msg_len);
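
/*
 * Sketch of how an LND might use lnet_create_reply_msg() and
 * lnet_set_reply_msg_len() for an "optimized" GET, i.e. when it RDMAs the
 * sink data directly into the GET MD instead of sending a REPLY message
 * over the wire.  Illustrative only; 'tx', 'nob' and 'status' are
 * hypothetical LND-private names:
 *
 *	On the GET initiator, before posting the RDMA:
 *
 *	tx->tx_lntmsg[0] = getmsg;
 *	tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, getmsg);
 *
 *	When the RDMA completes and 'nob' bytes have actually been written:
 *
 *	if (tx->tx_lntmsg[1] != NULL) {
 *		lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], nob);
 *		lnet_finalize(tx->tx_lntmsg[1], status);
 *	}
 *	lnet_finalize(tx->tx_lntmsg[0], status);
 *
 * Note lnet_create_reply_msg() must be called before lnet_finalize() is
 * called on the original GET (see the CAVEAT EMPTOR above), and the LND
 * must cope with a NULL return when the reply message cannot be created.
 */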
4953
4954 /**
4955  * Initiate an asynchronous GET operation.
4956  *
4957  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
4958  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
4959  * the target node in the REPLY has been written to local MD.
4960  *
4961  * On the target node, an LNET_EVENT_GET is logged when the GET request
4962  * arrives and is accepted into an MD.
4963  *
4964  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
4965  * \param mdh A handle for the MD that describes the memory into which the
4966  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
4967  *
4968  * \retval  0      Success; only in this case will events be generated
4969  * and logged to the EQ (if it exists) of the MD.
4970  * \retval -EIO    Simulated failure.
4971  * \retval -ENOMEM Memory allocation failure.
4972  * \retval -ENOENT Invalid MD object.
4973  */
4974 int
4975 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
4976         struct lnet_process_id target, unsigned int portal,
4977         __u64 match_bits, unsigned int offset, bool recovery)
4978 {
4979         struct lnet_msg *msg;
4980         struct lnet_libmd *md;
4981         struct lnet_rsp_tracker *rspt;
4982         int cpt;
4983         int rc;
4984
4985         LASSERT(the_lnet.ln_refcount > 0);
4986
4987         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4988             fail_peer(target.nid, 1)) {                 /* shall we now? */
4990                 CERROR("Dropping GET to %s: simulated failure\n",
4991                        libcfs_id2str(target));
4992                 return -EIO;
4993         }
4994
4995         msg = lnet_msg_alloc();
4996         if (!msg) {
4997                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
4998                        libcfs_id2str(target));
4999                 return -ENOMEM;
5000         }
5001
5002         cpt = lnet_cpt_of_cookie(mdh.cookie);
5003
5004         rspt = lnet_rspt_alloc(cpt);
5005         if (!rspt) {
5006                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5007                        libcfs_id2str(target));
                      lnet_msg_free(msg);
5008                 return -ENOMEM;
5009         }
5010         INIT_LIST_HEAD(&rspt->rspt_on_list);
5011
5012         msg->msg_recovery = recovery;
5013
5014         lnet_res_lock(cpt);
5015
5016         md = lnet_handle2md(&mdh);
5017         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5018                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5019                        match_bits, portal, libcfs_id2str(target),
5020                        md == NULL ? -1 : md->md_threshold);
5021                 if (md != NULL && md->md_me != NULL)
5022                         CERROR("REPLY MD also attached to portal %d\n",
5023                                md->md_me->me_portal);
5024
5025                 lnet_res_unlock(cpt);
5026
5027                 lnet_msg_free(msg);
5028                 LIBCFS_FREE(rspt, sizeof(*rspt));
5029                 return -ENOENT;
5030         }
5031
5032         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5033
5034         lnet_msg_attach_md(msg, md, 0, 0);
5035
5036         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5037
5038         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5039         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5040         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5041         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5042
5043         /* NB handles only looked up by creator (no flips) */
5044         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5045                 the_lnet.ln_interface_cookie;
5046         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5047                 md->md_lh.lh_cookie;
5048
5049         lnet_res_unlock(cpt);
5050
5051         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5052
5053         lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5054
5055         rc = lnet_send(self, msg, LNET_NID_ANY);
5056         if (rc < 0) {
5057                 CNETERR("Error sending GET to %s: %d\n",
5058                         libcfs_id2str(target), rc);
5059                 msg->msg_no_resend = true;
5060                 lnet_finalize(msg, rc);
5061         }
5062
5063         /* completion will be signalled by an event */
5064         return 0;
5065 }
5066 EXPORT_SYMBOL(LNetGet);
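
/*
 * Usage sketch for LNetGet() (illustrative only, not compiled).  As with the
 * LNetPut() sketch above, 'mdh' is assumed to be a handle to a free-floating
 * MD bound with LNetMDBind(), and 'peer_nid', 'MY_PORTAL' and
 * 'MY_MATCH_BITS' are hypothetical caller-supplied values:
 *
 *	struct lnet_process_id target = {
 *		.nid = peer_nid,
 *		.pid = LNET_PID_LUSTRE,
 *	};
 *	int rc;
 *
 *	rc = LNetGet(LNET_NID_ANY, mdh, target, MY_PORTAL, MY_MATCH_BITS,
 *		     0, false);
 *	if (rc != 0)
 *		CERROR("GET from %s failed: %d\n", libcfs_id2str(target), rc);
 *
 * On success the caller waits for an LNET_EVENT_SEND and then an
 * LNET_EVENT_REPLY on the MD's EQ; the REPLY event signals that the
 * requested data has been written into the MD.
 */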
5067
5068 /**
5069  * Calculate distance to node at \a dstnid.
5070  *
5071  * \param dstnid Target NID.
5072  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5073  * is saved here.
5074  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5075  * here.
5076  *
5077  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5078  * local_nid_dist_zero is set, which is the default.
5079  * \retval positives Distance to the target NID, i.e. the number of hops plus one.
5080  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5081  */
5082 int
5083 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5084 {
5085         struct list_head        *e;
5086         struct lnet_ni *ni = NULL;
5087         struct lnet_remotenet *rnet;
5088         __u32                   dstnet = LNET_NIDNET(dstnid);
5089         int                     hops;
5090         int                     cpt;
5091         __u32                   order = 2;
5092         struct list_head        *rn_list;
5093
5094         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5095          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5096          * keep order 0 free for 0@lo and order 1 free for a local NID
5097          * match */
5098
5099         LASSERT(the_lnet.ln_refcount > 0);
5100
5101         cpt = lnet_net_lock_current();
5102
5103         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5104                 if (ni->ni_nid == dstnid) {
5105                         if (srcnidp != NULL)
5106                                 *srcnidp = dstnid;
5107                         if (orderp != NULL) {
5108                                 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
5109                                         *orderp = 0;
5110                                 else
5111                                         *orderp = 1;
5112                         }
5113                         lnet_net_unlock(cpt);
5114
5115                         return local_nid_dist_zero ? 0 : 1;
5116                 }
5117
5118                 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
5119                         /* Check if ni was originally created in
5120                          * current net namespace.
5121                          * If not, assign order above 0xffff0000,
5122                          * to make this ni not a priority. */
5123                         if (current->nsproxy &&
5124                             !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
5125                                 order += 0xffff0000;
5126                         if (srcnidp != NULL)
5127                                 *srcnidp = ni->ni_nid;
5128                         if (orderp != NULL)
5129                                 *orderp = order;
5130                         lnet_net_unlock(cpt);
5131                         return 1;
5132                 }
5133
5134                 order++;
5135         }
5136
5137         rn_list = lnet_net2rnethash(dstnet);
5138         list_for_each(e, rn_list) {
5139                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5140
5141                 if (rnet->lrn_net == dstnet) {
5142                         struct lnet_route *route;
5143                         struct lnet_route *shortest = NULL;
5144                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5145                         __u32 route_hops;
5146
5147                         LASSERT(!list_empty(&rnet->lrn_routes));
5148
5149                         list_for_each_entry(route, &rnet->lrn_routes,
5150                                             lr_list) {
5151                                 route_hops = route->lr_hops;
5152                                 if (route_hops == LNET_UNDEFINED_HOPS)
5153                                         route_hops = 1;
5154                                 if (shortest == NULL ||
5155                                     route_hops < shortest_hops) {
5156                                         shortest = route;
5157                                         shortest_hops = route_hops;
5158                                 }
5159                         }
5160
5161                         LASSERT(shortest != NULL);
5162                         hops = shortest_hops;
5163                         if (srcnidp != NULL) {
5164                                 struct lnet_net *net;
5165                                 net = lnet_get_net_locked(shortest->lr_lnet);
5166                                 LASSERT(net);
5167                                 ni = lnet_get_next_ni_locked(net, NULL);
5168                                 *srcnidp = ni->ni_nid;
5169                         }
5170                         if (orderp != NULL)
5171                                 *orderp = order;
5172                         lnet_net_unlock(cpt);
5173                         return hops + 1;
5174                 }
5175                 order++;
5176         }
5177
5178         lnet_net_unlock(cpt);
5179         return -EHOSTUNREACH;
5180 }
5181 EXPORT_SYMBOL(LNetDist);
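
/*
 * Worked example for LNetDist() (illustrative only).  Assume this node has a
 * local interface on tcp0, reaches o2ib0 through a router one hop away, and
 * local_nid_dist_zero is set (the default); 'local_tcp0_nid',
 * 'remote_o2ib0_nid' and 'unreachable_nid' are hypothetical NIDs:
 *
 *	lnet_nid_t src;
 *	__u32 order;
 *
 *	LNetDist(local_tcp0_nid, &src, &order);
 *		returns 0 (local NID); order == 1, since order 0 is
 *		reserved for 0@lo
 *	LNetDist(remote_o2ib0_nid, &src, &order);
 *		returns 2 (one hop plus one); src is set to a local NID
 *		on the net used to reach the gateway
 *	LNetDist(unreachable_nid, &src, &order);
 *		returns -EHOSTUNREACH
 */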