LU-13929 lnet: modify assertion in lnet_post_send_locked
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        lnet_nid_t sd_dst_nid;
        lnet_nid_t sd_src_nid;
        lnet_nid_t sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline bool
lnet_msg_is_response(struct lnet_msg *msg)
{
        return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
}

static inline bool
lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
{
        if (md_options & LNET_MD_NO_TRACK_RESPONSE)
                /* Explicitly disabled in MD options */
                return false;

        if (md_options & LNET_MD_TRACK_RESPONSE)
                /* Explicitly enabled in MD options */
                return true;

        if (lnet_response_tracking == 3)
                /* Enabled for all message types */
                return true;

        if (msg_type == LNET_MSG_PUT)
                return lnet_response_tracking == 2;

        if (msg_type == LNET_MSG_GET)
                return lnet_response_tracking == 1;

        return false;
}
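
/* Editorial summary (not part of the original source): the global
 * lnet_response_tracking setting is interpreted above as follows, with the
 * MD options overriding it in either direction via LNET_MD_TRACK_RESPONSE /
 * LNET_MD_NO_TRACK_RESPONSE:
 *
 *      value   track GET responses     track PUT responses
 *        0             no                      no
 *        1             yes                     no
 *        2             no                      yes
 *        3             yes                     yes
 */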

static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}
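
/* Usage sketch (editorial, not in the original source): lnet_fail_nid()
 * arms the fail_peer() checks below. A non-zero threshold makes the next
 * 'threshold' messages to/from 'nid' fail (LNET_MD_THRESH_INF fails them
 * indefinitely); a zero threshold removes matching entries. For example,
 * a test harness might do:
 *
 *      lnet_fail_nid(nid, 5);                  // drop the next 5 messages
 *      lnet_fail_nid(LNET_NID_ANY, 0);         // clear all test peers
 */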

static int
fail_peer (lnet_nid_t nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);
        int fail = 0;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);
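
/* Worked example (editorial): with diov = { {.iov_len = 4}, {.iov_len = 8} }
 * and doffset = 6, the skip loop above consumes the whole first fragment and
 * starts writing at offset 2 of the second. Each pass of the copy loop then
 * moves this_nob = min3(space left in the destination fragment, bytes left
 * in the source fragment, nob) and advances whichever side ran out.
 */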

unsigned int
lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
        unsigned int  nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->bv_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
                    unsigned int doffset,
                    unsigned int nsiov, struct bio_vec *siov,
                    unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int    this_nob;
        char           *daddr = NULL;
        char           *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->bv_len) {
                doffset -= diov->bv_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->bv_len) {
                soffset -= siov->bv_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->bv_len - doffset,
                                siov->bv_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->bv_page)) +
                                diov->bv_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->bv_page)) +
                                siov->bv_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->bv_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->bv_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->bv_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->bv_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->bv_page);
        if (saddr != NULL)
                kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                    unsigned int nkiov, struct bio_vec *kiov,
                    unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->bv_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
                  int src_niov, struct bio_vec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->bv_len) {      /* skip initial frags */
                offset -= src->bv_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->bv_len - offset;
                dst->bv_page = src->bv_page;
                dst->bv_offset = src->bv_offset + offset;

                if (len <= frag_len) {
                        dst->bv_len = len;
                        LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
                        return niov;
                }

                dst->bv_len = frag_len;
                LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);
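
/* Worked example (editorial): extracting offset = 5000, len = 3000 from two
 * 4096-byte page fragments skips the first page entirely (offset becomes
 * 904) and emits one entry covering bytes 904..3903 of the second page,
 * returning niov = 1. The caller must size 'dst' for the worst case; the
 * LASSERTs above fire if it is too small.
 */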

void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        struct bio_vec  *kiov = NULL;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        msg->msg_kiov = md->md_kiov;
}

void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}

static void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(ni->ni_nid == LNET_NID_LO_0 ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int     rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                   &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

static bool
lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
{
        time64_t deadline;

        deadline = lpni->lpni_last_alive +
                   lpni->lpni_net->net_tunables.lct_peer_timeout;

        /*
         * assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (deadline > now)
                return false;

        return true;
}
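
/* Example (editorial): with lct_peer_timeout = 180s and lpni_last_alive =
 * 1000s, the deadline is 1180s; until then lnet_peer_alive_locked() below
 * treats the peer_ni as alive without consulting lnet_is_peer_ni_alive().
 */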

/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
{
        time64_t now = ktime_get_seconds();

        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;

        /*
         * If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 1;

        /* try and send recovery messages regardless */
        if (msg->msg_recovery)
                return 1;

        /* always send any responses */
        if (lnet_msg_is_response(msg))
                return 1;

        if (!lnet_is_peer_deadline_passed(lpni, now))
                return 1;

        return lnet_is_peer_ni_alive(lpni);
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni     *lp = msg->msg_txpeer;
        struct lnet_ni          *ni = msg->msg_txni;
        int                     cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* can't get here if we're sending to the loopback interface */
        if (the_lnet.ln_loni)
                LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp, msg) == 0) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);
                if (msg->msg_txni)
                        lnet_incr_stats(&msg->msg_txni->ni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                if (do_send)
                        lnet_finalize(msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_id2str(msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}
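
/* Credit accounting recap (editorial): a message needs two credits before it
 * can be handed to the LND: a peer tx credit (lpni_txcredits) and an NI tx
 * credit (tq_credits on this CPT's tx queue). Whichever pool goes negative
 * first parks the message on lpni_txq or tq_delayed and returns
 * LNET_CREDIT_WAIT; lnet_return_tx_credits_locked() below restarts queued
 * messages as the credits come back.
 */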

static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool  *rbp;
        int                     cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}
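
/* Sizing note (editorial): router buffers come in LNET_NRBPOOLS tiers of
 * increasing rbp_npages. The loop above walks from the smallest tier to the
 * first whose buffers hold msg_len, so a one-page message draws from the
 * smallest pool while an LNET_MTU-sized message falls through to the
 * largest.
 */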

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        spin_unlock(&lpni->lpni_lock);
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        /* lp_lock protects the lp_rtrq */
                        spin_lock(&lp->lp_lock);
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
        struct lnet_ni          *txni = msg->msg_txni;
        struct lnet_msg         *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_entry(txpeer->lpni_txq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_entry(rbp->rbp_msgs.next,
                         struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits.  */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeerni->lpni_lock);
                rxpeerni->lpni_rtrcredits++;
                spin_unlock(&rxpeerni->lpni_lock);

                lp = rxpeerni->lpni_peer_net->lpn_peer;
                spin_lock(&lp->lp_lock);

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);
                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_entry(lp->lp_rtrq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_ni *best_lpni,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
                INT_MIN;
        int best_lpni_healthv = (best_lpni) ?
                atomic_read(&best_lpni->lpni_healthv) : 0;
        bool best_lpni_is_preferred = false;
        bool lpni_is_preferred;
        int lpni_healthv;
        __u32 lpni_sel_prio;
        __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni,
                                                                best_ni->ni_nid);
                        CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
                               libcfs_nid2str(best_ni->ni_nid),
                               lpni_is_preferred);
                } else {
                        lpni_is_preferred = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);
                lpni_sel_prio = lpni->lpni_sel_priority;

                if (best_lpni)
                        CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
                                libcfs_nid2str(lpni->lpni_nid),
                                libcfs_nid2str(best_lpni->lpni_nid),
                                lpni_healthv, best_lpni_healthv,
                                lpni_sel_prio, best_sel_prio,
                                lpni->lpni_txcredits, best_lpni_credits,
                                lpni->lpni_seq, best_lpni->lpni_seq);
                else
                        goto select_lpni;

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv)
                        continue;
                else if (lpni_healthv > best_lpni_healthv) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                if (lpni_sel_prio > best_sel_prio)
                        continue;
                else if (lpni_sel_prio < best_sel_prio) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                /* if this is a preferred peer use it */
                if (!best_lpni_is_preferred && lpni_is_preferred) {
                        best_lpni_is_preferred = true;
                        goto select_lpni;
                } else if (best_lpni_is_preferred && !lpni_is_preferred) {
                        /* this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                }

                if (lpni->lpni_txcredits < best_lpni_credits)
                        /* We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                else if (lpni->lpni_txcredits > best_lpni_credits)
                        goto select_lpni;

                /* The best peer found so far and the current peer
                 * have the same number of available credits let's
                 * make sure to select between them using Round Robin
                 */
                if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
                        continue;
select_lpni:
                best_lpni_is_preferred = lpni_is_preferred;
                best_lpni_healthv = lpni_healthv;
                best_sel_prio = lpni_sel_prio;
                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);
                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                                libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nid2str(best_lpni->lpni_nid));

        return best_lpni;
}
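
/* Selection order recap (editorial): lnet_select_peer_ni() ranks candidate
 * peer NIs by health first, then by lower selection priority, then by NI
 * preference, then by available tx credits, and finally round-robins on
 * lpni_seq among otherwise equal candidates.
 */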

/*
 * Prerequisite: the best_ni should already be set in the sd
 * Find the best lpni.
 * If the net id is provided then restrict lpni selection on
 * that particular net.
 * Otherwise find any reachable lpni. When dealing with an MR
 * gateway that has multiple lpnis we can use, we want to select
 * the best one from the list of reachable ones.
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /* find the best_lpni on any local network */
        if (net_id == LNET_NET_ANY) {
                struct lnet_peer_ni *best_lpni = NULL;
                struct lnet_peer_net *lpn;
                list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
                        /* no net specified find any reachable peer ni */
                        if (!lnet_islocalnet_locked(lpn->lpn_net_id))
                                continue;
                        best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
                                                        best_lpni, lpn);
                }

                return best_lpni;
        }
        /* restrict on the specified net */
        peer_net = lnet_peer_get_net_locked(peer, net_id);
        if (peer_net)
                return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);

        return NULL;
}
1445
1446 static int
1447 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1448 {
1449         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1450                 return 1;
1451
1452         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1453                 return -1;
1454
1455         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1456                 return 1;
1457
1458         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1459                 return -1;
1460
1461         return 0;
1462 }
1463
1464 /* Compare route priorities and hop counts */
1465 static int
1466 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1467 {
1468         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1469         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1470
1471         if (r1->lr_priority < r2->lr_priority)
1472                 return 1;
1473
1474         if (r1->lr_priority > r2->lr_priority)
1475                 return -1;
1476
1477         if (r1_hops < r2_hops)
1478                 return 1;
1479
1480         if (r1_hops > r2_hops)
1481                 return -1;
1482
1483         return 0;
1484 }
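
/* Worked example (hypothetical values, not part of the build): the
 * route with the lower lr_priority value wins outright, and
 * LNET_UNDEFINED_HOPS is compared as a hop count of 1.
 */
#if 0
static void example_route_cmp(void)
{
        struct lnet_route r1 = { .lr_priority = 0,
                                 .lr_hops = LNET_UNDEFINED_HOPS };
        struct lnet_route r2 = { .lr_priority = 1, .lr_hops = 1 };

        lnet_compare_routes(&r1, &r2); /* 1: r1 preferred on priority */
        r2.lr_priority = 0;
        lnet_compare_routes(&r1, &r2); /* 0: a tie, both count as 1 hop */
}
#endif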
1485
1486 static struct lnet_route *
1487 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1488                        struct lnet_peer_ni *remote_lpni,
1489                        struct lnet_route **prev_route,
1490                        struct lnet_peer_ni **gwni)
1491 {
1492         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1493         struct lnet_route *best_route;
1494         struct lnet_route *last_route;
1495         struct lnet_route *route;
1496         int rc;
1497         bool best_rte_is_preferred = false;
1498         lnet_nid_t gw_pnid;
1499
1500         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1501                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1502
1503         best_route = last_route = NULL;
1504         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1505                 if (!lnet_is_route_alive(route))
1506                         continue;
1507                 gw_pnid = route->lr_gateway->lp_primary_nid;
1508
1509                 /* no lock protection on the fields below, but it's harmless */
1510                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1511                         last_route = route;
1512
1513                 /* if the best route found is in the preferred list then
1514                  * tag it as preferred and use it later on. But if we
1515                  * didn't find any routes which are on the preferred list
1516                  * then just use the best route possible.
1517                  */
1518                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1519
1520                 if (!best_route || (rc && !best_rte_is_preferred)) {
1521                         /* Restrict the selection of the router NI on the
1522                          * src_net provided. If the src_net is LNET_NID_ANY,
1523                          * then select the best interface available.
1524                          */
1525                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1526                                                    route->lr_gateway,
1527                                                    src_net);
1528                         if (!lpni) {
1529                                 CDEBUG(D_NET,
1530                                        "Gateway %s does not have a peer NI on net %s\n",
1531                                        libcfs_nid2str(gw_pnid),
1532                                        libcfs_net2str(src_net));
1533                                 continue;
1534                         }
1535                 }
1536
1537                 if (rc && !best_rte_is_preferred) {
1538                         /* This is the first preferred route we found,
1539                          * so it beats any route found previously
1540                          */
1541                         best_route = route;
1542                         if (!last_route)
1543                                 last_route = route;
1544                         best_gw_ni = lpni;
1545                         best_rte_is_preferred = true;
1546                         CDEBUG(D_NET, "preferred gw = %s\n",
1547                                libcfs_nid2str(gw_pnid));
1548                         continue;
1549                 } else if ((!rc) && best_rte_is_preferred)
1550                         /* The best route we found so far is in the preferred
1551                          * list, so it beats any non-preferred route
1552                          */
1553                         continue;
1554
1555                 if (!best_route) {
1556                         best_route = last_route = route;
1557                         best_gw_ni = lpni;
1558                         continue;
1559                 }
1560
1561                 rc = lnet_compare_routes(route, best_route);
1562                 if (rc == -1)
1563                         continue;
1564
1565                 /* Restrict the selection of the router NI on the
1566                  * src_net provided. If the src_net is LNET_NID_ANY,
1567                  * then select the best interface available.
1568                  */
1569                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1570                                            route->lr_gateway,
1571                                            src_net);
1572                 if (!lpni) {
1573                         CDEBUG(D_NET,
1574                                "Gateway %s does not have a peer NI on net %s\n",
1575                                libcfs_nid2str(gw_pnid),
1576                                libcfs_net2str(src_net));
1577                         continue;
1578                 }
1579
1580                 if (rc == 1) {
1581                         best_route = route;
1582                         best_gw_ni = lpni;
1583                         continue;
1584                 }
1585
1586                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1587                 if (rc == -1)
1588                         continue;
1589
1590                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1591                         best_route = route;
1592                         best_gw_ni = lpni;
1593                         continue;
1594                 }
1595         }
1596
1597         *prev_route = last_route;
1598         *gwni = best_gw_ni;
1599
1600         return best_route;
1601 }
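
/* To summarize the loop above: among alive routes, a gateway on the
 * peer's preferred-router list beats any non-preferred one; remaining
 * ties fall to lnet_compare_routes() (priority, then hops), then to
 * lnet_compare_gw_lpnis() (queue depth, then credits), and finally to
 * the lr_seq round-robin counter.
 */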
1602
1603 static struct lnet_ni *
1604 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1605                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1606                  int md_cpt)
1607 {
1608         struct lnet_ni *ni = NULL;
1609         unsigned int shortest_distance;
1610         int best_credits;
1611         int best_healthv;
1612         __u32 best_sel_prio;
1613
1614         /*
1615          * If there is no peer_ni that we can send to on this network,
1616          * then there is no point in looking for a new best_ni here.
1617          */
1618         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1619                 return best_ni;
1620
1621         if (best_ni == NULL) {
1622                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1623                 shortest_distance = UINT_MAX;
1624                 best_credits = INT_MIN;
1625                 best_healthv = 0;
1626         } else {
1627                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1628                                                      best_ni->ni_dev_cpt);
1629                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1630                 best_healthv = atomic_read(&best_ni->ni_healthv);
1631                 best_sel_prio = best_ni->ni_sel_priority;
1632         }
1633
1634         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1635                 unsigned int distance;
1636                 int ni_credits;
1637                 int ni_healthv;
1638                 int ni_fatal;
1639                 __u32 ni_sel_prio;
1640
1641                 ni_credits = atomic_read(&ni->ni_tx_credits);
1642                 ni_healthv = atomic_read(&ni->ni_healthv);
1643                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1644                 ni_sel_prio = ni->ni_sel_priority;
1645
1646                 /*
1647                  * calculate the distance from the CPT on which
1648                  * the message memory is allocated to the CPT of
1649                  * the NI's physical device
1650                  */
1651                 distance = cfs_cpt_distance(lnet_cpt_table(),
1652                                             md_cpt,
1653                                             ni->ni_dev_cpt);
1654
1655                 /*
1656                  * All distances smaller than the NUMA range
1657                  * are treated equally.
1658                  */
1659                 if (distance < lnet_numa_range)
1660                         distance = lnet_numa_range;
1661
1662                 /*
1663                  * Select on health, shorter distance, available
1664                  * credits, then round-robin.
1665                  */
1666                 if (ni_fatal)
1667                         continue;
1668
1669                 if (best_ni)
1670                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u]\n",
1671                                libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1672                                ni->ni_seq, ni_sel_prio,
1673                                (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1674                                : "not selected", best_credits, shortest_distance,
1675                                (best_ni) ? best_ni->ni_seq : 0,
1676                                best_sel_prio);
1677                 else
1678                         goto select_ni;
1679
1680                 if (ni_healthv < best_healthv)
1681                         continue;
1682                 else if (ni_healthv > best_healthv)
1683                         goto select_ni;
1684
1685                 if (ni_sel_prio > best_sel_prio)
1686                         continue;
1687                 else if (ni_sel_prio < best_sel_prio)
1688                         goto select_ni;
1689
1690                 if (distance > shortest_distance)
1691                         continue;
1692                 else if (distance < shortest_distance)
1693                         goto select_ni;
1694
1695                 if (ni_credits < best_credits)
1696                         continue;
1697                 else if (ni_credits > best_credits)
1698                         goto select_ni;
1699
1700                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1701                         continue;
1702
1703 select_ni:
1704                 best_sel_prio = ni_sel_prio;
1705                 shortest_distance = distance;
1706                 best_healthv = ni_healthv;
1707                 best_ni = ni;
1708                 best_credits = ni_credits;
1709         }
1710
1711         CDEBUG(D_NET, "selected best_ni %s\n",
1712                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1713
1714         return best_ni;
1715 }
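
/* Example of the NUMA clamp above: with lnet_numa_range = 2, raw CPT
 * distances 0, 1 and 2 all compare as 2, so "near enough" NIs tie on
 * distance and the choice falls to credits and round-robin (health
 * and selection priority having been compared first); only NIs
 * farther than the configured range lose on distance.
 */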
1716
1717 /*
1718  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1719  * because such traffic is required to perform discovery. We therefore
1720  * exclude all GET and PUT on that portal. We also exclude all ACK and
1721  * REPLY traffic, but that is because the portal is not tracked in the
1722  * message structure for these message types. We could restrict this
1723  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1724  */
1725 static bool
1726 lnet_msg_discovery(struct lnet_msg *msg)
1727 {
1728         if (msg->msg_type == LNET_MSG_PUT) {
1729                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1730                         return true;
1731         } else if (msg->msg_type == LNET_MSG_GET) {
1732                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1733                         return true;
1734         }
1735         return false;
1736 }
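
/* For example, the GET leg of a discovery ping targets
 * LNET_RESERVED_PORTAL, so this returns false and discovery cannot
 * recurse on its own traffic, while an ordinary PUT or GET to a data
 * portal returns true and may trigger it.
 */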
1737
1738 #define SRC_SPEC        0x0001
1739 #define SRC_ANY         0x0002
1740 #define LOCAL_DST       0x0004
1741 #define REMOTE_DST      0x0008
1742 #define MR_DST          0x0010
1743 #define NMR_DST         0x0020
1744 #define SND_RESP        0x0040
1745
1746 /* The following two defines are used for return codes */
1747 #define REPEAT_SEND     0x1000
1748 #define PASS_THROUGH    0x2000
1749
1750 /* The different cases lnet_select pathway needs to handle */
1751 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1752 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1753 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1754 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1755 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1756 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1757 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1758 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
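
/* Example: the flags compose by bitwise OR, so a message from an
 * unspecified source to an MR peer on a directly connected net
 * classifies as SRC_ANY_LOCAL_MR_DST (0x0002 | 0x0004 | 0x0010 ==
 * 0x0016), which lnet_handle_send_case_locked() below dispatches to
 * lnet_handle_any_mr_dst().
 */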
1759
1760 static int
1761 lnet_handle_lo_send(struct lnet_send_data *sd)
1762 {
1763         struct lnet_msg *msg = sd->sd_msg;
1764         int cpt = sd->sd_cpt;
1765
1766         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1767                 return -ESHUTDOWN;
1768
1769         /* No send credit hassles with LOLND */
1770         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1771         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1772         if (!msg->msg_routing)
1773                 msg->msg_hdr.src_nid =
1774                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1775         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1776         lnet_msg_commit(msg, cpt);
1777         msg->msg_txni = the_lnet.ln_loni;
1778
1779         return LNET_CREDIT_OK;
1780 }
1781
1782 static int
1783 lnet_handle_send(struct lnet_send_data *sd)
1784 {
1785         struct lnet_ni *best_ni = sd->sd_best_ni;
1786         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1787         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1788         struct lnet_msg *msg = sd->sd_msg;
1789         int cpt2;
1790         __u32 send_case = sd->sd_send_case;
1791         int rc;
1792         __u32 routing = send_case & REMOTE_DST;
1793         struct lnet_rsp_tracker *rspt;
1794
1795         /* Increment sequence number of the selected peer, peer net,
1796          * local ni and local net so that we pick the next ones
1797          * in Round Robin.
1798          */
1799         best_lpni->lpni_seq++;
1800         best_lpni->lpni_peer_net->lpn_seq++;
1801         best_ni->ni_seq++;
1802         best_ni->ni_net->net_seq++;
1803
1804         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1805                libcfs_nid2str(best_ni->ni_nid),
1806                best_ni->ni_seq, best_ni->ni_net->net_seq,
1807                atomic_read(&best_ni->ni_tx_credits),
1808                best_ni->ni_sel_priority,
1809                libcfs_nid2str(best_lpni->lpni_nid),
1810                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1811                best_lpni->lpni_txcredits,
1812                best_lpni->lpni_sel_priority);
1813
1814         /*
1815          * grab a reference on the peer_ni so it sticks around even if
1816          * we need to drop and relock the lnet_net_lock below.
1817          */
1818         lnet_peer_ni_addref_locked(best_lpni);
1819
1820         /*
1821          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1822          * message. This ensures that we get a CPT that is correct for
1823          * the NI when the NI has been restricted to a subset of all CPTs.
1824          * If the selected CPT differs from the one currently locked, we
1825          * must unlock and relock the lnet_net_lock(), and then check whether
1826          * the configuration has changed. We don't have a hold on the best_ni
1827          * yet, and it may have vanished.
1828          */
1829         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1830         if (sd->sd_cpt != cpt2) {
1831                 __u32 seq = lnet_get_dlc_seq_locked();
1832                 lnet_net_unlock(sd->sd_cpt);
1833                 sd->sd_cpt = cpt2;
1834                 lnet_net_lock(sd->sd_cpt);
1835                 if (seq != lnet_get_dlc_seq_locked()) {
1836                         lnet_peer_ni_decref_locked(best_lpni);
1837                         return REPEAT_SEND;
1838                 }
1839         }
1840
1841         /*
1842          * store the best_lpni in the message right away to avoid having
1843          * to do the same operation under different conditions
1844          */
1845         msg->msg_txpeer = best_lpni;
1846         msg->msg_txni = best_ni;
1847
1848         /*
1849          * grab a reference for the best_ni since now it's in use in this
1850          * send. The reference will be dropped in lnet_finalize()
1851          */
1852         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1853
1854         /*
1855          * Always set the target.nid to the best peer picked. Either the
1856          * NID will be one of the peer NIDs selected, or the same NID as
1857          * what was originally set in the target or it will be the NID of
1858          * a router if this message should be routed
1859          */
1860         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1861
1862         /*
1863          * lnet_msg_commit assigns the correct cpt to the message, which
1864          * is used to decrement the correct refcount on the ni when it's
1865          * time to return the credits
1866          */
1867         lnet_msg_commit(msg, sd->sd_cpt);
1868
1869         /*
1870          * If we are routing the message then we keep the src_nid that was
1871          * set by the originator. If we are not routing then we are the
1872          * originator and set it here.
1873          */
1874         if (!msg->msg_routing)
1875                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1876
1877         if (routing) {
1878                 msg->msg_target_is_router = 1;
1879                 msg->msg_target.pid = LNET_PID_LUSTRE;
1880                 /*
1881                  * since we're routing we want to ensure that the
1882                  * msg_hdr.dest_nid is set to the final destination. When
1883                  * the router receives this message it knows how to route
1884                  * it.
1885                  *
1886                  * final_dst_lpni is set at the beginning of the
1887                  * lnet_select_pathway() function and is never changed.
1888                  * It's safe to use it here.
1889                  */
1890                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1891         } else {
1892                 /*
1893                  * if we're not routing set the dest_nid to the best peer
1894                  * ni NID that we picked earlier in the algorithm.
1895                  */
1896                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1897         }
1898
1899         /*
1900          * if we have response tracker block update it with the next hop
1901          * nid
1902          */
1903         if (msg->msg_md) {
1904                 rspt = msg->msg_md->md_rspt_ptr;
1905                 if (rspt) {
1906                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1907                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1908                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1909                 }
1910         }
1911
1912         rc = lnet_post_send_locked(msg, 0);
1913
1914         if (!rc)
1915                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1916                        libcfs_nid2str(msg->msg_hdr.src_nid),
1917                        libcfs_nid2str(msg->msg_txni->ni_nid),
1918                        libcfs_nid2str(sd->sd_src_nid),
1919                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1920                        libcfs_nid2str(sd->sd_dst_nid),
1921                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1922                        libcfs_nid2str(sd->sd_rtr_nid),
1923                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1924
1925         return rc;
1926 }
1927
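/* Record the chosen local NI as this peer_ni's preferred source NID,
 * but only for a non-MR peer that has no preference recorded yet, and
 * never based on an ACK or REPLY: responses must not establish a
 * preference.
 */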
1928 static inline void
1929 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1930                          struct lnet_msg *msg)
1931 {
1932         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1933             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1934                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1935                        libcfs_nid2str(lni->ni_nid),
1936                        libcfs_nid2str(lpni->lpni_nid));
1937                 lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
1938         }
1939 }
1940
1941 /*
1942  * Source Specified
1943  * Local Destination
1944  * non-mr peer
1945  *
1946  * use the source and destination NIDs as the pathway
1947  */
1948 static int
1949 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1950 {
1951         /* the destination lpni is set before we get here. */
1952
1953         /* find local NI */
1954         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1955         if (!sd->sd_best_ni) {
1956                 CERROR("Can't send to %s: src %s is not a local nid\n",
1957                        libcfs_nid2str(sd->sd_dst_nid),
1958                        libcfs_nid2str(sd->sd_src_nid));
1959                 return -EINVAL;
1960         }
1961
1962         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
1963
1964         return lnet_handle_send(sd);
1965 }
1966
1967 /*
1968  * Source Specified
1969  * Local Destination
1970  * MR Peer
1971  *
1972  * Don't run the selection algorithm on the peer NIs. By specifying the
1973  * local NID, we're also saying that we should always use the destination NID
1974  * provided. This handles the case where we should be using the same
1975  * destination NID for all the messages which belong to the same RPC
1976  * request.
1977  */
1978 static int
1979 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
1980 {
1981         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1982         if (!sd->sd_best_ni) {
1983                 CERROR("Can't send to %s: src %s is not a local nid\n",
1984                        libcfs_nid2str(sd->sd_dst_nid),
1985                        libcfs_nid2str(sd->sd_src_nid));
1986                 return -EINVAL;
1987         }
1988
1989         if (sd->sd_best_lpni &&
1990             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
1991                 return lnet_handle_lo_send(sd);
1992         else if (sd->sd_best_lpni)
1993                 return lnet_handle_send(sd);
1994
1995         CERROR("can't send to %s. no NI on %s\n",
1996                libcfs_nid2str(sd->sd_dst_nid),
1997                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
1998
1999         return -EHOSTUNREACH;
2000 }
2001
2002 struct lnet_ni *
2003 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2004                               struct lnet_peer *peer,
2005                               struct lnet_peer_net *peer_net,
2006                               int cpt)
2007 {
2008         struct lnet_net *local_net;
2009         struct lnet_ni *best_ni;
2010
2011         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2012         if (!local_net)
2013                 return NULL;
2014
2015         /*
2016          * Iterate through the NIs in this local Net and select
2017          * the NI to send from. The selection is determined by
2018          * these 3 criteria in the following priority:
2019          *      1. NUMA
2020          *      2. NI available credits
2021          *      3. Round Robin
2022          */
2023         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2024                                    peer, peer_net, cpt);
2025
2026         return best_ni;
2027 }
2028
2029 static int
2030 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2031                              int cpt)
2032 {
2033         struct lnet_peer *peer;
2034         int rc;
2035
2036         lnet_peer_ni_addref_locked(lpni);
2037
2038         peer = lpni->lpni_peer_net->lpn_peer;
2039
2040         if (lnet_peer_gw_discovery(peer)) {
2041                 lnet_peer_ni_decref_locked(lpni);
2042                 return 0;
2043         }
2044
2045         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2046                 lnet_peer_ni_decref_locked(lpni);
2047                 return 0;
2048         }
2049
2050         rc = lnet_discover_peer_locked(lpni, cpt, false);
2051         if (rc) {
2052                 lnet_peer_ni_decref_locked(lpni);
2053                 return rc;
2054         }
2055         /* The peer may have changed. */
2056         peer = lpni->lpni_peer_net->lpn_peer;
2057         spin_lock(&peer->lp_lock);
2058         if (lnet_peer_is_uptodate_locked(peer)) {
2059                 spin_unlock(&peer->lp_lock);
2060                 lnet_peer_ni_decref_locked(lpni);
2061                 return 0;
2062         }
2063         /* queue message and return */
2064         msg->msg_sending = 0;
2065         msg->msg_txpeer = NULL;
2066         list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
2067         spin_unlock(&peer->lp_lock);
2068
2069         lnet_peer_ni_decref_locked(lpni);
2070
2071         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2072                msg, libcfs_nid2str(peer->lp_primary_nid));
2073
2074         return LNET_DC_WAIT;
2075 }
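
/* Note: a message parked on lp_dc_pendq above is expected to be
 * resent (or finalized on error) when discovery of this peer
 * completes; the caller sees LNET_DC_WAIT and must not touch the
 * message again.
 */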
2076
2077 static int
2078 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2079                              lnet_nid_t dst_nid,
2080                              struct lnet_peer_ni **gw_lpni,
2081                              struct lnet_peer **gw_peer)
2082 {
2083         int rc;
2084         __u32 local_lnet;
2085         struct lnet_peer *gw;
2086         struct lnet_peer *lp;
2087         struct lnet_peer_net *lpn;
2088         struct lnet_peer_net *best_lpn = NULL;
2089         struct lnet_remotenet *rnet, *best_rnet = NULL;
2090         struct lnet_route *best_route = NULL;
2091         struct lnet_route *last_route = NULL;
2092         struct lnet_peer_ni *lpni = NULL;
2093         struct lnet_peer_ni *gwni = NULL;
2094         bool route_found = false;
2095         lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
2096                 (sd->sd_best_ni != NULL) ? sd->sd_best_ni->ni_nid :
2097                 LNET_NID_ANY;
2098         int best_lpn_healthv = 0;
2099         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2100
2101         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2102                libcfs_nid2str(src_nid));
2103
2104         /* If a router nid was specified then we are replying to a GET or
2105          * sending an ACK. In this case we use the gateway associated with the
2106          * specified router nid.
2107          */
2108         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2109                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2110                 if (gwni) {
2111                         gw = gwni->lpni_peer_net->lpn_peer;
2112                         lnet_peer_ni_decref_locked(gwni);
2113                         if (gw->lp_rtr_refcount) {
2114                                 local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
2115                                 route_found = true;
2116                         }
2117                 } else {
2118                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2119                                libcfs_nid2str(sd->sd_rtr_nid));
2120                 }
2121         }
2122
2123         if (!route_found) {
2124                 if (sd->sd_msg->msg_routing) {
2125                         /* If I'm routing this message then I need to find the
2126                          * next hop based on the destination NID
2127                          */
2128                         best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid));
2129                         if (!best_rnet) {
2130                                 CERROR("Unable to route message to %s - Route table may be misconfigured\n",
2131                                        libcfs_nid2str(sd->sd_dst_nid));
2132                                 return -EHOSTUNREACH;
2133                         }
2134                 } else {
2135                         /* we've already looked up the initial lpni using
2136                          * dst_nid
2137                          */
2138                         lpni = sd->sd_best_lpni;
2139                         /* the peer tree must be in existence */
2140                         LASSERT(lpni && lpni->lpni_peer_net &&
2141                                 lpni->lpni_peer_net->lpn_peer);
2142                         lp = lpni->lpni_peer_net->lpn_peer;
2143
2144                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2145                                 /* is this remote network reachable?  */
2146                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2147                                 if (!rnet)
2148                                         continue;
2149
2150                                 if (!best_lpn) {
2151                                         best_lpn = lpn;
2152                                         best_rnet = rnet;
2153                                 }
2154
2155                                 /* select the preferred peer net */
2156                                 if (best_lpn_healthv > lpn->lpn_healthv)
2157                                         continue;
2158                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2159                                         goto use_lpn;
2160
2161                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2162                                         continue;
2163                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2164                                         goto use_lpn;
2165
2166                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2167                                         continue;
2168 use_lpn:
2169                                 best_lpn_healthv = lpn->lpn_healthv;
2170                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2171                                 best_lpn = lpn;
2172                                 best_rnet = rnet;
2173                         }
2174
2175                         if (!best_lpn) {
2176                                 CERROR("peer %s has no available nets\n",
2177                                        libcfs_nid2str(sd->sd_dst_nid));
2178                                 return -EHOSTUNREACH;
2179                         }
2180
2181                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2182                                                                sd->sd_dst_nid,
2183                                                                lp,
2184                                                                best_lpn->lpn_net_id);
2185                         if (!sd->sd_best_lpni) {
2186                                 CERROR("peer %s is unreachable\n",
2187                                        libcfs_nid2str(sd->sd_dst_nid));
2188                                 return -EHOSTUNREACH;
2189                         }
2190
2191                         /* We're attempting to round robin over the remote peer
2192                          * NIs, so update the final destination we selected
2193                          */
2194                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2195
2196                         /* Increment the sequence number of the remote lpni so
2197                          * we can round robin over the different interfaces of
2198                          * the remote lpni
2199                          */
2200                         sd->sd_best_lpni->lpni_seq++;
2201                 }
2202
2203                 /*
2204                  * find the best route. Restrict the selection on the net of the
2205                  * local NI if we've already picked the local NI to send from.
2206                  * Otherwise, let's pick any route we can find and then find
2207                  * a local NI we can reach the route's gateway on. Any route we
2208                  * select will be reachable by virtue of the restriction we have
2209                  * when adding a route.
2210                  */
2211                 best_route = lnet_find_route_locked(best_rnet,
2212                                                     LNET_NIDNET(src_nid),
2213                                                     sd->sd_best_lpni,
2214                                                     &last_route, &gwni);
2215
2216                 if (!best_route) {
2217                         CERROR("no route to %s from %s\n",
2218                                libcfs_nid2str(dst_nid),
2219                                libcfs_nid2str(src_nid));
2220                         return -EHOSTUNREACH;
2221                 }
2222
2223                 if (!gwni) {
2224                         CERROR("Internal Error. Expected a route to %s from %s\n",
2225                                libcfs_nid2str(dst_nid),
2226                                libcfs_nid2str(src_nid));
2227                         return -EFAULT;
2228                 }
2229
2230                 gw = best_route->lr_gateway;
2231                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2232                 local_lnet = best_route->lr_lnet;
2233         }
2234
2235         /*
2236          * Discover this gateway if it hasn't already been discovered.
2237          * This means we might delay the message until discovery has
2238          * completed
2239          */
2240         sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
2241         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2242         if (rc)
2243                 return rc;
2244
2245         if (!sd->sd_best_ni)
2246                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
2247                                         lnet_peer_get_net_locked(gw,
2248                                                                  local_lnet),
2249                                         sd->sd_md_cpt);
2250
2251         if (!sd->sd_best_ni) {
2252                 CERROR("Internal Error. Expected a local ni on %s but none found: %s\n",
2253                        libcfs_net2str(local_lnet),
2254                        libcfs_nid2str(sd->sd_src_nid));
2255                 return -EFAULT;
2256         }
2257
2258         *gw_lpni = gwni;
2259         *gw_peer = gw;
2260
2261         /*
2262          * increment the sequence numbers since now we're sure we're
2263          * going to use this path
2264          */
2265         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2266                 LASSERT(best_route && last_route);
2267                 best_route->lr_seq = last_route->lr_seq + 1;
2268                 if (best_lpn)
2269                         best_lpn->lpn_seq++;
2270         }
2271
2272         return 0;
2273 }
2274
2275 /*
2276  * Handle two cases:
2277  *
2278  * Case 1:
2279  *  Source specified
2280  *  Remote destination
2281  *  Non-MR destination
2282  *
2283  * Case 2:
2284  *  Source specified
2285  *  Remote destination
2286  *  MR destination
2287  *
2288  * The handling of these two cases is similar. Even though the destination
2289  * can be MR or non-MR, we'll deal directly with the router.
2290  */
2291 static int
2292 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2293 {
2294         int rc;
2295         struct lnet_peer_ni *gw_lpni = NULL;
2296         struct lnet_peer *gw_peer = NULL;
2297
2298         /* find local NI */
2299         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2300         if (!sd->sd_best_ni) {
2301                 CERROR("Can't send to %s: src %s is not a local nid\n",
2302                        libcfs_nid2str(sd->sd_dst_nid),
2303                        libcfs_nid2str(sd->sd_src_nid));
2304                 return -EINVAL;
2305         }
2306
2307         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2308                                           &gw_peer);
2309         if (rc)
2310                 return rc;
2311
2312         if (sd->sd_send_case & NMR_DST)
2313                 /*
2314                  * since the final destination is non-MR let's set its preferred
2315                  * NID before we send
2316                  */
2317                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2318                                          sd->sd_msg);
2319
2320         /*
2321          * We're going to send to the gw found so let's set its
2322          * info
2323          */
2324         sd->sd_peer = gw_peer;
2325         sd->sd_best_lpni = gw_lpni;
2326
2327         return lnet_handle_send(sd);
2328 }
2329
2330 struct lnet_ni *
2331 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2332                                bool discovery)
2333 {
2334         struct lnet_peer_net *lpn = NULL;
2335         struct lnet_peer_net *best_lpn = NULL;
2336         struct lnet_net *net = NULL;
2337         struct lnet_net *best_net = NULL;
2338         struct lnet_ni *best_ni = NULL;
2339         int best_lpn_healthv = 0;
2340         int best_net_healthv = 0;
2341         int net_healthv;
2342         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2343         __u32 lpn_sel_prio;
2344         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2345         __u32 net_sel_prio;
2346         bool exit = false;
2347
2348         /*
2349          * The peer can have multiple interfaces, some of them can be on
2350          * the local network and others on a routed network. We should
2351          * prefer the local network. However if the local network is not
2352          * available then we need to try the routed network
2353          */
2354
2355         /* go through all the peer nets and find the best_ni */
2356         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2357                 /*
2358                  * The peer's list of nets can contain non-local nets. We
2359                  * want to only examine the local ones.
2360                  */
2361                 net = lnet_get_net_locked(lpn->lpn_net_id);
2362                 if (!net)
2363                         continue;
2364
2365                 lpn_sel_prio = lpn->lpn_sel_priority;
2366                 net_healthv = lnet_get_net_healthv_locked(net);
2367                 net_sel_prio = net->net_sel_priority;
2368
2369                 /*
2370                  * if this is a discovery message and lp_disc_net_id is
2371                  * specified then use that net to send the discovery on.
2372                  */
2373                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2374                     discovery) {
2375                         exit = true;
2376                         goto select_lpn;
2377                 }
2378
2379                 if (!best_lpn)
2380                         goto select_lpn;
2381
2382                 /* always select the lpn with the best health */
2383                 if (best_lpn_healthv > lpn->lpn_healthv)
2384                         continue;
2385                 else if (best_lpn_healthv < lpn->lpn_healthv)
2386                         goto select_lpn;
2387
2388                 /* select the preferred peer and local nets */
2389                 if (best_lpn_sel_prio < lpn_sel_prio)
2390                         continue;
2391                 else if (best_lpn_sel_prio > lpn_sel_prio)
2392                         goto select_lpn;
2393
2394                 if (best_net_healthv > net_healthv)
2395                         continue;
2396                 else if (best_net_healthv < net_healthv)
2397                         goto select_lpn;
2398
2399                 if (best_net_sel_prio < net_sel_prio)
2400                         continue;
2401                 else if (best_net_sel_prio > net_sel_prio)
2402                         goto select_lpn;
2403
2404                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2405                         continue;
2406                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2407                         goto select_lpn;
2408
2409                 /* round robin over the local networks */
2410                 if (best_net->net_seq <= net->net_seq)
2411                         continue;
2412
2413 select_lpn:
2414                 best_net_healthv = net_healthv;
2415                 best_net_sel_prio = net_sel_prio;
2416                 best_lpn_healthv = lpn->lpn_healthv;
2417                 best_lpn_sel_prio = lpn_sel_prio;
2418                 best_lpn = lpn;
2419                 best_net = net;
2420
2421                 if (exit)
2422                         break;
2423         }
2424
2425         if (best_lpn) {
2426                 /* Select the best NI on the same net as best_lpn chosen
2427                  * above
2428                  */
2429                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer,
2430                                                         best_lpn, md_cpt);
2431         }
2432
2433         return best_ni;
2434 }
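
/* Example of the cascade above (hypothetical values): a peer net with
 * lpn_healthv 1000 beats one with 900 regardless of priority; between
 * equally healthy peer nets the lower lpn_sel_priority value wins;
 * and only fully tied candidates fall through to the sequence-number
 * round robin.
 */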
2435
2436 static struct lnet_ni *
2437 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2438 {
2439         struct lnet_ni *best_ni = NULL;
2440         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2441         struct lnet_peer_ni *lpni_entry;
2442
2443         /*
2444          * We must use a consistent source address when sending to a
2445          * non-MR peer. However, a non-MR peer can have multiple NIDs
2446          * on multiple networks, and we may even need to talk to this
2447          * peer on multiple networks -- certain types of
2448          * load-balancing configuration do this.
2449          *
2450          * So we need to pick the NI the peer prefers for this
2451          * particular network.
2452          */
2453         LASSERT(peer_net);
2454         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2455                             lpni_peer_nis) {
2456                 if (lpni_entry->lpni_pref_nnids == 0)
2457                         continue;
2458                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2459                 best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt);
2460                 break;
2461         }
2462
2463         return best_ni;
2464 }
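
/* For instance, each peer_ni of an NMR peer reachable on both tcp0
 * and o2ib0 records at most one preferred source NID; the loop above
 * returns the local NI matching whichever entry on this peer_net has
 * a preference, so replies keep using the source address the peer
 * first saw.
 */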
2465
2466 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2467 static int
2468 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2469 {
2470         struct lnet_ni *best_ni = NULL;
2471         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2472
2473         /*
2474          * See the comment in lnet_find_existing_preferred_best_ni():
2475          * we must use a consistent source address when sending to a
2476          * non-MR peer, so pick the NI the peer prefers for this
2477          * particular network.
2478          */
2483
2484         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2485                                                        sd->sd_cpt);
2486
2487         /* if best_ni is still not set just pick one */
2488         if (!best_ni) {
2489                 best_ni =
2490                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2491                                                 sd->sd_best_lpni->lpni_peer_net,
2492                                                 sd->sd_md_cpt);
2493                 /* If there is no best_ni we don't have a route */
2494                 if (!best_ni) {
2495                         CERROR("no path to %s from net %s\n",
2496                                 libcfs_nid2str(best_lpni->lpni_nid),
2497                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2498                         return -EHOSTUNREACH;
2499                 }
2500         }
2501
2502         sd->sd_best_ni = best_ni;
2503
2504         /* Set preferred NI if necessary. */
2505         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2506
2507         return 0;
2508 }
2509
2511 /*
2512  * Source not specified
2513  * Local destination
2514  * Non-MR Peer
2515  *
2516  * always use the same source NID for NMR peers
2517  * If we've talked to that peer before then we already have a preferred
2518  * source NI associated with it. Otherwise, we select a preferred local NI
2519  * and store it in the peer
2520  */
2521 static int
2522 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2523 {
2524         int rc = 0;
2525
2526         /* sd->sd_best_lpni is already set to the final destination */
2527
2528         /*
2529          * At this point we should've created the peer ni and peer. If we
2530          * can't find them, then something went wrong. Instead of asserting,
2531          * output a relevant message and fail the send.
2532          */
2533         if (!sd->sd_best_lpni) {
2534                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2535                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2536                        libcfs_nid2str(sd->sd_dst_nid));
2538                 return -EFAULT;
2539         }
2540
2541         if (sd->sd_msg->msg_routing) {
2542                 /* If I'm forwarding this message then I can choose any NI
2543                  * on the destination peer net
2544                  */
2545                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2546                                                                sd->sd_peer,
2547                                                                sd->sd_best_lpni->lpni_peer_net,
2548                                                                sd->sd_md_cpt);
2549                 if (!sd->sd_best_ni) {
2550                         CERROR("Unable to forward message to %s. No local NI available\n",
2551                                libcfs_nid2str(sd->sd_dst_nid));
2552                         rc = -EHOSTUNREACH;
2553                 }
2554         } else
2555                 rc = lnet_select_preferred_best_ni(sd);
2556
2557         if (!rc)
2558                 rc = lnet_handle_send(sd);
2559
2560         return rc;
2561 }
2562
2563 static int
2564 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2565 {
2566         /*
2567          * NOTE we've already handled the remote peer case. So we only
2568          * need to worry about the local case here.
2569          *
2570          * if we're sending a response, ACK or reply, we need to send it
2571          * to the destination NID given to us. At this point we already
2572          * have the peer_ni we're supposed to send to, so just find the
2573          * best_ni on the peer net and use that. Since we're sending to an
2574          * MR peer then we can just run the selection algorithm on our
2575          * local NIs and pick the best one.
2576          */
2577         if (sd->sd_send_case & SND_RESP) {
2578                 sd->sd_best_ni =
2579                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2580                                                 sd->sd_best_lpni->lpni_peer_net,
2581                                                 sd->sd_md_cpt);
2582
2583                 if (!sd->sd_best_ni) {
2584                         /*
2585                          * We're not going to deal with not able to send
2586                          * a response to the provided final destination
2587                          */
2588                         CERROR("Can't send response to %s. No local NI available\n",
2589                                libcfs_nid2str(sd->sd_dst_nid));
2591                         return -EHOSTUNREACH;
2592                 }
2593
2594                 return lnet_handle_send(sd);
2595         }
2596
2597         /*
2598          * If we get here that means we're sending a fresh request, PUT or
2599          * GET, so we need to run our standard selection algorithm.
2600          * First find the best local interface that's on any of the peer's
2601          * networks.
2602          */
2603         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2604                                         sd->sd_md_cpt,
2605                                         lnet_msg_discovery(sd->sd_msg));
2606         if (sd->sd_best_ni) {
2607                 sd->sd_best_lpni =
2608                   lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid,
2609                                       sd->sd_peer,
2610                                       sd->sd_best_ni->ni_net->net_id);
2611
2612                 /*
2613                  * if we're successful in selecting a peer_ni on the local
2614                  * network, then send to it. Otherwise fall through and
2615                  * try and see if we can reach it over another routed
2616                  * network
2617                  */
2618                 if (sd->sd_best_lpni &&
2619                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2620                         /*
2621                          * in case we initially started with a routed
2622                          * destination, let's reset to local
2623                          */
2624                         sd->sd_send_case &= ~REMOTE_DST;
2625                         sd->sd_send_case |= LOCAL_DST;
2626                         return lnet_handle_lo_send(sd);
2627                 } else if (sd->sd_best_lpni) {
2628                         /*
2629                          * in case we initially started with a routed
2630                          * destination, let's reset to local
2631                          */
2632                         sd->sd_send_case &= ~REMOTE_DST;
2633                         sd->sd_send_case |= LOCAL_DST;
2634                         return lnet_handle_send(sd);
2635                 }
2636
2637                 CERROR("Internal Error. Expected to have a best_lpni: %s -> %s\n",
2639                        libcfs_nid2str(sd->sd_src_nid),
2640                        libcfs_nid2str(sd->sd_dst_nid));
2641
2642                 return -EFAULT;
2643         }
2644
2645         /*
2646          * Peer doesn't have a local network. Let's see if there is
2647          * a remote network we can reach it on.
2648          */
2649         return PASS_THROUGH;
2650 }
2651
2652 /*
2653  * Case 1:
2654  *      Source NID not specified
2655  *      Local destination
2656  *      MR peer
2657  *
2658  * Case 2:
2659  *      Source NID not specified
2660  *      Remote destination
2661  *      MR peer
2662  *
2663  * In both of these cases if we're sending a response, ACK or REPLY, then
2664  * we need to send to the destination NID provided.
2665  *
2666  * In the remote case let's deal with MR routers.
2668  */
2669
2670 static int
2671 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2672 {
2673         int rc = 0;
2674         struct lnet_peer *gw_peer = NULL;
2675         struct lnet_peer_ni *gw_lpni = NULL;
2676
2677         /*
2678          * handle sending a response to a remote peer here so we don't
2679          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2680          */
2681         if (sd->sd_send_case & REMOTE_DST &&
2682             sd->sd_send_case & SND_RESP) {
2683                 struct lnet_peer_ni *gw;
2684                 struct lnet_peer *gw_peer;
2685
2686                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2687                                                   &gw_peer);
2688                 if (rc < 0) {
2689                         CERROR("Can't send response to %s. No route available\n",
2690                                libcfs_nid2str(sd->sd_dst_nid));
2692                         return -EHOSTUNREACH;
2693                 } else if (rc > 0) {
2694                         return rc;
2695                 }
2696
2697                 sd->sd_best_lpni = gw;
2698                 sd->sd_peer = gw_peer;
2699
2700                 return lnet_handle_send(sd);
2701         }
2702
2703         /*
2704          * Even though the NID for the peer might not be on a local network,
2705          * since the peer is MR there could be other interfaces on the
2706          * local network. In that case we'd still like to prefer the local
2707          * network over the routed network. If we're unable to do that
2708          * then we select the best router among the different routed networks,
2709          * and if the router is MR then we can deal with it as such.
2710          */
2711         rc = lnet_handle_any_mr_dsta(sd);
2712         if (rc != PASS_THROUGH)
2713                 return rc;
2714
2715         /*
2716          * Now that we must route to the destination, we must consider the
2717          * MR case, where the destination has multiple interfaces, some of
2718          * which we can route to and others we do not. For this reason we
2719          * need to select the destination which we can route to and if
2720          * there are multiple, we need to round robin.
2721          */
2722         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2723                                           &gw_peer);
2724         if (rc)
2725                 return rc;
2726
2727         sd->sd_send_case &= ~LOCAL_DST;
2728         sd->sd_send_case |= REMOTE_DST;
2729
2730         sd->sd_peer = gw_peer;
2731         sd->sd_best_lpni = gw_lpni;
2732
2733         return lnet_handle_send(sd);
2734 }
2735
2736 /*
2737  * Source not specified
2738  * Remote destination
2739  * Non-MR peer
2740  *
2741  * Must send to the specified peer NID using the same source NID that
2742  * we've used before. If it's the first time to talk to that peer then
2743  * find the source NI and assign it as preferred to that peer
2744  */
2745 static int
2746 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2747 {
2748         int rc;
2749         struct lnet_peer_ni *gw_lpni = NULL;
2750         struct lnet_peer *gw_peer = NULL;
2751
2752         /*
2753          * Let's see if we have a preferred NI to talk to this NMR peer
2754          */
2755         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2756                                                               sd->sd_cpt);
2757
2758         /*
2759          * find the router and that'll find the best NI if we didn't find
2760          * it already.
2761          */
2762         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2763                                           &gw_peer);
2764         if (rc)
2765                 return rc;
2766
2767         /*
2768          * set the best_ni we've chosen as the preferred one for
2769          * this peer
2770          */
2771         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2772
2773         /* we'll be sending to the gw */
2774         sd->sd_best_lpni = gw_lpni;
2775         sd->sd_peer = gw_peer;
2776
2777         return lnet_handle_send(sd);
2778 }
2779
2780 static int
2781 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2782 {
2783         /*
2784          * turn off the SND_RESP bit.
2785          * It will be checked in the case handling
2786          */
2787         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2788
2789         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2790                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2791                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2792                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2793                 libcfs_nid2str(sd->sd_dst_nid),
2794                 (send_case & LOCAL_DST) ? "local" : "routed");
2795
2796         switch (send_case) {
2797         /*
2798          * For all cases where the source is specified, we should always
2799          * use the destination NID, whether it's an MR destination or not,
2800          * since we're continuing a series of related messages for the
2801          * same RPC
2802          */
2803         case SRC_SPEC_LOCAL_NMR_DST:
2804                 return lnet_handle_spec_local_nmr_dst(sd);
2805         case SRC_SPEC_LOCAL_MR_DST:
2806                 return lnet_handle_spec_local_mr_dst(sd);
2807         case SRC_SPEC_ROUTER_NMR_DST:
2808         case SRC_SPEC_ROUTER_MR_DST:
2809                 return lnet_handle_spec_router_dst(sd);
2810         case SRC_ANY_LOCAL_NMR_DST:
2811                 return lnet_handle_any_local_nmr_dst(sd);
2812         case SRC_ANY_LOCAL_MR_DST:
2813         case SRC_ANY_ROUTER_MR_DST:
2814                 return lnet_handle_any_mr_dst(sd);
2815         case SRC_ANY_ROUTER_NMR_DST:
2816                 return lnet_handle_any_router_nmr_dst(sd);
2817         default:
2818                 CERROR("Unknown send case\n");
2819                 return -1;
2820         }
2821 }
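
/*
 * Note: each case label above is assumed to be the bitwise OR of one flag
 * from each classification pair set in lnet_select_pathway() below
 * (SRC_SPEC/SRC_ANY, LOCAL_DST/REMOTE_DST, MR_DST/NMR_DST), e.g.:
 *
 *   SRC_SPEC_LOCAL_NMR_DST == (SRC_SPEC | LOCAL_DST | NMR_DST)
 *   SRC_ANY_ROUTER_MR_DST  == (SRC_ANY | REMOTE_DST | MR_DST)
 *
 * so a message with an unspecified source, a routed destination and an MR
 * peer is dispatched to lnet_handle_any_mr_dst().
 */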
2822
2823 static int
2824 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2825                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2826 {
2827         struct lnet_peer_ni *lpni;
2828         struct lnet_peer *peer;
2829         struct lnet_send_data send_data;
2830         int cpt, rc;
2831         int md_cpt;
2832         __u32 send_case = 0;
2833         bool final_hop;
2834         bool mr_forwarding_allowed;
2835
2836         memset(&send_data, 0, sizeof(send_data));
2837
2838         /*
2839          * get an initial CPT to use for locking. The idea here is not to
2840          * serialize the calls to select_pathway, so that as many
2841          * operations can run concurrently as possible. To do that we use
2842          * the CPT where this call is being executed. Later on when we
2843          * determine the CPT to use in lnet_message_commit, we switch the
2844          * lock and check if there was any configuration change. If there
2845          * was none, we proceed; if there was, we restart the operation.
2846          */
2847         cpt = lnet_net_lock_current();
2848
2849         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2850         if (md_cpt == CFS_CPT_ANY)
2851                 md_cpt = cpt;
2852
2853 again:
2854
2855         /*
2856          * If we're being asked to send to the loopback interface, there
2857          * is no need to go through any selection. We can just shortcut
2858          * the entire process and send over lolnd
2859          */
2860         send_data.sd_msg = msg;
2861         send_data.sd_cpt = cpt;
2862         if (dst_nid == LNET_NID_LO_0) {
2863                 rc = lnet_handle_lo_send(&send_data);
2864                 lnet_net_unlock(cpt);
2865                 return rc;
2866         }
2867
2868         /*
2869          * find an existing peer_ni, or create one and mark it as having been
2870          * created due to network traffic. This call will create the
2871          * peer->peer_net->peer_ni tree.
2872          */
2873         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2874         if (IS_ERR(lpni)) {
2875                 lnet_net_unlock(cpt);
2876                 return PTR_ERR(lpni);
2877         }
2878
2879         /*
2880          * Cache the original src_nid and rtr_nid. If we need to resend the
2881          * message then we'll need to know whether the src_nid was originally
2882          * specified for this message. If it was originally specified,
2883          * then we need to keep using the same src_nid since it's
2884          * continuing the same sequence of messages. Similarly, rtr_nid will
2885          * affect our choice of next hop.
2886          */
2887         msg->msg_src_nid_param = src_nid;
2888         msg->msg_rtr_nid_param = rtr_nid;
2889
2890         /*
2891          * If necessary, perform discovery on the peer that owns this peer_ni.
2892          * Note, this can result in the ownership of this peer_ni changing
2893          * to another peer object.
2894          */
2895         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2896         if (rc) {
2897                 lnet_peer_ni_decref_locked(lpni);
2898                 lnet_net_unlock(cpt);
2899                 return rc;
2900         }
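
        /* Note (an assumption worth stating): dropping the lookup reference
         * below is safe because the peer table holds its own reference to
         * lpni, and the net lock we hold prevents the peer object from
         * being torn down underneath us.
         */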
2901         lnet_peer_ni_decref_locked(lpni);
2902
2903         peer = lpni->lpni_peer_net->lpn_peer;
2904
2905         /*
2906          * Identify the different send cases
2907          */
2908         if (src_nid == LNET_NID_ANY)
2909                 send_case |= SRC_ANY;
2910         else
2911                 send_case |= SRC_SPEC;
2912
2913         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2914                 send_case |= LOCAL_DST;
2915         else
2916                 send_case |= REMOTE_DST;
2917
2918         final_hop = false;
2919         if (msg->msg_routing && (send_case & LOCAL_DST))
2920                 final_hop = true;
2921
2922         /* Determine whether to allow MR forwarding for this message.
2923          * NB: MR forwarding is allowed if the message originator and the
2924          * destination are both MR capable, and the destination lpni that was
2925          * originally chosen by the originator is unhealthy or down.
2926          * We check the MR capability of the destination further below
2927          */
2928         mr_forwarding_allowed = false;
2929         if (final_hop) {
2930                 struct lnet_peer *src_lp;
2931                 struct lnet_peer_ni *src_lpni;
2932
2933                 src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid,
2934                                                   LNET_NID_ANY, cpt);
2935                 /* We don't fail the send if we hit any errors here. We'll just
2936                  * try to send it via non-multi-rail criteria
2937                  */
2938                 if (!IS_ERR(src_lpni)) {
2939                         /* Drop ref taken by lnet_nid2peerni_locked() */
2940                         lnet_peer_ni_decref_locked(src_lpni);
2941                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
2942                         if (lnet_peer_is_multi_rail(src_lp) &&
2943                             !lnet_is_peer_ni_alive(lpni))
2944                                 mr_forwarding_allowed = true;
2946                 }
2947                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
2948                        mr_forwarding_allowed ? "allowed" : "not allowed");
2949         }
2950
2951         /*
2952          * Deal with the peer as NMR in the following cases:
2953          * 1. the peer is NMR
2954          * 2. We're trying to recover a specific peer NI
2955          * 3. I'm a router sending to the final destination and MR forwarding is
2956          *    not allowed for this message (as determined above).
2957          *    In this case the source of the message would've
2958          *    already selected the final destination so my job
2959          *    is to honor the selection.
2960          */
2961         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
2962             (final_hop && !mr_forwarding_allowed))
2963                 send_case |= NMR_DST;
2964         else
2965                 send_case |= MR_DST;
2966
2967         if (lnet_msg_is_response(msg))
2968                 send_case |= SND_RESP;
2969
2970         /* assign parameters to the send_data */
2971         send_data.sd_rtr_nid = rtr_nid;
2972         send_data.sd_src_nid = src_nid;
2973         send_data.sd_dst_nid = dst_nid;
2974         send_data.sd_best_lpni = lpni;
2975         /*
2976          * keep a pointer to the final destination in case we're going to
2977          * route, since we'll need to access it later
2978          */
2979         send_data.sd_final_dst_lpni = lpni;
2980         send_data.sd_peer = peer;
2981         send_data.sd_md_cpt = md_cpt;
2982         send_data.sd_send_case = send_case;
2983
2984         rc = lnet_handle_send_case_locked(&send_data);
2985
2986         /*
2987          * Update the local cpt since send_data.sd_cpt might've been
2988          * updated as a result of calling lnet_handle_send_case_locked().
2989          */
2990         cpt = send_data.sd_cpt;
2991
2992         if (rc == REPEAT_SEND)
2993                 goto again;
2994
2995         lnet_net_unlock(cpt);
2996
2997         return rc;
2998 }
2999
3000 int
3001 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
3002 {
3003         lnet_nid_t              dst_nid = msg->msg_target.nid;
3004         int                     rc;
3005
3006         /*
3007          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
3008          * but we might want to use a pre-determined router for ACK/REPLY
3009          * in the future
3010          */
3011         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3012         LASSERT(msg->msg_txpeer == NULL);
3013         LASSERT(msg->msg_txni == NULL);
3014         LASSERT(!msg->msg_sending);
3015         LASSERT(!msg->msg_target_is_router);
3016         LASSERT(!msg->msg_receiving);
3017
3018         msg->msg_sending = 1;
3019
3020         LASSERT(!msg->msg_tx_committed);
3021
3022         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3023         if (rc < 0) {
3024                 if (rc == -EHOSTUNREACH)
3025                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3026                 else
3027                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3028                 return rc;
3029         }
3030
3031         if (rc == LNET_CREDIT_OK)
3032                 lnet_ni_send(msg->msg_txni, msg);
3033
3034         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3035         return 0;
3036 }
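
/*
 * Usage sketch (illustrative, not compiled): how a caller is expected to
 * drive lnet_send(). "example_send" is hypothetical; the error handling
 * mirrors what the resend path below does with the return codes
 * documented above.
 */
#if 0
static void
example_send(struct lnet_msg *msg)
{
	int rc;

	/* LNET_NID_ANY as the source lets lnet_select_pathway() pick the
	 * source NI; rtr_nid is LNET_NID_ANY in all current use-cases.
	 */
	rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);
	if (rc < 0) {
		/* msg_health_status has already been set */
		msg->msg_no_resend = true;
		lnet_finalize(msg, rc);
		return;
	}
	/* rc == 0: the message was handed to the LND, or queued waiting
	 * for credits or for peer discovery to complete.
	 */
}
#endif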
3037
3038 enum lnet_mt_event_type {
3039         MT_TYPE_LOCAL_NI = 0,
3040         MT_TYPE_PEER_NI
3041 };
3042
3043 struct lnet_mt_event_info {
3044         enum lnet_mt_event_type mt_type;
3045         lnet_nid_t mt_nid;
3046 };
3047
3048 /* called with res_lock held */
3049 void
3050 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3051 {
3052         struct lnet_rsp_tracker *rspt;
3053
3054         /*
3055          * msg has a refcount on the MD so the MD is not going away.
3056          * The rspt queue for the cpt is protected by
3057          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3058          */
3059         if (!md->md_rspt_ptr)
3060                 return;
3061
3062         rspt = md->md_rspt_ptr;
3063
3064         /* debug code */
3065         LASSERT(rspt->rspt_cpt == cpt);
3066
3067         md->md_rspt_ptr = NULL;
3068
3069         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3070                 /*
3071                  * The monitor thread has invalidated this handle because the
3072                  * response timed out, but it failed to lookup the MD. That
3073                  * means this response tracker is on the zombie list. We can
3074                  * safely remove it under the resource lock (held by caller) and
3075                  * free the response tracker block.
3076                  */
3077                 list_del(&rspt->rspt_on_list);
3078                 lnet_rspt_free(rspt, cpt);
3079         } else {
3080                 /*
3081                  * invalidate the handle to indicate that a response has been
3082                  * received, which will then lead the monitor thread to clean up
3083                  * the rspt block.
3084                  */
3085                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3086         }
3087 }
3088
3089 void
3090 lnet_clean_zombie_rstqs(void)
3091 {
3092         struct lnet_rsp_tracker *rspt, *tmp;
3093         int i;
3094
3095         cfs_cpt_for_each(i, lnet_cpt_table()) {
3096                 list_for_each_entry_safe(rspt, tmp,
3097                                          the_lnet.ln_mt_zombie_rstqs[i],
3098                                          rspt_on_list) {
3099                         list_del(&rspt->rspt_on_list);
3100                         lnet_rspt_free(rspt, i);
3101                 }
3102         }
3103
3104         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3105 }
3106
3107 static void
3108 lnet_finalize_expired_responses(void)
3109 {
3110         struct lnet_libmd *md;
3111         struct lnet_rsp_tracker *rspt, *tmp;
3112         ktime_t now;
3113         int i;
3114
3115         if (the_lnet.ln_mt_rstq == NULL)
3116                 return;
3117
3118         cfs_cpt_for_each(i, lnet_cpt_table()) {
3119                 LIST_HEAD(local_queue);
3120
3121                 lnet_net_lock(i);
3122                 if (!the_lnet.ln_mt_rstq[i]) {
3123                         lnet_net_unlock(i);
3124                         continue;
3125                 }
3126                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3127                 lnet_net_unlock(i);
3128
3129                 now = ktime_get();
3130
3131                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3132                         /*
3133                          * The rspt mdh will be invalidated when a response
3134                          * is received or whenever we want to discard the
3135                          * block. The monitor thread will walk the queue
3136                          * and clean up any rspts with an invalid mdh. It
3137                          * only walks the queue until the first unexpired
3138                          * rspt block, which means that some rspt blocks
3139                          * that have received their corresponding
3140                          * responses will linger in the queue until they
3141                          * are eventually cleaned up.
3142                          */
3143                         lnet_res_lock(i);
3144                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3145                                 lnet_res_unlock(i);
3146                                 list_del(&rspt->rspt_on_list);
3147                                 lnet_rspt_free(rspt, i);
3148                                 continue;
3149                         }
3150
3151                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3152                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3153                                 struct lnet_peer_ni *lpni;
3154                                 lnet_nid_t nid;
3155
3156                                 md = lnet_handle2md(&rspt->rspt_mdh);
3157                                 if (!md) {
3158                                         /* MD has been queued for unlink, but
3159                                          * rspt hasn't been detached (Note we've
3160                                          * checked above that the rspt_mdh is
3161                                          * valid). Since we cannot lookup the MD
3162                                          * we're unable to detach the rspt
3163                                          * ourselves. Thus, move the rspt to the
3164                                          * zombie list where we'll wait for
3165                                          * either:
3166                                          *   1. The remaining operations on the
3167                                          *   MD to complete. In this case the
3168                                          *   final operation will result in
3169                                          *   lnet_msg_detach_md()->
3170                                          *   lnet_detach_rsp_tracker() where
3171                                          *   we will clean up this response
3172                                          *   tracker.
3173                                          *   2. LNet to shutdown. In this case
3174                                          *   we'll wait until after all LND Nets
3175                                          *   have shutdown and then we can
3176                                          *   safely free any remaining response
3177                                          *   tracker blocks on the zombie list.
3178                                          * Note: We need to hold the resource
3179                                          * lock when adding to the zombie list
3180                                          * because we may have concurrent access
3181                                          * with lnet_detach_rsp_tracker().
3182                                          */
3183                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3184                                         list_move(&rspt->rspt_on_list,
3185                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3186                                         lnet_res_unlock(i);
3187                                         continue;
3188                                 }
3189                                 LASSERT(md->md_rspt_ptr == rspt);
3190                                 md->md_rspt_ptr = NULL;
3191                                 lnet_res_unlock(i);
3192
3193                                 LNetMDUnlink(rspt->rspt_mdh);
3194
3195                                 nid = rspt->rspt_next_hop_nid;
3196
3197                                 list_del(&rspt->rspt_on_list);
3198                                 lnet_rspt_free(rspt, i);
3199
3200                                 /* If we're shutting down we just want to clean
3201                                  * up the rspt blocks
3202                                  */
3203                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3204                                         continue;
3205
3206                                 lnet_net_lock(i);
3207                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3208                                 lnet_net_unlock(i);
3209
3210                                 CDEBUG(D_NET,
3211                                        "Response timeout: md = %p: nid = %s\n",
3212                                        md, libcfs_nid2str(nid));
3213
3214                                 /*
3215                                  * If there is a timeout on the response
3216                                  * from the next hop decrement its health
3217                                  * value so that we don't use it
3218                                  */
3219                                 lnet_net_lock(0);
3220                                 lpni = lnet_find_peer_ni_locked(nid);
3221                                 if (lpni) {
3222                                         lnet_handle_remote_failure_locked(lpni);
3223                                         lnet_peer_ni_decref_locked(lpni);
3224                                 }
3225                                 lnet_net_unlock(0);
3226                         } else {
3227                                 lnet_res_unlock(i);
3228                                 break;
3229                         }
3230                 }
3231
3232                 if (!list_empty(&local_queue)) {
3233                         lnet_net_lock(i);
3234                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3235                         lnet_net_unlock(i);
3236                 }
3237         }
3238 }
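
/*
 * Summary of the response tracker lifecycle implemented above:
 *
 *   attach:   an MD with response tracking enabled gets an rspt queued on
 *             ln_mt_rstq[cpt] with rspt_deadline set
 *   response: lnet_detach_rsp_tracker() invalidates rspt_mdh and the
 *             monitor thread frees the block on a later pass
 *   timeout:  the monitor thread unlinks the MD, bumps the response
 *             timeout counter and decrements the next hop's health value
 *   no MD:    the rspt moves to ln_mt_zombie_rstqs[cpt] until the final
 *             MD operation or LNet shutdown cleans it up
 */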
3239
3240 static void
3241 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3242 {
3243         struct lnet_msg *msg;
3244
3245         while (!list_empty(resendq)) {
3246                 struct lnet_peer_ni *lpni;
3247
3248                 msg = list_entry(resendq->next, struct lnet_msg,
3249                                  msg_list);
3250
3251                 list_del_init(&msg->msg_list);
3252
3253                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3254                 if (!lpni) {
3255                         lnet_net_unlock(cpt);
3256                         CERROR("Expected that a peer is already created for %s\n",
3257                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3258                         msg->msg_no_resend = true;
3259                         lnet_finalize(msg, -EFAULT);
3260                         lnet_net_lock(cpt);
3261                 } else {
3262                         int rc;
3263
3264                         lnet_peer_ni_decref_locked(lpni);
3265
3266                         lnet_net_unlock(cpt);
3267                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3268                                libcfs_nid2str(msg->msg_src_nid_param),
3269                                libcfs_id2str(msg->msg_target),
3270                                lnet_msgtyp2str(msg->msg_type),
3271                                msg->msg_recovery,
3272                                msg->msg_retry_count);
3273                         rc = lnet_send(msg->msg_src_nid_param, msg,
3274                                        msg->msg_rtr_nid_param);
3275                         if (rc) {
3276                                 CERROR("Error sending %s to %s: %d\n",
3277                                        lnet_msgtyp2str(msg->msg_type),
3278                                        libcfs_id2str(msg->msg_target), rc);
3279                                 msg->msg_no_resend = true;
3280                                 lnet_finalize(msg, rc);
3281                         }
3282                         lnet_net_lock(cpt);
3283                         if (!rc)
3284                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3285                 }
3286         }
3287 }
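
/*
 * Note: lnet_resend_pending_msgs_locked() drops the net lock around
 * lnet_send() and lnet_finalize() because both take LNet locks
 * internally; this is safe since each message is removed from the resend
 * queue before the lock is released.
 */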
3288
3289 static void
3290 lnet_resend_pending_msgs(void)
3291 {
3292         int i;
3293
3294         cfs_cpt_for_each(i, lnet_cpt_table()) {
3295                 lnet_net_lock(i);
3296                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3297                 lnet_net_unlock(i);
3298         }
3299 }
3300
3301 /* called with cpt and ni_lock held */
3302 static void
3303 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3304 {
3305         struct lnet_handle_md recovery_mdh;
3306
3307         LNetInvalidateMDHandle(&recovery_mdh);
3308
3309         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3310             force) {
3311                 recovery_mdh = ni->ni_ping_mdh;
3312                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3313         }
3314         lnet_ni_unlock(ni);
3315         lnet_net_unlock(cpt);
3316         if (!LNetMDHandleIsInvalid(recovery_mdh))
3317                 LNetMDUnlink(recovery_mdh);
3318         lnet_net_lock(cpt);
3319         lnet_ni_lock(ni);
3320 }
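
/*
 * Note: the NI and net locks are dropped around LNetMDUnlink() above (and
 * in the lpni variant below), presumably because unlinking takes the LNet
 * resource locks and may fire the MD's event handler, neither of which
 * may safely happen under the locks held here.
 */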
3321
3322 static void
3323 lnet_recover_local_nis(void)
3324 {
3325         struct lnet_mt_event_info *ev_info;
3326         LIST_HEAD(processed_list);
3327         LIST_HEAD(local_queue);
3328         struct lnet_handle_md mdh;
3329         struct lnet_ni *tmp;
3330         struct lnet_ni *ni;
3331         lnet_nid_t nid;
3332         int healthv;
3333         int rc;
3334
3335         /*
3336          * splice the recovery queue onto a local queue. We will iterate
3337          * through the local queue and update it as needed. Once we're
3338          * done with the traversal, we'll splice the local queue back onto
3339          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3340          * will be traversed in the next iteration.
3341          */
3342         lnet_net_lock(0);
3343         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3344                          &local_queue);
3345         lnet_net_unlock(0);
3346
3347         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3348                 /*
3349                  * if an NI is being deleted or it is now healthy, there
3350                  * is no need to keep it around in the recovery queue.
3351                  * The monitor thread is the only thread responsible for
3352                  * removing the NI from the recovery queue.
3353                  * Multiple threads can be adding NIs to the recovery
3354                  * queue.
3355                  */
3356                 healthv = atomic_read(&ni->ni_healthv);
3357
3358                 lnet_net_lock(0);
3359                 lnet_ni_lock(ni);
3360                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3361                     healthv == LNET_MAX_HEALTH_VALUE) {
3362                         list_del_init(&ni->ni_recovery);
3363                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3364                         lnet_ni_unlock(ni);
3365                         lnet_ni_decref_locked(ni, 0);
3366                         lnet_net_unlock(0);
3367                         continue;
3368                 }
3369
3370                 /*
3371                  * if the local NI failed recovery we must unlink the md.
3372                  * But we want to keep the local_ni on the recovery queue
3373                  * so we can continue the attempts to recover it.
3374                  */
3375                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3376                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3377                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3378                 }
3379
3380                 lnet_ni_unlock(ni);
3381                 lnet_net_unlock(0);
3382
3384                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3385                        libcfs_nid2str(ni->ni_nid));
3386
3387                 lnet_ni_lock(ni);
3388                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3389                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3390                         lnet_ni_unlock(ni);
3391
3392                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3393                         if (!ev_info) {
3394                                 CERROR("out of memory. Can't recover %s\n",
3395                                        libcfs_nid2str(ni->ni_nid));
3396                                 lnet_ni_lock(ni);
3397                                 ni->ni_recovery_state &=
3398                                   ~LNET_NI_RECOVERY_PENDING;
3399                                 lnet_ni_unlock(ni);
3400                                 continue;
3401                         }
3402
3403                         mdh = ni->ni_ping_mdh;
3404                         /*
3405                          * Invalidate the ni mdh in case it's deleted.
3406                          * We'll unlink the mdh in this case below.
3407                          */
3408                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3409                         nid = ni->ni_nid;
3410
3411                         /*
3412                          * remove the NI from the local queue and drop the
3413                          * it. The reason is that the NI could be
3414                          * deleted, and the way the code is structured,
3415                          * if we don't drop the NI then the deletion
3416                          * code will enter a loop waiting for the
3417                          * reference count to drop while holding the
3418                          * ln_mutex_lock(). When we look up the peer to
3419                          * ln_mutex_lock(). When we look up the peer to
3420                          * send to in lnet_select_pathway() we will try to
3421                          * lock the ln_mutex_lock() as well, leading to
3422                          * a deadlock. By dropping the refcount and
3423                          * removing it from the list, we allow for the NI
3424                          * to be removed, then we use the cached NID to
3425                          * look it up again. If it's gone, then we just
3426                          * continue examining the rest of the queue.
3427                          */
3428                         lnet_net_lock(0);
3429                         list_del_init(&ni->ni_recovery);
3430                         lnet_ni_decref_locked(ni, 0);
3431                         lnet_net_unlock(0);
3432
3433                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3434                         ev_info->mt_nid = nid;
3435                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3436                                             ev_info, the_lnet.ln_mt_handler,
3437                                             true);
3438                         /* lookup the nid again */
3439                         lnet_net_lock(0);
3440                         ni = lnet_nid2ni_locked(nid, 0);
3441                         if (!ni) {
3442                                 /*
3443                                  * the NI has been deleted when we dropped
3444                                  * the ref count
3445                                  */
3446                                 lnet_net_unlock(0);
3447                                 LNetMDUnlink(mdh);
3448                                 continue;
3449                         }
3450                         /*
3451                          * Same note as in lnet_recover_peer_nis(). When
3452                          * we're sending the ping, the NI is free to be
3453                          * deleted or manipulated. By this point it
3454                          * could've been added back on the recovery queue,
3455                          * and a refcount taken on it.
3456                          * So we can't just add it blindly again or we'll
3457                          * corrupt the queue. We must check under lock
3458                          * whether it is on any list; only if not do we add it
3459                          * to the processed list, which will eventually be
3460                          * spliced back on to the recovery queue.
3461                          */
3462                         ni->ni_ping_mdh = mdh;
3463                         if (list_empty(&ni->ni_recovery)) {
3464                                 list_add_tail(&ni->ni_recovery, &processed_list);
3465                                 lnet_ni_addref_locked(ni, 0);
3466                         }
3467                         lnet_net_unlock(0);
3468
3469                         lnet_ni_lock(ni);
3470                         if (rc)
3471                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3472                 }
3473                 lnet_ni_unlock(ni);
3474         }
3475
3476         /*
3477          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3478          * reexamined in the next iteration.
3479          */
3480         list_splice_init(&processed_list, &local_queue);
3481         lnet_net_lock(0);
3482         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3483         lnet_net_unlock(0);
3484 }
3485
3486 static int
3487 lnet_resendqs_create(void)
3488 {
3489         struct list_head **resendqs;
3490         resendqs = lnet_create_array_of_queues();
3491
3492         if (!resendqs)
3493                 return -ENOMEM;
3494
3495         lnet_net_lock(LNET_LOCK_EX);
3496         the_lnet.ln_mt_resendqs = resendqs;
3497         lnet_net_unlock(LNET_LOCK_EX);
3498
3499         return 0;
3500 }
3501
3502 static void
3503 lnet_clean_local_ni_recoveryq(void)
3504 {
3505         struct lnet_ni *ni;
3506
3507         /* This is only called when the monitor thread has stopped */
3508         lnet_net_lock(0);
3509
3510         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3511                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3512                                 struct lnet_ni, ni_recovery);
3513                 list_del_init(&ni->ni_recovery);
3514                 lnet_ni_lock(ni);
3515                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3516                 lnet_ni_unlock(ni);
3517                 lnet_ni_decref_locked(ni, 0);
3518         }
3519
3520         lnet_net_unlock(0);
3521 }
3522
3523 static void
3524 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3525                                      bool force)
3526 {
3527         struct lnet_handle_md recovery_mdh;
3528
3529         LNetInvalidateMDHandle(&recovery_mdh);
3530
3531         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3532                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3533                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3534         }
3535         spin_unlock(&lpni->lpni_lock);
3536         lnet_net_unlock(cpt);
3537         if (!LNetMDHandleIsInvalid(recovery_mdh))
3538                 LNetMDUnlink(recovery_mdh);
3539         lnet_net_lock(cpt);
3540         spin_lock(&lpni->lpni_lock);
3541 }
3542
3543 static void
3544 lnet_clean_peer_ni_recoveryq(void)
3545 {
3546         struct lnet_peer_ni *lpni, *tmp;
3547
3548         lnet_net_lock(LNET_LOCK_EX);
3549
3550         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3551                                  lpni_recovery) {
3552                 list_del_init(&lpni->lpni_recovery);
3553                 spin_lock(&lpni->lpni_lock);
3554                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3555                 spin_unlock(&lpni->lpni_lock);
3556                 lnet_peer_ni_decref_locked(lpni);
3557         }
3558
3559         lnet_net_unlock(LNET_LOCK_EX);
3560 }
3561
3562 static void
3563 lnet_clean_resendqs(void)
3564 {
3565         struct lnet_msg *msg, *tmp;
3566         LIST_HEAD(msgs);
3567         int i;
3568
3569         cfs_cpt_for_each(i, lnet_cpt_table()) {
3570                 lnet_net_lock(i);
3571                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3572                 lnet_net_unlock(i);
3573                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3574                         list_del_init(&msg->msg_list);
3575                         msg->msg_no_resend = true;
3576                         lnet_finalize(msg, -ESHUTDOWN);
3577                 }
3578         }
3579
3580         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3581 }
3582
3583 static void
3584 lnet_recover_peer_nis(void)
3585 {
3586         struct lnet_mt_event_info *ev_info;
3587         LIST_HEAD(processed_list);
3588         LIST_HEAD(local_queue);
3589         struct lnet_handle_md mdh;
3590         struct lnet_peer_ni *lpni;
3591         struct lnet_peer_ni *tmp;
3592         lnet_nid_t nid;
3593         int healthv;
3594         int rc;
3595
3596         /*
3597          * Always use cpt 0 for locking across all interactions with
3598          * ln_mt_peerNIRecovq
3599          */
3600         lnet_net_lock(0);
3601         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3602                          &local_queue);
3603         lnet_net_unlock(0);
3604
3605         list_for_each_entry_safe(lpni, tmp, &local_queue,
3606                                  lpni_recovery) {
3607                 /*
3608                  * The same protection strategy is used here as is in the
3609                  * local recovery case.
3610                  */
3611                 lnet_net_lock(0);
3612                 healthv = atomic_read(&lpni->lpni_healthv);
3613                 spin_lock(&lpni->lpni_lock);
3614                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3615                     healthv == LNET_MAX_HEALTH_VALUE) {
3616                         list_del_init(&lpni->lpni_recovery);
3617                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3618                         spin_unlock(&lpni->lpni_lock);
3619                         lnet_peer_ni_decref_locked(lpni);
3620                         lnet_net_unlock(0);
3621                         continue;
3622                 }
3623
3624                 /*
3625                  * If the peer NI has failed recovery we must unlink the
3626                  * md. But we want to keep the peer ni on the recovery
3627                  * queue so we can try to continue recovering it
3628                  */
3629                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3630                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3631                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3632                 }
3633
3634                 spin_unlock(&lpni->lpni_lock);
3635                 lnet_net_unlock(0);
3636
3637                 /*
3638                  * NOTE: we're racing with peer deletion from user space.
3639                  * It's possible that a peer is deleted after we check its
3640                  * state. In this case the recovery can create a new peer
3641                  */
3642                 spin_lock(&lpni->lpni_lock);
3643                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3644                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3645                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3646                         spin_unlock(&lpni->lpni_lock);
3647
3648                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3649                         if (!ev_info) {
3650                                 CERROR("out of memory. Can't recover %s\n",
3651                                        libcfs_nid2str(lpni->lpni_nid));
3652                                 spin_lock(&lpni->lpni_lock);
3653                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3654                                 spin_unlock(&lpni->lpni_lock);
3655                                 continue;
3656                         }
3657
3658                         /* look at the comments in lnet_recover_local_nis() */
3659                         mdh = lpni->lpni_recovery_ping_mdh;
3660                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3661                         nid = lpni->lpni_nid;
3662                         lnet_net_lock(0);
3663                         list_del_init(&lpni->lpni_recovery);
3664                         lnet_peer_ni_decref_locked(lpni);
3665                         lnet_net_unlock(0);
3666
3667                         ev_info->mt_type = MT_TYPE_PEER_NI;
3668                         ev_info->mt_nid = nid;
3669                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3670                                             ev_info, the_lnet.ln_mt_handler,
3671                                             true);
3672                         lnet_net_lock(0);
3673                         /*
3674                          * lnet_find_peer_ni_locked() grabs a refcount for
3675                          * us. No need to take it explicitly.
3676                          */
3677                         lpni = lnet_find_peer_ni_locked(nid);
3678                         if (!lpni) {
3679                                 lnet_net_unlock(0);
3680                                 LNetMDUnlink(mdh);
3681                                 continue;
3682                         }
3683
3684                         lpni->lpni_recovery_ping_mdh = mdh;
3685                         /*
3686                          * While we're unlocked the lpni could've been
3687                          * readded on the recovery queue. In this case we
3688                          * don't need to add it to the local queue, since
3689                          * it's already on there and the thread that added
3690                          * it would've incremented the refcount on the
3691                          * peer, which means we need to decref the refcount
3692                          * that was implicitly grabbed by find_peer_ni_locked.
3693                          * Otherwise, if the lpni is still not on
3694                          * the recovery queue, then we'll add it to the
3695                          * processed list.
3696                          */
3697                         if (list_empty(&lpni->lpni_recovery))
3698                                 list_add_tail(&lpni->lpni_recovery, &processed_list);
3699                         else
3700                                 lnet_peer_ni_decref_locked(lpni);
3701                         lnet_net_unlock(0);
3702
3703                         spin_lock(&lpni->lpni_lock);
3704                         if (rc)
3705                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3706                 }
3707                 spin_unlock(&lpni->lpni_lock);
3708         }
3709
3710         list_splice_init(&processed_list, &local_queue);
3711         lnet_net_lock(0);
3712         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3713         lnet_net_unlock(0);
3714 }
3715
3716 static int
3717 lnet_monitor_thread(void *arg)
3718 {
3719         time64_t recovery_timeout = 0;
3720         time64_t rsp_timeout = 0;
3721         int interval;
3722         time64_t now;
3723
3724         wait_for_completion(&the_lnet.ln_started);
3725         /*
3726          * The monitor thread takes care of the following:
3727          *  1. Checks the aliveness of routers.
3728          *  2. Checks for messages on the resend queue and resends
3729          *     them.
3730          *  3. Checks for NIs on the local recovery queue and
3731          *     pings them.
3732          *  4. Checks for peer NIs on the remote recovery queue
3733          *     and pings them.
3734          */
3735         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3736                 now = ktime_get_real_seconds();
3737
3738                 if (lnet_router_checker_active())
3739                         lnet_check_routers();
3740
3741                 lnet_resend_pending_msgs();
3742
3743                 if (now >= rsp_timeout) {
3744                         lnet_finalize_expired_responses();
3745                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3746                 }
3747
3748                 if (now >= recovery_timeout) {
3749                         lnet_recover_local_nis();
3750                         lnet_recover_peer_nis();
3751                         recovery_timeout = now + lnet_recovery_interval;
3752                 }
3753
3754                 /*
3755                  * TODO do we need to check if we should sleep without
3756                  * timeout?  Technically, an active system will always
3757                  * have messages in flight, so such a check would always
3758                  * evaluate to false. And on an idle system, do we care
3759                  * if we wake up every second? That said, we've seen
3760                  * cases where we get complaints that an idle thread
3761                  * is waking up unnecessarily.
3762                  *
3763                  * Take into account the current net_count when you wake
3764                  * up for alive router checking, since we need to check
3765                  * possibly as many networks as we have configured.
3766                  */
3767                 interval = min(lnet_recovery_interval,
3768                                min((unsigned int) alive_router_check_interval /
3769                                         lnet_current_net_count,
3770                                    lnet_transaction_timeout / 2));
3771                 wait_for_completion_interruptible_timeout(
3772                         &the_lnet.ln_mt_wait_complete,
3773                         cfs_time_seconds(interval));
3774                 /* Must re-init the completion before testing anything,
3775                  * including ln_mt_state.
3776                  */
3777                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3778         }
3779
3780         /* Shutting down */
3781         lnet_net_lock(LNET_LOCK_EX);
3782         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3783         lnet_net_unlock(LNET_LOCK_EX);
3784
3785         /* signal that the monitor thread is exiting */
3786         up(&the_lnet.ln_mt_signal);
3787
3788         return 0;
3789 }
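
/*
 * Worked example of the wakeup interval computed above, assuming the
 * module defaults lnet_recovery_interval = 1, alive_router_check_interval
 * = 60 and lnet_transaction_timeout = 50, with two configured nets:
 *
 *   interval = min(1, min(60 / 2, 50 / 2)) = 1 second
 *
 * The recovery interval is normally the limiting term, which is why an
 * otherwise idle monitor thread wakes roughly once a second (see the TODO
 * above).
 */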
3790
3791 /*
3792  * lnet_send_ping
3793  * Sends a ping.
3794  * Returns 0 on success.
3795  * Returns > 0 if buffer allocation or LNetMDBind() fails (as a positive errno).
3796  * Returns < 0 if the destination NID is invalid or LNetGet() fails.
3797  */
3798 int
3799 lnet_send_ping(lnet_nid_t dest_nid,
3800                struct lnet_handle_md *mdh, int nnis,
3801                void *user_data, lnet_handler_t handler, bool recovery)
3802 {
3803         struct lnet_md md = { NULL };
3804         struct lnet_process_id id;
3805         struct lnet_ping_buffer *pbuf;
3806         int rc;
3807
3808         if (dest_nid == LNET_NID_ANY) {
3809                 rc = -EHOSTUNREACH;
3810                 goto fail_error;
3811         }
3812
3813         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3814         if (!pbuf) {
3815                 rc = ENOMEM; /* positive, per the return convention above */
3816                 goto fail_error;
3817         }
3818
3819         /* initialize md content */
3820         md.start     = &pbuf->pb_info;
3821         md.length    = LNET_PING_INFO_SIZE(nnis);
3822         md.threshold = 2; /* GET/REPLY */
3823         md.max_size  = 0;
3824         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3825         md.user_ptr  = user_data;
3826         md.handler   = handler;
3827
3828         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3829         if (rc) {
3830                 lnet_ping_buffer_decref(pbuf);
3831                 CERROR("Can't bind MD: %d\n", rc);
3832                 rc = -rc; /* change the rc to positive */
3833                 goto fail_error;
3834         }
3835         id.pid = LNET_PID_LUSTRE;
3836         id.nid = dest_nid;
3837
3838         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3839                      LNET_RESERVED_PORTAL,
3840                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3841
3842         if (rc)
3843                 goto fail_unlink_md;
3844
3845         return 0;
3846
3847 fail_unlink_md:
3848         LNetMDUnlink(*mdh);
3849         LNetInvalidateMDHandle(mdh);
3850 fail_error:
3851         return rc;
3852 }
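
/*
 * Note: when LNetGet() itself fails, lnet_send_ping() has already
 * unlinked the MD and invalidated *mdh (fail_unlink_md above); on earlier
 * failures *mdh is left untouched. That is why the recovery paths above
 * re-look-up the NID after the call and only unlink the handle themselves
 * when the NI or peer has disappeared in the meantime.
 */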
3853
3854 static void
3855 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3856                            int status, bool send, bool unlink_event)
3857 {
3858         lnet_nid_t nid = ev_info->mt_nid;
3859
3860         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3861                 struct lnet_ni *ni;
3862
3863                 lnet_net_lock(0);
3864                 ni = lnet_nid2ni_locked(nid, 0);
3865                 if (!ni) {
3866                         lnet_net_unlock(0);
3867                         return;
3868                 }
3869                 lnet_ni_lock(ni);
3870                 if (!send || status != 0)
3871                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3872                 if (status)
3873                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3874                 lnet_ni_unlock(ni);
3875                 lnet_net_unlock(0);
3876
3877                 if (status != 0) {
3878                         CERROR("local NI (%s) recovery failed with %d\n",
3879                                libcfs_nid2str(nid), status);
3880                         return;
3881                 }
3882                 /*
3883                  * need to increment healthv for the ni here, because in
3884                  * the lnet_finalize() path we don't have access to this
3885                  * NI. And in order to get access to it, we'll need to
3886                  * carry forward too much information.
3887                  * In the peer case, it'll naturally be incremented
3888                  */
3889                 if (!unlink_event)
3890                         lnet_inc_healthv(&ni->ni_healthv,
3891                                          lnet_health_sensitivity);
3892         } else {
3893                 struct lnet_peer_ni *lpni;
3894                 int cpt;
3895
3896                 cpt = lnet_net_lock_current();
3897                 lpni = lnet_find_peer_ni_locked(nid);
3898                 if (!lpni) {
3899                         lnet_net_unlock(cpt);
3900                         return;
3901                 }
3902                 spin_lock(&lpni->lpni_lock);
3903                 if (!send || status != 0)
3904                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3905                 if (status)
3906                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3907                 spin_unlock(&lpni->lpni_lock);
3908                 lnet_peer_ni_decref_locked(lpni);
3909                 lnet_net_unlock(cpt);
3910
3911                 if (status != 0)
3912                         CERROR("peer NI (%s) recovery failed with %d\n",
3913                                libcfs_nid2str(nid), status);
3914         }
3915 }
3916
3917 void
3918 lnet_mt_event_handler(struct lnet_event *event)
3919 {
3920         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
3921         struct lnet_ping_buffer *pbuf;
3922
3923         /* TODO: remove assert */
3924         LASSERT(event->type == LNET_EVENT_REPLY ||
3925                 event->type == LNET_EVENT_SEND ||
3926                 event->type == LNET_EVENT_UNLINK);
3927
3928         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3929                event->status);
3930
3931         switch (event->type) {
3932         case LNET_EVENT_UNLINK:
3933                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3934                        libcfs_nid2str(ev_info->mt_nid));
3935                 /* fallthrough */
3936         case LNET_EVENT_REPLY:
3937                 lnet_handle_recovery_reply(ev_info, event->status, false,
3938                                            event->type == LNET_EVENT_UNLINK);
3939                 break;
3940         case LNET_EVENT_SEND:
3941                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3942                                libcfs_nid2str(ev_info->mt_nid),
3943                                (event->status) ? "unsuccessfully" :
3944                                "successfully", event->status);
3945                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
3946                 break;
3947         default:
3948                 CERROR("Unexpected event: %d\n", event->type);
3949                 break;
3950         }
3951         if (event->unlinked) {
3952                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3953                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3954                 lnet_ping_buffer_decref(pbuf);
3955         }
3956 }
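
/*
 * Summary of the recovery event handling above:
 *
 *   LNET_EVENT_SEND:   a failed send clears RECOVERY_PENDING and sets
 *                      RECOVERY_FAILED; a successful send leaves PENDING
 *                      set while we wait for the REPLY
 *   LNET_EVENT_REPLY:  always clears PENDING; on success the local NI
 *                      health is incremented here (peer NI health is
 *                      incremented in the normal finalize path)
 *   LNET_EVENT_UNLINK: handled as a REPLY, but without the health bump
 *
 * Once the MD is unlinked, the ev_info block and the ping buffer are freed.
 */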
3957
3958 static int
3959 lnet_rsp_tracker_create(void)
3960 {
3961         struct list_head **rstqs;
3962         rstqs = lnet_create_array_of_queues();
3963
3964         if (!rstqs)
3965                 return -ENOMEM;
3966
3967         the_lnet.ln_mt_rstq = rstqs;
3968
3969         return 0;
3970 }
3971
3972 static void
3973 lnet_rsp_tracker_clean(void)
3974 {
3975         lnet_finalize_expired_responses();
3976
3977         cfs_percpt_free(the_lnet.ln_mt_rstq);
3978         the_lnet.ln_mt_rstq = NULL;
3979 }
3980
3981 int lnet_monitor_thr_start(void)
3982 {
3983         int rc = 0;
3984         struct task_struct *task;
3985
3986         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
3987                 return -EALREADY;
3988
3989         rc = lnet_resendqs_create();
3990         if (rc)
3991                 return rc;
3992
3993         rc = lnet_rsp_tracker_create();
3994         if (rc)
3995                 goto clean_queues;
3996
3997         sema_init(&the_lnet.ln_mt_signal, 0);
3998
3999         lnet_net_lock(LNET_LOCK_EX);
4000         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4001         lnet_net_unlock(LNET_LOCK_EX);
4002         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4003         if (IS_ERR(task)) {
4004                 rc = PTR_ERR(task);
4005                 CERROR("Can't start monitor thread: %d\n", rc);
4006                 goto clean_thread;
4007         }
4008
4009         return 0;
4010
4011 clean_thread:
4012         lnet_net_lock(LNET_LOCK_EX);
4013         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4014         lnet_net_unlock(LNET_LOCK_EX);
4015         /* block until event callback signals exit */
4016         down(&the_lnet.ln_mt_signal);
4017         /* clean up */
4018         lnet_net_lock(LNET_LOCK_EX);
4019         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4020         lnet_net_unlock(LNET_LOCK_EX);
4021         lnet_rsp_tracker_clean();
4022         lnet_clean_local_ni_recoveryq();
4023         lnet_clean_peer_ni_recoveryq();
4024         lnet_clean_resendqs();
4025         the_lnet.ln_mt_handler = NULL;
4026         return rc;
4027 clean_queues:
4028         lnet_rsp_tracker_clean();
4029         lnet_clean_local_ni_recoveryq();
4030         lnet_clean_peer_ni_recoveryq();
4031         lnet_clean_resendqs();
4032         return rc;
4033 }
4034
4035 void lnet_monitor_thr_stop(void)
4036 {
4037         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4038                 return;
4039
4040         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4041         lnet_net_lock(LNET_LOCK_EX);
4042         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4043         lnet_net_unlock(LNET_LOCK_EX);
4044
4045         /* tell the monitor thread that we're shutting down */
4046         complete(&the_lnet.ln_mt_wait_complete);
4047
4048         /* block until monitor thread signals that it's done */
4049         down(&the_lnet.ln_mt_signal);
4050         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4051
4052         /* perform cleanup tasks */
4053         lnet_rsp_tracker_clean();
4054         lnet_clean_local_ni_recoveryq();
4055         lnet_clean_peer_ni_recoveryq();
4056         lnet_clean_resendqs();
4057 }
4058
4059 void
4060 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4061                   __u32 msg_type)
4062 {
4063         lnet_net_lock(cpt);
4064         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4065         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4066         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4067         lnet_net_unlock(cpt);
4068
4069         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4070 }
4071
4072 static void
4073 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4074 {
4075         struct lnet_hdr *hdr = &msg->msg_hdr;
4076
4077         if (msg->msg_wanted != 0)
4078                 lnet_setpayloadbuffer(msg);
4079
4080         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4081
4082         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4083          * it back into the ACK during lnet_finalize() */
4084         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4085                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4086
4087         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4088                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4089 }
4090
4091 static int
4092 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4093 {
4094         struct lnet_hdr         *hdr = &msg->msg_hdr;
4095         struct lnet_match_info  info;
4096         int                     rc;
4097         bool                    ready_delay;
4098
4099         /* Convert put fields to host byte order */
4100         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4101         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4102         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4103
4104         /* Primary peer NID. */
4105         info.mi_id.nid  = msg->msg_initiator;
4106         info.mi_id.pid  = hdr->src_pid;
4107         info.mi_opc     = LNET_MD_OP_PUT;
4108         info.mi_portal  = hdr->msg.put.ptl_index;
4109         info.mi_rlength = hdr->payload_length;
4110         info.mi_roffset = hdr->msg.put.offset;
4111         info.mi_mbits   = hdr->msg.put.match_bits;
4112         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4113
4114         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4115         ready_delay = msg->msg_rx_ready_delay;
4116
4117  again:
4118         rc = lnet_ptl_match_md(&info, msg);
4119         switch (rc) {
4120         default:
4121                 LBUG();
4122
4123         case LNET_MATCHMD_OK:
4124                 lnet_recv_put(ni, msg);
4125                 return 0;
4126
4127         case LNET_MATCHMD_NONE:
4128                 if (ready_delay)
4129                         /* no eager_recv, or it was already called; the
4130                          * message should be on the delayed list */
4131                         return 0;
4132
4133                 rc = lnet_ni_eager_recv(ni, msg);
4134                 if (rc == 0) {
4135                         ready_delay = true;
4136                         goto again;
4137                 }
4138                 /* fall through */
4139
4140         case LNET_MATCHMD_DROP:
4141                 CNETERR("Dropping PUT from %s portal %d match %llu"
4142                         " offset %d length %d: %d\n",
4143                         libcfs_id2str(info.mi_id), info.mi_portal,
4144                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4145
4146                 return -ENOENT; /* -ve: OK but no match */
4147         }
4148 }
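
/*
 * Editor's sketch (illustrative only): the retry protocol used by
 * lnet_parse_put() above. A PUT is matched at most twice - immediately,
 * and once more after the LND's eager receive has pulled the payload off
 * the wire - so a second LNET_MATCHMD_NONE means the message has already
 * been parked on the portal's delayed list. 'example_match_twice' is a
 * hypothetical helper, not an LNet API.
 */
static int
example_match_twice(struct lnet_ni *ni, struct lnet_msg *msg,
                    struct lnet_match_info *info)
{
        bool retried = false;
        int rc;

again:
        rc = lnet_ptl_match_md(info, msg);
        if (rc == LNET_MATCHMD_NONE && !retried &&
            !msg->msg_rx_ready_delay && lnet_ni_eager_recv(ni, msg) == 0) {
                retried = true;
                goto again;
        }
        return rc;
}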
4149
4150 static int
4151 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4152 {
4153         struct lnet_match_info info;
4154         struct lnet_hdr *hdr = &msg->msg_hdr;
4155         struct lnet_process_id source_id;
4156         struct lnet_handle_wire reply_wmd;
4157         int rc;
4158
4159         /* Convert get fields to host byte order */
4160         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4161         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4162         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4163         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4164
4165         source_id.nid = hdr->src_nid;
4166         source_id.pid = hdr->src_pid;
4167         /* Primary peer NID */
4168         info.mi_id.nid  = msg->msg_initiator;
4169         info.mi_id.pid  = hdr->src_pid;
4170         info.mi_opc     = LNET_MD_OP_GET;
4171         info.mi_portal  = hdr->msg.get.ptl_index;
4172         info.mi_rlength = hdr->msg.get.sink_length;
4173         info.mi_roffset = hdr->msg.get.src_offset;
4174         info.mi_mbits   = hdr->msg.get.match_bits;
4175         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4176
4177         rc = lnet_ptl_match_md(&info, msg);
4178         if (rc == LNET_MATCHMD_DROP) {
4179                 CNETERR("Dropping GET from %s portal %d match %llu"
4180                         " offset %d length %d\n",
4181                         libcfs_id2str(info.mi_id), info.mi_portal,
4182                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4183                 return -ENOENT; /* -ve: OK but no match */
4184         }
4185
4186         LASSERT(rc == LNET_MATCHMD_OK);
4187
4188         lnet_build_msg_event(msg, LNET_EVENT_GET);
4189
4190         reply_wmd = hdr->msg.get.return_wmd;
4191
4192         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4193                        msg->msg_offset, msg->msg_wanted);
4194
4195         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4196
4197         if (rdma_get) {
4198                 /* The LND completes the REPLY from her recv procedure */
4199                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4200                              msg->msg_offset, msg->msg_len, msg->msg_len);
4201                 return 0;
4202         }
4203
4204         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4205         msg->msg_receiving = 0;
4206
4207         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
4208         if (rc < 0) {
4209                 /* didn't get as far as lnet_ni_send() */
4210                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4211                        libcfs_nid2str(ni->ni_nid),
4212                        libcfs_id2str(info.mi_id), rc);
4213
4214                 lnet_finalize(msg, rc);
4215         }
4216
4217         return 0;
4218 }
4219
4220 static int
4221 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4222 {
4223         void *private = msg->msg_private;
4224         struct lnet_hdr *hdr = &msg->msg_hdr;
4225         struct lnet_process_id src = {0};
4226         struct lnet_libmd *md;
4227         unsigned int rlength;
4228         unsigned int mlength;
4229         int cpt;
4230
4231         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4232         lnet_res_lock(cpt);
4233
4234         src.nid = hdr->src_nid;
4235         src.pid = hdr->src_pid;
4236
4237         /* NB handles only looked up by creator (no flips) */
4238         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4239         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4240                 CNETERR("%s: Dropping REPLY from %s for %s "
4241                         "MD %#llx.%#llx\n",
4242                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4243                         (md == NULL) ? "invalid" : "inactive",
4244                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4245                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4246                 if (md != NULL && md->md_me != NULL)
4247                         CERROR("REPLY MD also attached to portal %d\n",
4248                                md->md_me->me_portal);
4249
4250                 lnet_res_unlock(cpt);
4251                 return -ENOENT; /* -ve: OK but no match */
4252         }
4253
4254         LASSERT(md->md_offset == 0);
4255
4256         rlength = hdr->payload_length;
4257         mlength = min(rlength, md->md_length);
4258
4259         if (mlength < rlength &&
4260             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4261                 CNETERR("%s: Dropping REPLY from %s length %d "
4262                         "for MD %#llx would overflow (%d)\n",
4263                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4264                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4265                         mlength);
4266                 lnet_res_unlock(cpt);
4267                 return -ENOENT; /* -ve: OK but no match */
4268         }
4269
4270         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4271                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4272                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4273
4274         lnet_msg_attach_md(msg, md, 0, mlength);
4275
4276         if (mlength != 0)
4277                 lnet_setpayloadbuffer(msg);
4278
4279         lnet_res_unlock(cpt);
4280
4281         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4282
4283         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4284         return 0;
4285 }
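
/*
 * Editor's sketch (illustrative only): the truncation rule applied by
 * lnet_parse_reply() above, in isolation. Returns the number of bytes
 * that will be delivered, or -ENOENT when the reply would overflow an MD
 * that does not allow truncation. 'example_reply_mlength' is a
 * hypothetical name.
 */
static int
example_reply_mlength(unsigned int rlength, unsigned int md_length,
                      unsigned int md_options)
{
        unsigned int mlength = min(rlength, md_length);

        if (mlength < rlength && (md_options & LNET_MD_TRUNCATE) == 0)
                return -ENOENT; /* drop: reply larger than the sink MD */
        return mlength;
}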
4286
4287 static int
4288 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4289 {
4290         struct lnet_hdr *hdr = &msg->msg_hdr;
4291         struct lnet_process_id src = {0};
4292         struct lnet_libmd *md;
4293         int cpt;
4294
4295         src.nid = hdr->src_nid;
4296         src.pid = hdr->src_pid;
4297
4298         /* Convert ack fields to host byte order */
4299         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4300         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4301
4302         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4303         lnet_res_lock(cpt);
4304
4305         /* NB handles only looked up by creator (no flips) */
4306         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4307         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4308                 /* Don't moan; this is expected */
4309                 CDEBUG(D_NET,
4310                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4311                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4312                        (md == NULL) ? "invalid" : "inactive",
4313                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4314                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4315                 if (md != NULL && md->md_me != NULL)
4316                         CERROR("Source MD also attached to portal %d\n",
4317                                md->md_me->me_portal);
4318
4319                 lnet_res_unlock(cpt);
4320                 return -ENOENT;                  /* -ve! */
4321         }
4322
4323         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4324                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4325                hdr->msg.ack.dst_wmd.wh_object_cookie);
4326
4327         lnet_msg_attach_md(msg, md, 0, 0);
4328
4329         lnet_res_unlock(cpt);
4330
4331         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4332
4333         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4334         return 0;
4335 }
4336
4337 /**
4338  * \retval LNET_CREDIT_OK       If \a msg is forwarded
 * \retval LNET_CREDIT_WAIT     If \a msg is blocked waiting for a router buffer
4340  * \retval -ve                  error code
4341  */
4342 int
4343 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4344 {
4345         int     rc = 0;
4346
4347         if (!the_lnet.ln_routing)
4348                 return -ECANCELED;
4349
4350         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4351             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4352                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4353                         msg->msg_rx_ready_delay = 1;
4354                 } else {
4355                         lnet_net_unlock(msg->msg_rx_cpt);
4356                         rc = lnet_ni_eager_recv(ni, msg);
4357                         lnet_net_lock(msg->msg_rx_cpt);
4358                 }
4359         }
4360
4361         if (rc == 0)
4362                 rc = lnet_post_routed_recv_locked(msg, 0);
4363         return rc;
4364 }
4365
4366 int
4367 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4368 {
4369         int     rc;
4370
4371         switch (msg->msg_type) {
4372         case LNET_MSG_ACK:
4373                 rc = lnet_parse_ack(ni, msg);
4374                 break;
4375         case LNET_MSG_PUT:
4376                 rc = lnet_parse_put(ni, msg);
4377                 break;
4378         case LNET_MSG_GET:
4379                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4380                 break;
4381         case LNET_MSG_REPLY:
4382                 rc = lnet_parse_reply(ni, msg);
4383                 break;
        default: /* defensive: lnet_parse() rejects all other types */
4385                 LASSERT(0);
4386                 return -EPROTO;
4387         }
4388
4389         LASSERT(rc == 0 || rc == -ENOENT);
4390         return rc;
4391 }
4392
char *
lnet_msgtyp2str(int type)
{
        switch (type) {
        case LNET_MSG_ACK:
                return "ACK";
        case LNET_MSG_PUT:
                return "PUT";
        case LNET_MSG_GET:
                return "GET";
        case LNET_MSG_REPLY:
                return "REPLY";
        case LNET_MSG_HELLO:
                return "HELLO";
        default:
                return "<UNKNOWN>";
        }
}
4411
4412 int
4413 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4414            void *private, int rdma_req)
4415 {
4416         struct lnet_peer_ni *lpni;
4417         struct lnet_msg *msg;
4418         __u32 payload_length;
4419         lnet_pid_t dest_pid;
4420         lnet_nid_t dest_nid;
4421         lnet_nid_t src_nid;
4422         bool push = false;
4423         int for_me;
4424         __u32 type;
4425         int rc = 0;
4426         int cpt;
4427
        LASSERT(!in_interrupt());
4429
4430         type = le32_to_cpu(hdr->type);
4431         src_nid = le64_to_cpu(hdr->src_nid);
4432         dest_nid = le64_to_cpu(hdr->dest_nid);
4433         dest_pid = le32_to_cpu(hdr->dest_pid);
4434         payload_length = le32_to_cpu(hdr->payload_length);
4435
4436         for_me = (ni->ni_nid == dest_nid);
4437         cpt = lnet_cpt_of_nid(from_nid, ni);
4438
4439         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4440                 libcfs_nid2str(dest_nid),
4441                 libcfs_nid2str(ni->ni_nid),
4442                 libcfs_nid2str(src_nid),
4443                 lnet_msgtyp2str(type),
4444                 (for_me) ? "for me" : "routed");
4445
4446         switch (type) {
4447         case LNET_MSG_ACK:
4448         case LNET_MSG_GET:
4449                 if (payload_length > 0) {
4450                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4451                                libcfs_nid2str(from_nid),
4452                                libcfs_nid2str(src_nid),
4453                                lnet_msgtyp2str(type), payload_length);
4454                         return -EPROTO;
4455                 }
4456                 break;
4457
4458         case LNET_MSG_PUT:
4459         case LNET_MSG_REPLY:
4460                 if (payload_length >
4461                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4462                         CERROR("%s, src %s: bad %s payload %d "
4463                                "(%d max expected)\n",
4464                                libcfs_nid2str(from_nid),
4465                                libcfs_nid2str(src_nid),
4466                                lnet_msgtyp2str(type),
4467                                payload_length,
4468                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4469                         return -EPROTO;
4470                 }
4471                 break;
4472
4473         default:
4474                 CERROR("%s, src %s: Bad message type 0x%x\n",
4475                        libcfs_nid2str(from_nid),
4476                        libcfs_nid2str(src_nid), type);
4477                 return -EPROTO;
4478         }
4479
4480         if (the_lnet.ln_routing &&
4481             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4482                 lnet_ni_lock(ni);
4483                 spin_lock(&ni->ni_net->net_lock);
4484                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4485                 spin_unlock(&ni->ni_net->net_lock);
4486                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4487                 lnet_ni_unlock(ni);
4488         }
4489
4490         if (push)
4491                 lnet_push_update_to_peers(1);
4492
4493         /* Regard a bad destination NID as a protocol error.  Senders should
4494          * know what they're doing; if they don't they're misconfigured, buggy
4495          * or malicious so we chop them off at the knees :) */
4496
4497         if (!for_me) {
4498                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4499                         /* should have gone direct */
4500                         CERROR("%s, src %s: Bad dest nid %s "
4501                                "(should have been sent direct)\n",
4502                                 libcfs_nid2str(from_nid),
4503                                 libcfs_nid2str(src_nid),
4504                                 libcfs_nid2str(dest_nid));
4505                         return -EPROTO;
4506                 }
4507
4508                 if (lnet_islocalnid(dest_nid)) {
4509                         /* dest is another local NI; sender should have used
4510                          * this node's NID on its own network */
4511                         CERROR("%s, src %s: Bad dest nid %s "
4512                                "(it's my nid but on a different network)\n",
4513                                 libcfs_nid2str(from_nid),
4514                                 libcfs_nid2str(src_nid),
4515                                 libcfs_nid2str(dest_nid));
4516                         return -EPROTO;
4517                 }
4518
4519                 if (rdma_req && type == LNET_MSG_GET) {
4520                         CERROR("%s, src %s: Bad optimized GET for %s "
4521                                "(final destination must be me)\n",
4522                                 libcfs_nid2str(from_nid),
4523                                 libcfs_nid2str(src_nid),
4524                                 libcfs_nid2str(dest_nid));
4525                         return -EPROTO;
4526                 }
4527
4528                 if (!the_lnet.ln_routing) {
4529                         CERROR("%s, src %s: Dropping message for %s "
4530                                "(routing not enabled)\n",
4531                                 libcfs_nid2str(from_nid),
4532                                 libcfs_nid2str(src_nid),
4533                                 libcfs_nid2str(dest_nid));
4534                         goto drop;
4535                 }
4536         }
4537
4538         /* Message looks OK; we're not going to return an error, so we MUST
4539          * call back lnd_recv() come what may... */
4540
4541         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4542             fail_peer(src_nid, 0)) {                    /* shall we now? */
4543                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4544                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4545                        lnet_msgtyp2str(type));
4546                 goto drop;
4547         }
4548
4549         if (!list_empty(&the_lnet.ln_drop_rules) &&
4550             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4551                 CDEBUG(D_NET,
4552                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4553                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4554                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4555                 goto drop;
4556         }
4557
4558         if (lnet_drop_asym_route && for_me &&
4559             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4560                 struct lnet_net *net;
4561                 struct lnet_remotenet *rnet;
4562                 bool found = true;
4563
4564                 /* we are dealing with a routed message,
4565                  * so see if route to reach src_nid goes through from_nid
4566                  */
4567                 lnet_net_lock(cpt);
4568                 net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
4569                 if (!net) {
4570                         lnet_net_unlock(cpt);
4571                         CERROR("net %s not found\n",
4572                                libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
4573                         return -EPROTO;
4574                 }
4575
4576                 rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
4577                 if (rnet) {
4578                         struct lnet_peer *gw = NULL;
4579                         struct lnet_peer_ni *lpni = NULL;
4580                         struct lnet_route *route;
4581
                        list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
                                found = false;
                                gw = route->lr_gateway;
                                if (route->lr_lnet != net->net_id)
                                        continue;
                                /*
                                 * if the nid is one of the gateway's NIDs
                                 * then this is a valid gateway
                                 */
                                while ((lpni = lnet_get_next_peer_ni_locked(gw,
                                                NULL, lpni)) != NULL) {
                                        if (lpni->lpni_nid == from_nid) {
                                                found = true;
                                                break;
                                        }
                                }
                                /* stop at the first route with a matching
                                 * gateway NID so a later route cannot reset
                                 * 'found' */
                                if (found)
                                        break;
                        }
4599                 }
4600                 lnet_net_unlock(cpt);
4601                 if (!found) {
4602                         /* we would not use from_nid to route a message to
4603                          * src_nid
4604                          * => asymmetric routing detected but forbidden
4605                          */
4606                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4607                                libcfs_nid2str(from_nid),
4608                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4609                         goto drop;
4610                 }
4611         }
4612
4613         msg = lnet_msg_alloc();
4614         if (msg == NULL) {
4615                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4616                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4617                        lnet_msgtyp2str(type));
4618                 goto drop;
4619         }
4620
        /* msg is zeroed in lnet_msg_alloc(); i.e. flags all clear,
         * pointers NULL, etc. */
4623
4624         msg->msg_type = type;
4625         msg->msg_private = private;
4626         msg->msg_receiving = 1;
4627         msg->msg_rdma_get = rdma_req;
4628         msg->msg_len = msg->msg_wanted = payload_length;
4629         msg->msg_offset = 0;
4630         msg->msg_hdr = *hdr;
4631         /* for building message event */
4632         msg->msg_from = from_nid;
4633         if (!for_me) {
4634                 msg->msg_target.pid     = dest_pid;
4635                 msg->msg_target.nid     = dest_nid;
4636                 msg->msg_routing        = 1;
4637
4638         } else {
4639                 /* convert common msg->hdr fields to host byteorder */
4640                 msg->msg_hdr.type       = type;
4641                 msg->msg_hdr.src_nid    = src_nid;
4642                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4643                 msg->msg_hdr.dest_nid   = dest_nid;
4644                 msg->msg_hdr.dest_pid   = dest_pid;
4645                 msg->msg_hdr.payload_length = payload_length;
4646         }
4647
4648         lnet_net_lock(cpt);
4649         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4650         if (IS_ERR(lpni)) {
4651                 lnet_net_unlock(cpt);
4652                 CERROR("%s, src %s: Dropping %s "
4653                        "(error %ld looking up sender)\n",
4654                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4655                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4656                 lnet_msg_free(msg);
                if (PTR_ERR(lpni) == -ESHUTDOWN)
4658                         /* We are shutting down.  Don't do anything more */
4659                         return 0;
4660                 goto drop;
4661         }
4662
4663         if (the_lnet.ln_routing)
4664                 lpni->lpni_last_alive = ktime_get_seconds();
4665
4666         msg->msg_rxpeer = lpni;
4667         msg->msg_rxni = ni;
4668         lnet_ni_addref_locked(ni, cpt);
4669         /* Multi-Rail: Primary NID of source. */
4670         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4671
4672         /*
4673          * mark the status of this lpni as UP since we received a message
4674          * from it. The ping response reports back the ns_status which is
4675          * marked on the remote as up or down and we cache it here.
4676          */
4677         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4678
4679         lnet_msg_commit(msg, cpt);
4680
4681         /* message delay simulation */
4682         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4683                      lnet_delay_rule_match_locked(hdr, msg))) {
4684                 lnet_net_unlock(cpt);
4685                 return 0;
4686         }
4687
4688         if (!for_me) {
4689                 rc = lnet_parse_forward_locked(ni, msg);
4690                 lnet_net_unlock(cpt);
4691
4692                 if (rc < 0)
4693                         goto free_drop;
4694
4695                 if (rc == LNET_CREDIT_OK) {
4696                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4697                                      0, payload_length, payload_length);
4698                 }
4699                 return 0;
4700         }
4701
4702         lnet_net_unlock(cpt);
4703
4704         rc = lnet_parse_local(ni, msg);
4705         if (rc != 0)
4706                 goto free_drop;
4707         return 0;
4708
4709  free_drop:
4710         LASSERT(msg->msg_md == NULL);
4711         lnet_finalize(msg, rc);
4712
4713  drop:
4714         lnet_drop_message(ni, cpt, private, payload_length, type);
4715         return 0;
4716 }
4717 EXPORT_SYMBOL(lnet_parse);
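
/*
 * Editor's sketch (illustrative only): the receive path of a hypothetical
 * LND. Each incoming header is handed to lnet_parse(); a return of 0 means
 * LNet has taken over the message (and will call back lnd_recv() as
 * needed), while a negative return is a protocol error and the message
 * never entered LNet. 'example_lnd_rx' and 'rx_private' are hypothetical
 * names.
 */
static int
example_lnd_rx(struct lnet_ni *ni, struct lnet_hdr *hdr,
               lnet_nid_t from_nid, void *rx_private)
{
        int rc;

        /* last argument 0: this is not an optimized RDMA GET */
        rc = lnet_parse(ni, hdr, from_nid, rx_private, 0);
        if (rc < 0)
                CERROR("dropping message from %s: %d\n",
                       libcfs_nid2str(from_nid), rc);
        return rc;
}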
4718
4719 void
4720 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4721 {
4722         while (!list_empty(head)) {
4723                 struct lnet_process_id id = {0};
4724                 struct lnet_msg *msg;
4725
4726                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4727                 list_del(&msg->msg_list);
4728
4729                 id.nid = msg->msg_hdr.src_nid;
4730                 id.pid = msg->msg_hdr.src_pid;
4731
4732                 LASSERT(msg->msg_md == NULL);
4733                 LASSERT(msg->msg_rx_delayed);
4734                 LASSERT(msg->msg_rxpeer != NULL);
4735                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4736
4737                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4738                       " offset %d length %d: %s\n",
4739                       libcfs_id2str(id),
4740                       msg->msg_hdr.msg.put.ptl_index,
4741                       msg->msg_hdr.msg.put.match_bits,
4742                       msg->msg_hdr.msg.put.offset,
4743                       msg->msg_hdr.payload_length, reason);
4744
4745                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4746                  * called lnet_drop_message(), so I just hang onto msg as well
4747                  * until that's done */
4748
4749                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4750                                   msg->msg_private, msg->msg_len,
4751                                   msg->msg_type);
4752
4753                 msg->msg_no_resend = true;
                /*
                 * NB: the message will not generate an event because it has
                 * no attached MD, but we still pass an error code so that
                 * lnet_msg_decommit() can skip counter operations and other
                 * checks.
                 */
4759                 lnet_finalize(msg, -ENOENT);
4760         }
4761 }
4762
4763 void
4764 lnet_recv_delayed_msg_list(struct list_head *head)
4765 {
4766         while (!list_empty(head)) {
4767                 struct lnet_msg *msg;
4768                 struct lnet_process_id id;
4769
4770                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4771                 list_del(&msg->msg_list);
4772
4773                 /* md won't disappear under me, since each msg
4774                  * holds a ref on it */
4775
4776                 id.nid = msg->msg_hdr.src_nid;
4777                 id.pid = msg->msg_hdr.src_pid;
4778
4779                 LASSERT(msg->msg_rx_delayed);
4780                 LASSERT(msg->msg_md != NULL);
4781                 LASSERT(msg->msg_rxpeer != NULL);
4782                 LASSERT(msg->msg_rxni != NULL);
4783                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4784
4785                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4786                        "match %llu offset %d length %d.\n",
4787                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4788                         msg->msg_hdr.msg.put.match_bits,
4789                         msg->msg_hdr.msg.put.offset,
4790                         msg->msg_hdr.payload_length);
4791
4792                 lnet_recv_put(msg->msg_rxni, msg);
4793         }
4794 }
4795
4796 static void
4797 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4798                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4799 {
4800         s64 timeout_ns;
4801         struct lnet_rsp_tracker *local_rspt;
4802
4803         /*
4804          * MD has a refcount taken by message so it's not going away.
4805          * The MD however can be looked up. We need to secure the access
4806          * to the md_rspt_ptr by taking the res_lock.
4807          * The rspt can be accessed without protection up to when it gets
4808          * added to the list.
4809          */
4810
4811         lnet_res_lock(cpt);
4812         local_rspt = md->md_rspt_ptr;
4813         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4814         if (local_rspt != NULL) {
4815                 /*
4816                  * we already have an rspt attached to the md, so we'll
4817                  * update the deadline on that one.
4818                  */
4819                 lnet_rspt_free(rspt, cpt);
4820         } else {
4821                 /* new md */
4822                 rspt->rspt_mdh = mdh;
4823                 rspt->rspt_cpt = cpt;
4824                 /* store the rspt so we can access it when we get the REPLY */
4825                 md->md_rspt_ptr = rspt;
4826                 local_rspt = rspt;
4827         }
4828         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4829
4830         /*
4831          * add to the list of tracked responses. It's added to tail of the
4832          * list in order to expire all the older entries first.
4833          */
4834         lnet_net_lock(cpt);
4835         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4836         lnet_net_unlock(cpt);
4837         lnet_res_unlock(cpt);
4838 }
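
/*
 * Editor's sketch (illustrative only): the deadline comparison implied by
 * rspt_deadline above. The actual expiry scan over ln_mt_rstq is done by
 * the LNet monitor thread elsewhere; this hypothetical helper only shows
 * when a tracked response counts as timed out.
 */
static bool
example_rspt_expired(struct lnet_rsp_tracker *rspt)
{
        return ktime_compare(ktime_get(), rspt->rspt_deadline) > 0;
}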
4839
4840 /**
4841  * Initiate an asynchronous PUT operation.
4842  *
4843  * There are several events associated with a PUT: completion of the send on
4844  * the initiator node (LNET_EVENT_SEND), and when the send completes
4845  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4846  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4847  * used at the target node to indicate the completion of incoming data
4848  * delivery.
4849  *
 * The local events will be logged in the EQ associated with the MD pointed to
 * by \a mdh. Using an MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
4853  * mechanism (e.g., a higher level protocol) for determining when it is safe
4854  * to modify the memory region associated with the MD.
4855  *
4856  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4857  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4858  *
4859  * \param self Indicates the NID of a local interface through which to send
4860  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4861  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4862  * must be "free floating" (See LNetMDBind()).
4863  * \param ack Controls whether an acknowledgment is requested.
4864  * Acknowledgments are only sent when they are requested by the initiating
4865  * process and the target MD enables them.
4866  * \param target A process identifier for the target process.
4867  * \param portal The index in the \a target's portal table.
4868  * \param match_bits The match bits to use for MD selection at the target
4869  * process.
4870  * \param offset The offset into the target MD (only used when the target
4871  * MD has the LNET_MD_MANAGE_REMOTE option set).
4872  * \param hdr_data 64 bits of user data that can be included in the message
4873  * header. This data is written to an event queue entry at the target if an
4874  * EQ is present on the matching MD.
4875  *
4876  * \retval  0      Success, and only in this case events will be generated
4877  * and logged to EQ (if it exists).
4878  * \retval -EIO    Simulated failure.
4879  * \retval -ENOMEM Memory allocation failure.
4880  * \retval -ENOENT Invalid MD object.
4881  *
4882  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4883  */
4884 int
4885 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4886         struct lnet_process_id target, unsigned int portal,
4887         __u64 match_bits, unsigned int offset,
4888         __u64 hdr_data)
4889 {
4890         struct lnet_msg *msg;
4891         struct lnet_libmd *md;
4892         int cpt;
4893         int rc;
4894         struct lnet_rsp_tracker *rspt = NULL;
4895
4896         LASSERT(the_lnet.ln_refcount > 0);
4897
4898         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4899             fail_peer(target.nid, 1)) {                 /* shall we now? */
4900                 CERROR("Dropping PUT to %s: simulated failure\n",
4901                        libcfs_id2str(target));
4902                 return -EIO;
4903         }
4904
4905         msg = lnet_msg_alloc();
4906         if (msg == NULL) {
4907                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4908                        libcfs_id2str(target));
4909                 return -ENOMEM;
4910         }
4911         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
4912
4913         cpt = lnet_cpt_of_cookie(mdh.cookie);
4914
        if (ack == LNET_ACK_REQ) {
                rspt = lnet_rspt_alloc(cpt);
                if (!rspt) {
                        CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
                               libcfs_id2str(target));
                        lnet_msg_free(msg);
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&rspt->rspt_on_list);
        }
4924
4925         lnet_res_lock(cpt);
4926
4927         md = lnet_handle2md(&mdh);
4928         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4929                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4930                        match_bits, portal, libcfs_id2str(target),
4931                        md == NULL ? -1 : md->md_threshold);
4932                 if (md != NULL && md->md_me != NULL)
4933                         CERROR("Source MD also attached to portal %d\n",
4934                                md->md_me->me_portal);
4935                 lnet_res_unlock(cpt);
4936
4937                 if (rspt)
4938                         lnet_rspt_free(rspt, cpt);
4939
4940                 lnet_msg_free(msg);
4941                 return -ENOENT;
4942         }
4943
4944         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4945
4946         lnet_msg_attach_md(msg, md, 0, 0);
4947
4948         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4949
4950         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4951         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
4952         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
4953         msg->msg_hdr.msg.put.hdr_data = hdr_data;
4954
4955         /* NB handles only looked up by creator (no flips) */
4956         if (ack == LNET_ACK_REQ) {
4957                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4958                         the_lnet.ln_interface_cookie;
4959                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4960                         md->md_lh.lh_cookie;
4961         } else {
4962                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4963                         LNET_WIRE_HANDLE_COOKIE_NONE;
4964                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4965                         LNET_WIRE_HANDLE_COOKIE_NONE;
4966         }
4967
4968         lnet_res_unlock(cpt);
4969
4970         lnet_build_msg_event(msg, LNET_EVENT_SEND);
4971
4972         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
4973                                                    md->md_options))
4974                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
4975         else if (rspt)
4976                 lnet_rspt_free(rspt, cpt);
4977
4978         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
4979                                  CFS_FAIL_ONCE))
4980                 rc = -EIO;
4981         else
4982                 rc = lnet_send(self, msg, LNET_NID_ANY);
4983
4984         if (rc != 0) {
4985                 CNETERR("Error sending PUT to %s: %d\n",
4986                         libcfs_id2str(target), rc);
4987                 msg->msg_no_resend = true;
4988                 lnet_finalize(msg, rc);
4989         }
4990
4991         /* completion will be signalled by an event */
4992         return 0;
4993 }
4994 EXPORT_SYMBOL(LNetPut);
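
/*
 * Editor's sketch (illustrative only): a minimal LNetPut() call, assuming
 * 'mdh' was already bound with LNetMDBind() and 'peer' was filled in by
 * the caller. The portal number and match bits are arbitrary example
 * values, not constants defined by LNet; 'example_send_put' is a
 * hypothetical name.
 */
static int
example_send_put(struct lnet_handle_md mdh, struct lnet_process_id peer)
{
        /* LNET_NID_ANY lets LNet choose the local interface */
        return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, peer,
                       12 /* portal */, 0x4242ULL /* match bits */,
                       0 /* offset */, 0 /* hdr_data */);
}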
4995
4996 /*
4997  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
4998  * returns a msg for the LND to pass to lnet_finalize() when the sink
4999  * data has been received.
5000  *
5001  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5002  * lnet_finalize() is called on it, so the LND must call this first
5003  */
5004 struct lnet_msg *
5005 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5006 {
5007         struct lnet_msg *msg = lnet_msg_alloc();
5008         struct lnet_libmd *getmd = getmsg->msg_md;
5009         struct lnet_process_id peer_id = getmsg->msg_target;
5010         int cpt;
5011
5012         LASSERT(!getmsg->msg_target_is_router);
5013         LASSERT(!getmsg->msg_routing);
5014
5015         if (msg == NULL) {
5016                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5017                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
5018                 goto drop;
5019         }
5020
5021         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5022         lnet_res_lock(cpt);
5023
5024         LASSERT(getmd->md_refcount > 0);
5025
5026         if (getmd->md_threshold == 0) {
5027                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5028                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
5029                         getmd);
5030                 lnet_res_unlock(cpt);
5031                 goto drop;
5032         }
5033
5034         LASSERT(getmd->md_offset == 0);
5035
5036         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5037                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
5038
5039         /* setup information for lnet_build_msg_event */
5040         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5041         msg->msg_from = peer_id.nid;
5042         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5043         msg->msg_hdr.src_nid = peer_id.nid;
5044         msg->msg_hdr.payload_length = getmd->md_length;
5045         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5046
5047         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5048         lnet_res_unlock(cpt);
5049
5050         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5051
5052         lnet_net_lock(cpt);
5053         lnet_msg_commit(msg, cpt);
5054         lnet_net_unlock(cpt);
5055
5056         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5057
5058         return msg;
5059
5060  drop:
5061         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5062
5063         lnet_net_lock(cpt);
5064         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5065         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5066         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5067                 getmd->md_length;
5068         lnet_net_unlock(cpt);
5069
5070         if (msg != NULL)
5071                 lnet_msg_free(msg);
5072
5073         return NULL;
5074 }
5075 EXPORT_SYMBOL(lnet_create_reply_msg);
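
/*
 * Editor's sketch (illustrative only): how an LND that DMAs GET sink data
 * directly (no REPLY on the wire) would use the helper above. Per the
 * caveat, the REPLY stand-in must be created before the original GET
 * message is finalized; the RDMA completion handler then reports the
 * actual transfer size and finalizes the stand-in.
 * 'example_lnd_optimized_get_done' is a hypothetical name and the RDMA
 * itself is elided.
 */
static void
example_lnd_optimized_get_done(struct lnet_ni *ni, struct lnet_msg *reply,
                               unsigned int bytes_rdmad, int status)
{
        if (status == 0)
                /* tell LNet how many bytes the peer actually wrote */
                lnet_set_reply_msg_len(ni, reply, bytes_rdmad);
        lnet_finalize(reply, status);
}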
5076
5077 void
5078 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5079                        unsigned int len)
5080 {
        /* Set the REPLY length, now that the RDMA that elides the REPLY
         * message has completed and the length is known. */
5083         LASSERT(reply != NULL);
5084         LASSERT(reply->msg_type == LNET_MSG_GET);
5085         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5086
5087         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5088          * the end of my buffer, I might as well be dead. */
5089         LASSERT(len <= reply->msg_ev.mlength);
5090
5091         reply->msg_ev.mlength = len;
5092 }
5093 EXPORT_SYMBOL(lnet_set_reply_msg_len);
5094
5095 /**
5096  * Initiate an asynchronous GET operation.
5097  *
5098  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5099  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5100  * the target node in the REPLY has been written to local MD.
5101  *
5102  * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
5104  *
5105  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5106  * \param mdh A handle for the MD that describes the memory into which the
5107  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5108  *
5109  * \retval  0      Success, and only in this case events will be generated
5110  * and logged to EQ (if it exists) of the MD.
5111  * \retval -EIO    Simulated failure.
5112  * \retval -ENOMEM Memory allocation failure.
5113  * \retval -ENOENT Invalid MD object.
5114  */
5115 int
5116 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
5117         struct lnet_process_id target, unsigned int portal,
5118         __u64 match_bits, unsigned int offset, bool recovery)
5119 {
5120         struct lnet_msg *msg;
5121         struct lnet_libmd *md;
5122         struct lnet_rsp_tracker *rspt;
5123         int cpt;
5124         int rc;
5125
5126         LASSERT(the_lnet.ln_refcount > 0);
5127
5128         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
            fail_peer(target.nid, 1)) {                 /* shall we now? */
5131                 CERROR("Dropping GET to %s: simulated failure\n",
5132                        libcfs_id2str(target));
5133                 return -EIO;
5134         }
5135
5136         msg = lnet_msg_alloc();
5137         if (!msg) {
5138                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5139                        libcfs_id2str(target));
5140                 return -ENOMEM;
5141         }
5142
5143         cpt = lnet_cpt_of_cookie(mdh.cookie);
5144
        rspt = lnet_rspt_alloc(cpt);
        if (!rspt) {
                CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
                       libcfs_id2str(target));
                lnet_msg_free(msg);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&rspt->rspt_on_list);
5152
5153         msg->msg_recovery = recovery;
5154
5155         lnet_res_lock(cpt);
5156
5157         md = lnet_handle2md(&mdh);
5158         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5159                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5160                        match_bits, portal, libcfs_id2str(target),
5161                        md == NULL ? -1 : md->md_threshold);
5162                 if (md != NULL && md->md_me != NULL)
5163                         CERROR("REPLY MD also attached to portal %d\n",
5164                                md->md_me->me_portal);
5165
5166                 lnet_res_unlock(cpt);
5167
5168                 lnet_msg_free(msg);
5169                 lnet_rspt_free(rspt, cpt);
5170                 return -ENOENT;
5171         }
5172
5173         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5174
5175         lnet_msg_attach_md(msg, md, 0, 0);
5176
5177         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5178
5179         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5180         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5181         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5182         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5183
5184         /* NB handles only looked up by creator (no flips) */
5185         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5186                 the_lnet.ln_interface_cookie;
5187         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5188                 md->md_lh.lh_cookie;
5189
5190         lnet_res_unlock(cpt);
5191
5192         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5193
5194         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5195                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5196         else
5197                 lnet_rspt_free(rspt, cpt);
5198
5199         rc = lnet_send(self, msg, LNET_NID_ANY);
5200         if (rc < 0) {
5201                 CNETERR("Error sending GET to %s: %d\n",
5202                         libcfs_id2str(target), rc);
5203                 msg->msg_no_resend = true;
5204                 lnet_finalize(msg, rc);
5205         }
5206
5207         /* completion will be signalled by an event */
5208         return 0;
5209 }
5210 EXPORT_SYMBOL(LNetGet);
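
/*
 * Editor's sketch (illustrative only): a minimal LNetGet() call, assuming
 * 'mdh' was bound with LNetMDBind() and describes the sink buffer. As in
 * the LNetPut() example above, the portal and match bits are arbitrary;
 * 'example_send_get' is a hypothetical name.
 */
static int
example_send_get(struct lnet_handle_md mdh, struct lnet_process_id peer)
{
        return LNetGet(LNET_NID_ANY, mdh, peer,
                       12 /* portal */, 0x4242ULL /* match bits */,
                       0 /* offset */, false /* not a recovery probe */);
}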
5211
5212 /**
5213  * Calculate distance to node at \a dstnid.
5214  *
5215  * \param dstnid Target NID.
5216  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5217  * is saved here.
5218  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5219  * here.
5220  *
5221  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5222  * local_nid_dist_zero is set, which is the default.
5223  * \retval positives Distance to target NID, i.e. number of hops plus one.
5224  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5225  */
5226 int
5227 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5228 {
5229         struct list_head *e;
5230         struct lnet_ni *ni = NULL;
5231         struct lnet_remotenet *rnet;
5232         __u32 dstnet = LNET_NIDNET(dstnid);
5233         int hops;
5234         int cpt;
5235         __u32 order = 2;
5236         struct list_head *rn_list;
5237
5238         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5239          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5240          * keep order 0 free for 0@lo and order 1 free for a local NID
5241          * match */
5242
5243         LASSERT(the_lnet.ln_refcount > 0);
5244
5245         cpt = lnet_net_lock_current();
5246
5247         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5248                 if (ni->ni_nid == dstnid) {
5249                         if (srcnidp != NULL)
5250                                 *srcnidp = dstnid;
5251                         if (orderp != NULL) {
5252                                 if (dstnid == LNET_NID_LO_0)
5253                                         *orderp = 0;
5254                                 else
5255                                         *orderp = 1;
5256                         }
5257                         lnet_net_unlock(cpt);
5258
5259                         return local_nid_dist_zero ? 0 : 1;
5260                 }
5261
5262                 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
                        /* Check whether this ni was originally created in
                         * the current net namespace.
                         * If not, assign it an order above 0xffff0000 so
                         * it is not preferred. */
                        if (current->nsproxy &&
                            !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
                                order += 0xffff0000;
5270                         if (srcnidp != NULL)
5271                                 *srcnidp = ni->ni_nid;
5272                         if (orderp != NULL)
5273                                 *orderp = order;
5274                         lnet_net_unlock(cpt);
5275                         return 1;
5276                 }
5277
5278                 order++;
5279         }
5280
5281         rn_list = lnet_net2rnethash(dstnet);
5282         list_for_each(e, rn_list) {
5283                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5284
5285                 if (rnet->lrn_net == dstnet) {
5286                         struct lnet_route *route;
5287                         struct lnet_route *shortest = NULL;
5288                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5289                         __u32 route_hops;
5290
5291                         LASSERT(!list_empty(&rnet->lrn_routes));
5292
5293                         list_for_each_entry(route, &rnet->lrn_routes,
5294                                             lr_list) {
5295                                 route_hops = route->lr_hops;
5296                                 if (route_hops == LNET_UNDEFINED_HOPS)
5297                                         route_hops = 1;
5298                                 if (shortest == NULL ||
5299                                     route_hops < shortest_hops) {
5300                                         shortest = route;
5301                                         shortest_hops = route_hops;
5302                                 }
5303                         }
5304
5305                         LASSERT(shortest != NULL);
5306                         hops = shortest_hops;
5307                         if (srcnidp != NULL) {
5308                                 struct lnet_net *net;
5309                                 net = lnet_get_net_locked(shortest->lr_lnet);
5310                                 LASSERT(net);
5311                                 ni = lnet_get_next_ni_locked(net, NULL);
5312                                 *srcnidp = ni->ni_nid;
5313                         }
5314                         if (orderp != NULL)
5315                                 *orderp = order;
5316                         lnet_net_unlock(cpt);
5317                         return hops + 1;
5318                 }
5319                 order++;
5320         }
5321
5322         lnet_net_unlock(cpt);
5323         return -EHOSTUNREACH;
5324 }
5325 EXPORT_SYMBOL(LNetDist);
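
/*
 * Editor's sketch (illustrative only): interpreting LNetDist() results.
 * A return of 0 means 'dstnid' is local and local_nid_dist_zero is set,
 * a positive value is the hop count plus one, and -EHOSTUNREACH means no
 * route exists. 'example_distance' is a hypothetical name.
 */
static void
example_distance(lnet_nid_t dstnid)
{
        lnet_nid_t srcnid = LNET_NID_ANY;
        __u32 order = 0;
        int dist = LNetDist(dstnid, &srcnid, &order);

        if (dist < 0)
                CDEBUG(D_NET, "%s unreachable: %d\n",
                       libcfs_nid2str(dstnid), dist);
        else
                CDEBUG(D_NET, "%s at distance %d via %s (order %u)\n",
                       libcfs_nid2str(dstnid), dist,
                       libcfs_nid2str(srcnid), order);
}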