1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         lnet_nid_t sd_dst_nid;
59         lnet_nid_t sd_src_nid;
60         lnet_nid_t sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicitly enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
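/*
 * Usage sketch (added for illustration, not part of the upstream file):
 * the per-MD option flags always win; otherwise the lnet_response_tracking
 * module parameter decides which message types have their responses
 * tracked (1 = GET only, 2 = PUT only, 3 = both).
 *
 *	// tracked only when lnet_response_tracking is 1 or 3
 *	bool get_tracked = lnet_response_tracking_enabled(LNET_MSG_GET, 0);
 *
 *	// never tracked: the MD explicitly opted out
 *	bool put_tracked = lnet_response_tracking_enabled(LNET_MSG_PUT,
 *					LNET_MD_NO_TRACK_RESPONSE);
 */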
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
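/*
 * Usage sketch (illustrative): sum every message type an element has
 * sent, e.g. for a peer_ni's statistics block:
 *
 *	__u32 sent = lnet_sum_stats(&lpni->lpni_stats, LNET_STATS_TYPE_SEND);
 */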
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
192 int
193 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         LIST_HEAD(cull);
199
200         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
201         if (threshold != 0) {
202                 /* Adding a new entry */
203                 LIBCFS_ALLOC(tp, sizeof(*tp));
204                 if (tp == NULL)
205                         return -ENOMEM;
206
207                 tp->tp_nid = nid;
208                 tp->tp_threshold = threshold;
209
210                 lnet_net_lock(0);
211                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
212                 lnet_net_unlock(0);
213                 return 0;
214         }
215
216         lnet_net_lock(0);
217
218         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
219                 tp = list_entry(el, struct lnet_test_peer, tp_list);
220
221                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
222                     nid == LNET_NID_ANY ||      /* removing all entries */
223                     tp->tp_nid == nid) {        /* matched this one */
224                         list_move(&tp->tp_list, &cull);
225                 }
226         }
227
228         lnet_net_unlock(0);
229
230         while (!list_empty(&cull)) {
231                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
232
233                 list_del(&tp->tp_list);
234                 LIBCFS_FREE(tp, sizeof(*tp));
235         }
236         return 0;
237 }
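/*
 * Usage sketch (illustrative; the NID value is an assumption): a non-zero
 * threshold arms a test peer entry so the next 'threshold' messages
 * to/from that NID are dropped by fail_peer() below, while a threshold of
 * zero removes matching entries and LNET_NID_ANY removes them all.
 *
 *	lnet_nid_t nid = libcfs_str2nid("192.168.1.2@tcp");
 *
 *	lnet_fail_nid(nid, 5);			// drop the next 5 messages
 *	lnet_fail_nid(nid, 0);			// clear entries for this NID
 *	lnet_fail_nid(LNET_NID_ANY, 0);		// clear every entry
 */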
238
239 static int
240 fail_peer (lnet_nid_t nid, int outgoing)
241 {
242         struct lnet_test_peer *tp;
243         struct list_head *el;
244         struct list_head *next;
245         LIST_HEAD(cull);
246         int fail = 0;
247
248         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
249         lnet_net_lock(0);
250
251         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
252                 tp = list_entry(el, struct lnet_test_peer, tp_list);
253
254                 if (tp->tp_threshold == 0) {
255                         /* zombie entry */
256                         if (outgoing) {
257                                 /* only cull zombies on outgoing tests,
258                                  * since we may be at interrupt priority on
259                                  * incoming messages. */
260                                 list_move(&tp->tp_list, &cull);
261                         }
262                         continue;
263                 }
264
265                 if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
266                     nid == tp->tp_nid) {                /* fail this peer */
267                         fail = 1;
268
269                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
270                                 tp->tp_threshold--;
271                                 if (outgoing &&
272                                     tp->tp_threshold == 0) {
273                                         /* see above */
274                                         list_move(&tp->tp_list, &cull);
275                                 }
276                         }
277                         break;
278                 }
279         }
280
281         lnet_net_unlock(0);
282
283         while (!list_empty(&cull)) {
284                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
285                 list_del(&tp->tp_list);
286
287                 LIBCFS_FREE(tp, sizeof(*tp));
288         }
289
290         return fail;
291 }
292
293 unsigned int
294 lnet_iov_nob(unsigned int niov, struct kvec *iov)
295 {
296         unsigned int nob = 0;
297
298         LASSERT(niov == 0 || iov != NULL);
299         while (niov-- > 0)
300                 nob += (iov++)->iov_len;
301
302         return (nob);
303 }
304 EXPORT_SYMBOL(lnet_iov_nob);
305
306 void
307 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
308                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
309                   unsigned int nob)
310 {
311         /* NB diov, siov are READ-ONLY */
312         unsigned int this_nob;
313
314         if (nob == 0)
315                 return;
316
317         /* skip complete frags before 'doffset' */
318         LASSERT(ndiov > 0);
319         while (doffset >= diov->iov_len) {
320                 doffset -= diov->iov_len;
321                 diov++;
322                 ndiov--;
323                 LASSERT(ndiov > 0);
324         }
325
326         /* skip complete frags before 'soffset' */
327         LASSERT(nsiov > 0);
328         while (soffset >= siov->iov_len) {
329                 soffset -= siov->iov_len;
330                 siov++;
331                 nsiov--;
332                 LASSERT(nsiov > 0);
333         }
334
335         do {
336                 LASSERT(ndiov > 0);
337                 LASSERT(nsiov > 0);
338                 this_nob = min3((unsigned int)diov->iov_len - doffset,
339                                 (unsigned int)siov->iov_len - soffset,
340                                 nob);
341
342                 memcpy((char *)diov->iov_base + doffset,
343                        (char *)siov->iov_base + soffset, this_nob);
344                 nob -= this_nob;
345
346                 if (diov->iov_len > doffset + this_nob) {
347                         doffset += this_nob;
348                 } else {
349                         diov++;
350                         ndiov--;
351                         doffset = 0;
352                 }
353
354                 if (siov->iov_len > soffset + this_nob) {
355                         soffset += this_nob;
356                 } else {
357                         siov++;
358                         nsiov--;
359                         soffset = 0;
360                 }
361         } while (nob > 0);
362 }
363 EXPORT_SYMBOL(lnet_copy_iov2iov);
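/*
 * Usage sketch (illustrative; buffers and sizes are assumptions): copy
 * 'nob' bytes between two kvec arrays, where either offset may start in
 * the middle of a fragment.
 *
 *	char a[128], b[64], out[256];
 *	struct kvec src[2] = {
 *		{ .iov_base = a, .iov_len = sizeof(a) },
 *		{ .iov_base = b, .iov_len = sizeof(b) },
 *	};
 *	struct kvec dst = { .iov_base = out, .iov_len = sizeof(out) };
 *
 *	// copy 100 bytes starting 50 bytes into the source, i.e. the
 *	// tail of 'a' plus the head of 'b', into the start of 'out'
 *	lnet_copy_iov2iov(1, &dst, 0, 2, src, 50, 100);
 */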
364
365 unsigned int
366 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
367 {
368         unsigned int  nob = 0;
369
370         LASSERT(niov == 0 || kiov != NULL);
371         while (niov-- > 0)
372                 nob += (kiov++)->bv_len;
373
374         return (nob);
375 }
376 EXPORT_SYMBOL(lnet_kiov_nob);
377
378 void
379 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
380                     unsigned int doffset,
381                     unsigned int nsiov, struct bio_vec *siov,
382                     unsigned int soffset,
383                     unsigned int nob)
384 {
385         /* NB diov, siov are READ-ONLY */
386         unsigned int    this_nob;
387         char           *daddr = NULL;
388         char           *saddr = NULL;
389
390         if (nob == 0)
391                 return;
392
393         LASSERT (!in_interrupt ());
394
395         LASSERT (ndiov > 0);
396         while (doffset >= diov->bv_len) {
397                 doffset -= diov->bv_len;
398                 diov++;
399                 ndiov--;
400                 LASSERT(ndiov > 0);
401         }
402
403         LASSERT(nsiov > 0);
404         while (soffset >= siov->bv_len) {
405                 soffset -= siov->bv_len;
406                 siov++;
407                 nsiov--;
408                 LASSERT(nsiov > 0);
409         }
410
411         do {
412                 LASSERT(ndiov > 0);
413                 LASSERT(nsiov > 0);
414                 this_nob = min3(diov->bv_len - doffset,
415                                 siov->bv_len - soffset,
416                                 nob);
417
418                 if (daddr == NULL)
419                         daddr = ((char *)kmap(diov->bv_page)) +
420                                 diov->bv_offset + doffset;
421                 if (saddr == NULL)
422                         saddr = ((char *)kmap(siov->bv_page)) +
423                                 siov->bv_offset + soffset;
424
425                 /* Vanishing risk of kmap deadlock when mapping 2 pages.
426                  * However in practice at least one of the kiovs will be mapped
427                  * kernel pages and the map/unmap will be NOOPs */
428
429                 memcpy (daddr, saddr, this_nob);
430                 nob -= this_nob;
431
432                 if (diov->bv_len > doffset + this_nob) {
433                         daddr += this_nob;
434                         doffset += this_nob;
435                 } else {
436                         kunmap(diov->bv_page);
437                         daddr = NULL;
438                         diov++;
439                         ndiov--;
440                         doffset = 0;
441                 }
442
443                 if (siov->bv_len > soffset + this_nob) {
444                         saddr += this_nob;
445                         soffset += this_nob;
446                 } else {
447                         kunmap(siov->bv_page);
448                         saddr = NULL;
449                         siov++;
450                         nsiov--;
451                         soffset = 0;
452                 }
453         } while (nob > 0);
454
455         if (daddr != NULL)
456                 kunmap(diov->bv_page);
457         if (saddr != NULL)
458                 kunmap(siov->bv_page);
459 }
460 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
461
462 void
463 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
464                     unsigned int nkiov, struct bio_vec *kiov,
465                     unsigned int kiovoffset,
466                     unsigned int nob)
467 {
468         /* NB iov, kiov are READ-ONLY */
469         unsigned int    this_nob;
470         char           *addr = NULL;
471
472         if (nob == 0)
473                 return;
474
475         LASSERT (!in_interrupt ());
476
477         LASSERT (niov > 0);
478         while (iovoffset >= iov->iov_len) {
479                 iovoffset -= iov->iov_len;
480                 iov++;
481                 niov--;
482                 LASSERT(niov > 0);
483         }
484
485         LASSERT(nkiov > 0);
486         while (kiovoffset >= kiov->bv_len) {
487                 kiovoffset -= kiov->bv_len;
488                 kiov++;
489                 nkiov--;
490                 LASSERT(nkiov > 0);
491         }
492
493         do {
494                 LASSERT(niov > 0);
495                 LASSERT(nkiov > 0);
496                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
497                                 (unsigned int)kiov->bv_len - kiovoffset,
498                                 nob);
499
500                 if (addr == NULL)
501                         addr = ((char *)kmap(kiov->bv_page)) +
502                                 kiov->bv_offset + kiovoffset;
503
504                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
505                 nob -= this_nob;
506
507                 if (iov->iov_len > iovoffset + this_nob) {
508                         iovoffset += this_nob;
509                 } else {
510                         iov++;
511                         niov--;
512                         iovoffset = 0;
513                 }
514
515                 if (kiov->bv_len > kiovoffset + this_nob) {
516                         addr += this_nob;
517                         kiovoffset += this_nob;
518                 } else {
519                         kunmap(kiov->bv_page);
520                         addr = NULL;
521                         kiov++;
522                         nkiov--;
523                         kiovoffset = 0;
524                 }
525
526         } while (nob > 0);
527
528         if (addr != NULL)
529                 kunmap(kiov->bv_page);
530 }
531 EXPORT_SYMBOL(lnet_copy_kiov2iov);
532
533 void
534 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
535                    unsigned int kiovoffset,
536                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
537                    unsigned int nob)
538 {
539         /* NB kiov, iov are READ-ONLY */
540         unsigned int    this_nob;
541         char           *addr = NULL;
542
543         if (nob == 0)
544                 return;
545
546         LASSERT (!in_interrupt ());
547
548         LASSERT (nkiov > 0);
549         while (kiovoffset >= kiov->bv_len) {
550                 kiovoffset -= kiov->bv_len;
551                 kiov++;
552                 nkiov--;
553                 LASSERT(nkiov > 0);
554         }
555
556         LASSERT(niov > 0);
557         while (iovoffset >= iov->iov_len) {
558                 iovoffset -= iov->iov_len;
559                 iov++;
560                 niov--;
561                 LASSERT(niov > 0);
562         }
563
564         do {
565                 LASSERT(nkiov > 0);
566                 LASSERT(niov > 0);
567                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
568                                 (unsigned int)iov->iov_len - iovoffset,
569                                 nob);
570
571                 if (addr == NULL)
572                         addr = ((char *)kmap(kiov->bv_page)) +
573                                 kiov->bv_offset + kiovoffset;
574
575                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
576                 nob -= this_nob;
577
578                 if (kiov->bv_len > kiovoffset + this_nob) {
579                         addr += this_nob;
580                         kiovoffset += this_nob;
581                 } else {
582                         kunmap(kiov->bv_page);
583                         addr = NULL;
584                         kiov++;
585                         nkiov--;
586                         kiovoffset = 0;
587                 }
588
589                 if (iov->iov_len > iovoffset + this_nob) {
590                         iovoffset += this_nob;
591                 } else {
592                         iov++;
593                         niov--;
594                         iovoffset = 0;
595                 }
596         } while (nob > 0);
597
598         if (addr != NULL)
599                 kunmap(kiov->bv_page);
600 }
601 EXPORT_SYMBOL(lnet_copy_iov2kiov);
602
603 int
604 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
605                   int src_niov, struct bio_vec *src,
606                   unsigned int offset, unsigned int len)
607 {
608         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
609          * for exactly 'len' bytes, and return the number of entries.
610          * NB not destructive to 'src' */
611         unsigned int    frag_len;
612         unsigned int    niov;
613
614         if (len == 0)                           /* no data => */
615                 return (0);                     /* no frags */
616
617         LASSERT(src_niov > 0);
618         while (offset >= src->bv_len) {      /* skip initial frags */
619                 offset -= src->bv_len;
620                 src_niov--;
621                 src++;
622                 LASSERT(src_niov > 0);
623         }
624
625         niov = 1;
626         for (;;) {
627                 LASSERT(src_niov > 0);
628                 LASSERT((int)niov <= dst_niov);
629
630                 frag_len = src->bv_len - offset;
631                 dst->bv_page = src->bv_page;
632                 dst->bv_offset = src->bv_offset + offset;
633
634                 if (len <= frag_len) {
635                         dst->bv_len = len;
636                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
637                         return niov;
638                 }
639
640                 dst->bv_len = frag_len;
641                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
642
643                 len -= frag_len;
644                 dst++;
645                 src++;
646                 niov++;
647                 src_niov--;
648                 offset = 0;
649         }
650 }
651 EXPORT_SYMBOL(lnet_extract_kiov);
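/*
 * Usage sketch (illustrative; 'src', 'src_niov', 'offset' and 'len' are
 * assumed to describe an existing fragment list): build a bio_vec view of
 * a byte range inside that list without copying any data. The return
 * value is the number of destination fragments actually used.
 *
 *	struct bio_vec dst[LNET_MAX_IOV];
 *	unsigned int niov;
 *
 *	niov = lnet_extract_kiov(LNET_MAX_IOV, dst, src_niov, src,
 *				 offset, len);
 */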
652
653 void
654 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
655              int delayed, unsigned int offset, unsigned int mlen,
656              unsigned int rlen)
657 {
658         unsigned int niov = 0;
659         struct kvec *iov = NULL;
660         struct bio_vec  *kiov = NULL;
661         int rc;
662
663         LASSERT (!in_interrupt ());
664         LASSERT (mlen == 0 || msg != NULL);
665
666         if (msg != NULL) {
667                 LASSERT(msg->msg_receiving);
668                 LASSERT(!msg->msg_sending);
669                 LASSERT(rlen == msg->msg_len);
670                 LASSERT(mlen <= msg->msg_len);
671                 LASSERT(msg->msg_offset == offset);
672                 LASSERT(msg->msg_wanted == mlen);
673
674                 msg->msg_receiving = 0;
675
676                 if (mlen != 0) {
677                         niov = msg->msg_niov;
678                         kiov = msg->msg_kiov;
679
680                         LASSERT (niov > 0);
681                         LASSERT ((iov == NULL) != (kiov == NULL));
682                 }
683         }
684
685         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
686                                              niov, kiov, offset, mlen,
687                                              rlen);
688         if (rc < 0)
689                 lnet_finalize(msg, rc);
690 }
691
692 static void
693 lnet_setpayloadbuffer(struct lnet_msg *msg)
694 {
695         struct lnet_libmd *md = msg->msg_md;
696
697         LASSERT(msg->msg_len > 0);
698         LASSERT(!msg->msg_routing);
699         LASSERT(md != NULL);
700         LASSERT(msg->msg_niov == 0);
701         LASSERT(msg->msg_kiov == NULL);
702
703         msg->msg_niov = md->md_niov;
704         msg->msg_kiov = md->md_kiov;
705 }
706
707 void
708 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
709                unsigned int offset, unsigned int len)
710 {
711         msg->msg_type = type;
712         msg->msg_target = target;
713         msg->msg_len = len;
714         msg->msg_offset = offset;
715
716         if (len != 0)
717                 lnet_setpayloadbuffer(msg);
718
719         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
720         msg->msg_hdr.type           = cpu_to_le32(type);
721         /* dest_nid will be overwritten by lnet_select_pathway() */
722         msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
723         msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
724         /* src_nid will be set later */
725         msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
726         msg->msg_hdr.payload_length = cpu_to_le32(len);
727 }
728
729 void
730 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
731 {
732         void *priv = msg->msg_private;
733         int rc;
734
735         LASSERT(!in_interrupt());
736         LASSERT(nid_is_lo0(&ni->ni_nid) ||
737                 (msg->msg_txcredit && msg->msg_peertxcredit));
738
739         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
740         if (rc < 0) {
741                 msg->msg_no_resend = true;
742                 lnet_finalize(msg, rc);
743         }
744 }
745
746 static int
747 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
748 {
749         int     rc;
750
751         LASSERT(!msg->msg_sending);
752         LASSERT(msg->msg_receiving);
753         LASSERT(!msg->msg_rx_ready_delay);
754         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
755
756         msg->msg_rx_ready_delay = 1;
757         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
758                                                   &msg->msg_private);
759         if (rc != 0) {
760                 CERROR("recv from %s / send to %s aborted: "
761                        "eager_recv failed %d\n",
762                        libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
763                        libcfs_id2str(msg->msg_target), rc);
764                 LASSERT(rc < 0); /* required by my callers */
765         }
766
767         return rc;
768 }
769
770 static bool
771 lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
772 {
773         time64_t deadline;
774
775         deadline = lpni->lpni_last_alive +
776                    lpni->lpni_net->net_tunables.lct_peer_timeout;
777
778         /*
779          * assume peer_ni is alive as long as we're within the configured
780          * peer timeout
781          */
782         if (deadline > now)
783                 return false;
784
785         return true;
786 }
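/*
 * Worked example (values are assumptions): with lct_peer_timeout = 180s
 * and lpni_last_alive = 1000s the deadline is 1180s. At now = 1100s the
 * deadline has not passed and the peer_ni is presumed alive; at
 * now = 1200s it has passed, and lnet_peer_alive_locked() below falls
 * back to lnet_is_peer_ni_alive() to decide.
 */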
787
788 /* NB: returns 1 when alive, 0 when dead, negative when error;
789  *     may drop the lnet_net_lock */
790 static int
791 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
792                        struct lnet_msg *msg)
793 {
794         time64_t now = ktime_get_seconds();
795
796         if (!lnet_peer_aliveness_enabled(lpni))
797                 return -ENODEV;
798
799         /*
800          * If we're resending a message, attempt to send it even if the
801          * peer is down, so we fulfill our resend quota on the message
802          */
803         if (msg->msg_retry_count > 0)
804                 return 1;
805
806         /* try to send recovery messages regardless of peer state */
807         if (msg->msg_recovery)
808                 return 1;
809
810         /* always send any responses */
811         if (lnet_msg_is_response(msg))
812                 return 1;
813
814         if (!lnet_is_peer_deadline_passed(lpni, now))
815                 return 1;
816
817         return lnet_is_peer_ni_alive(lpni);
818 }
819
820 /**
821  * \param msg The message to be sent.
822  * \param do_send True if lnet_ni_send() should be called in this function.
823  *        lnet_send() is going to lnet_net_unlock immediately after this, so
824  *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
825  *
826  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
827  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
828  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
829  * \retval -ECANCELED If the MD of the message has been unlinked.
830  */
831 static int
832 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
833 {
834         struct lnet_peer_ni     *lp = msg->msg_txpeer;
835         struct lnet_ni          *ni = msg->msg_txni;
836         int                     cpt = msg->msg_tx_cpt;
837         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
838
839         /* non-lnet_send() callers have checked before */
840         LASSERT(!do_send || msg->msg_tx_delayed);
841         LASSERT(!msg->msg_receiving);
842         LASSERT(msg->msg_tx_committed);
843
844         /* can't get here if we're sending to the loopback interface */
845         if (the_lnet.ln_loni)
846                 LASSERT(lp->lpni_nid !=
847                         lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
848
849         /* NB 'lp' is always the next hop */
850         if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
851             lnet_peer_alive_locked(ni, lp, msg) == 0) {
852                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
853                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
854                         msg->msg_len;
855                 lnet_net_unlock(cpt);
856                 if (msg->msg_txpeer)
857                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
858                                         msg->msg_type,
859                                         LNET_STATS_TYPE_DROP);
860                 if (msg->msg_txni)
861                         lnet_incr_stats(&msg->msg_txni->ni_stats,
862                                         msg->msg_type,
863                                         LNET_STATS_TYPE_DROP);
864
865                 CNETERR("Dropping message for %s: peer not alive\n",
866                         libcfs_id2str(msg->msg_target));
867                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
868                 if (do_send)
869                         lnet_finalize(msg, -EHOSTUNREACH);
870
871                 lnet_net_lock(cpt);
872                 return -EHOSTUNREACH;
873         }
874
875         if (msg->msg_md != NULL &&
876             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
877                 lnet_net_unlock(cpt);
878
879                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
880                         "called on the MD/ME.\n",
881                         libcfs_id2str(msg->msg_target));
882                 if (do_send) {
883                         msg->msg_no_resend = true;
884                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
885                                msg, libcfs_id2str(msg->msg_target));
886                         lnet_finalize(msg, -ECANCELED);
887                 }
888
889                 lnet_net_lock(cpt);
890                 return -ECANCELED;
891         }
892
893         if (!msg->msg_peertxcredit) {
894                 spin_lock(&lp->lpni_lock);
895                 LASSERT((lp->lpni_txcredits < 0) ==
896                         !list_empty(&lp->lpni_txq));
897
898                 msg->msg_peertxcredit = 1;
899                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
900                 lp->lpni_txcredits--;
901
902                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
903                         lp->lpni_mintxcredits = lp->lpni_txcredits;
904
905                 if (lp->lpni_txcredits < 0) {
906                         msg->msg_tx_delayed = 1;
907                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
908                         spin_unlock(&lp->lpni_lock);
909                         return LNET_CREDIT_WAIT;
910                 }
911                 spin_unlock(&lp->lpni_lock);
912         }
913
914         if (!msg->msg_txcredit) {
915                 LASSERT((tq->tq_credits < 0) ==
916                         !list_empty(&tq->tq_delayed));
917
918                 msg->msg_txcredit = 1;
919                 tq->tq_credits--;
920                 atomic_dec(&ni->ni_tx_credits);
921
922                 if (tq->tq_credits < tq->tq_credits_min)
923                         tq->tq_credits_min = tq->tq_credits;
924
925                 if (tq->tq_credits < 0) {
926                         msg->msg_tx_delayed = 1;
927                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
928                         return LNET_CREDIT_WAIT;
929                 }
930         }
931
932         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
933             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
934                 msg->msg_tx_delayed = 1;
935                 return LNET_CREDIT_WAIT;
936         }
937
938         /* unset the tx_delay flag as we're going to send it now */
939         msg->msg_tx_delayed = 0;
940
941         if (do_send) {
942                 lnet_net_unlock(cpt);
943                 lnet_ni_send(ni, msg);
944                 lnet_net_lock(cpt);
945         }
946         return LNET_CREDIT_OK;
947 }
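/*
 * Credit-flow sketch (summary of the function above, plus a simplified
 * view of how a caller such as lnet_send() drives it; not a verbatim
 * quote of that caller): a message first takes a peer tx credit
 * (lpni_txcredits), then an NI tx credit (tq_credits). If either counter
 * goes negative the message is queued on lpni_txq or tq_delayed and
 * LNET_CREDIT_WAIT is returned; the credits are handed back in
 * lnet_return_tx_credits_locked(), which also kicks the next queued
 * message.
 *
 *	lnet_net_lock(cpt);
 *	rc = lnet_post_send_locked(msg, 0);	// reserve credits only
 *	lnet_net_unlock(cpt);
 *	if (rc == LNET_CREDIT_OK)
 *		lnet_ni_send(msg->msg_txni, msg);
 */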
948
949
950 static struct lnet_rtrbufpool *
951 lnet_msg2bufpool(struct lnet_msg *msg)
952 {
953         struct lnet_rtrbufpool  *rbp;
954         int                     cpt;
955
956         LASSERT(msg->msg_rx_committed);
957
958         cpt = msg->msg_rx_cpt;
959         rbp = &the_lnet.ln_rtrpools[cpt][0];
960
961         LASSERT(msg->msg_len <= LNET_MTU);
962         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
963                 rbp++;
964                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
965         }
966
967         return rbp;
968 }
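/*
 * Worked example (assuming 4 KiB pages and the usual tiny/small/large
 * router buffer pool layout of 0, 1 and LNET_MTU-worth of pages): a
 * zero-byte payload matches the first pool immediately, a 4 KiB payload
 * advances to the one-page pool, and anything larger keeps walking until
 * rbp_npages * PAGE_SIZE covers msg_len. The LASSERT guarantees the walk
 * never runs past LNET_NRBPOOLS.
 */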
969
970 static int
971 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
972 {
973         /* lnet_parse is going to lnet_net_unlock immediately after this, so it
974          * sets do_recv FALSE and I don't do the unlock/send/lock bit.
975          * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
976          * received or OK to receive */
977         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
978         struct lnet_peer *lp;
979         struct lnet_rtrbufpool *rbp;
980         struct lnet_rtrbuf *rb;
981
982         LASSERT(msg->msg_kiov == NULL);
983         LASSERT(msg->msg_niov == 0);
984         LASSERT(msg->msg_routing);
985         LASSERT(msg->msg_receiving);
986         LASSERT(!msg->msg_sending);
987         LASSERT(lpni->lpni_peer_net);
988         LASSERT(lpni->lpni_peer_net->lpn_peer);
989
990         lp = lpni->lpni_peer_net->lpn_peer;
991
992         /* non-lnet_parse callers only receive delayed messages */
993         LASSERT(!do_recv || msg->msg_rx_delayed);
994
995         if (!msg->msg_peerrtrcredit) {
996                 /* lpni_lock protects the credit manipulation */
997                 spin_lock(&lpni->lpni_lock);
998
999                 msg->msg_peerrtrcredit = 1;
1000                 lpni->lpni_rtrcredits--;
1001                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
1002                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
1003
1004                 if (lpni->lpni_rtrcredits < 0) {
1005                         spin_unlock(&lpni->lpni_lock);
1006                         /* must have checked eager_recv before here */
1007                         LASSERT(msg->msg_rx_ready_delay);
1008                         msg->msg_rx_delayed = 1;
1009                         /* lp_lock protects the lp_rtrq */
1010                         spin_lock(&lp->lp_lock);
1011                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1012                         spin_unlock(&lp->lp_lock);
1013                         return LNET_CREDIT_WAIT;
1014                 }
1015                 spin_unlock(&lpni->lpni_lock);
1016         }
1017
1018         rbp = lnet_msg2bufpool(msg);
1019
1020         if (!msg->msg_rtrcredit) {
1021                 msg->msg_rtrcredit = 1;
1022                 rbp->rbp_credits--;
1023                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1024                         rbp->rbp_mincredits = rbp->rbp_credits;
1025
1026                 if (rbp->rbp_credits < 0) {
1027                         /* must have checked eager_recv before here */
1028                         LASSERT(msg->msg_rx_ready_delay);
1029                         msg->msg_rx_delayed = 1;
1030                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1031                         return LNET_CREDIT_WAIT;
1032                 }
1033         }
1034
1035         LASSERT(!list_empty(&rbp->rbp_bufs));
1036         rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
1037         list_del(&rb->rb_list);
1038
1039         msg->msg_niov = rbp->rbp_npages;
1040         msg->msg_kiov = &rb->rb_kiov[0];
1041
1042         /* unset the msg-rx_delayed flag since we're receiving the message */
1043         /* unset the msg_rx_delayed flag since we're receiving the message */
1044
1045         if (do_recv) {
1046                 int cpt = msg->msg_rx_cpt;
1047
1048                 lnet_net_unlock(cpt);
1049                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1050                              0, msg->msg_len, msg->msg_len);
1051                 lnet_net_lock(cpt);
1052         }
1053         return LNET_CREDIT_OK;
1054 }
1055
1056 void
1057 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1058 {
1059         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1060         struct lnet_ni          *txni = msg->msg_txni;
1061         struct lnet_msg         *msg2;
1062
1063         if (msg->msg_txcredit) {
1064                 struct lnet_ni       *ni = msg->msg_txni;
1065                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1066
1067                 /* give back NI txcredits */
1068                 msg->msg_txcredit = 0;
1069
1070                 LASSERT((tq->tq_credits < 0) ==
1071                         !list_empty(&tq->tq_delayed));
1072
1073                 tq->tq_credits++;
1074                 atomic_inc(&ni->ni_tx_credits);
1075                 if (tq->tq_credits <= 0) {
1076                         msg2 = list_entry(tq->tq_delayed.next,
1077                                           struct lnet_msg, msg_list);
1078                         list_del(&msg2->msg_list);
1079
1080                         LASSERT(msg2->msg_txni == ni);
1081                         LASSERT(msg2->msg_tx_delayed);
1082                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1083
1084                         (void) lnet_post_send_locked(msg2, 1);
1085                 }
1086         }
1087
1088         if (msg->msg_peertxcredit) {
1089                 /* give back peer txcredits */
1090                 msg->msg_peertxcredit = 0;
1091
1092                 spin_lock(&txpeer->lpni_lock);
1093                 LASSERT((txpeer->lpni_txcredits < 0) ==
1094                         !list_empty(&txpeer->lpni_txq));
1095
1096                 txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
1097                 LASSERT(txpeer->lpni_txqnob >= 0);
1098
1099                 txpeer->lpni_txcredits++;
1100                 if (txpeer->lpni_txcredits <= 0) {
1101                         int msg2_cpt;
1102
1103                         msg2 = list_entry(txpeer->lpni_txq.next,
1104                                               struct lnet_msg, msg_list);
1105                         list_del(&msg2->msg_list);
1106                         spin_unlock(&txpeer->lpni_lock);
1107
1108                         LASSERT(msg2->msg_txpeer == txpeer);
1109                         LASSERT(msg2->msg_tx_delayed);
1110
1111                         msg2_cpt = msg2->msg_tx_cpt;
1112
1113                         /*
1114                          * The msg_cpt can be different from the msg2_cpt
1115                          * so we need to make sure we lock the correct cpt
1116                          * for msg2.
1117                          * Once we call lnet_post_send_locked() it is no
1118                          * longer safe to access msg2, since it could've
1119                          * been freed by lnet_finalize(), but we still
1120                          * need to relock the correct cpt, so we cache the
1121                          * msg2_cpt for the purpose of the check that
1122                          * follows the call to lnet_post_send_locked().
1123                          */
1124                         if (msg2_cpt != msg->msg_tx_cpt) {
1125                                 lnet_net_unlock(msg->msg_tx_cpt);
1126                                 lnet_net_lock(msg2_cpt);
1127                         }
1128                         (void) lnet_post_send_locked(msg2, 1);
1129                         if (msg2_cpt != msg->msg_tx_cpt) {
1130                                 lnet_net_unlock(msg2_cpt);
1131                                 lnet_net_lock(msg->msg_tx_cpt);
1132                         }
1133                 } else {
1134                         spin_unlock(&txpeer->lpni_lock);
1135                 }
1136         }
1137
1138         if (txni != NULL) {
1139                 msg->msg_txni = NULL;
1140                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1141         }
1142
1143         if (txpeer != NULL) {
1144                 msg->msg_txpeer = NULL;
1145                 lnet_peer_ni_decref_locked(txpeer);
1146         }
1147 }
1148
1149 void
1150 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1151 {
1152         struct lnet_msg *msg;
1153
1154         if (list_empty(&rbp->rbp_msgs))
1155                 return;
1156         msg = list_entry(rbp->rbp_msgs.next,
1157                          struct lnet_msg, msg_list);
1158         list_del(&msg->msg_list);
1159
1160         (void)lnet_post_routed_recv_locked(msg, 1);
1161 }
1162
1163 void
1164 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1165 {
1166         struct lnet_msg *msg;
1167         struct lnet_msg *tmp;
1168
1169         lnet_net_unlock(cpt);
1170
1171         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1172                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1173                              0, 0, 0, msg->msg_hdr.payload_length);
1174                 list_del_init(&msg->msg_list);
1175                 msg->msg_no_resend = true;
1176                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1177                 lnet_finalize(msg, -ECANCELED);
1178         }
1179
1180         lnet_net_lock(cpt);
1181 }
1182
1183 void
1184 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1185 {
1186         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1187         struct lnet_peer *lp;
1188         struct lnet_ni *rxni = msg->msg_rxni;
1189         struct lnet_msg *msg2;
1190
1191         if (msg->msg_rtrcredit) {
1192                 /* give back global router credits */
1193                 struct lnet_rtrbuf *rb;
1194                 struct lnet_rtrbufpool *rbp;
1195
1196                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1197                  * there until it gets one allocated, or aborts the wait
1198                  * itself */
1199                 LASSERT(msg->msg_kiov != NULL);
1200
1201                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1202                 rbp = rb->rb_pool;
1203
1204                 msg->msg_kiov = NULL;
1205                 msg->msg_rtrcredit = 0;
1206
1207                 LASSERT(rbp == lnet_msg2bufpool(msg));
1208
1209                 LASSERT((rbp->rbp_credits > 0) ==
1210                         !list_empty(&rbp->rbp_bufs));
1211
1212                 /* If routing is now turned off, we just drop this buffer and
1213                  * don't bother trying to return credits.  */
1214                 if (!the_lnet.ln_routing) {
1215                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1216                         goto routing_off;
1217                 }
1218
1219                 /* It is possible that a user has lowered the desired number of
1220                  * buffers in this pool.  Make sure we never put back
1221                  * more buffers than the stated number. */
1222                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1223                         /* Discard this buffer so we don't have too
1224                          * many. */
1225                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1226                         rbp->rbp_nbuffers--;
1227                 } else {
1228                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1229                         rbp->rbp_credits++;
1230                         if (rbp->rbp_credits <= 0)
1231                                 lnet_schedule_blocked_locked(rbp);
1232                 }
1233         }
1234
1235 routing_off:
1236         if (msg->msg_peerrtrcredit) {
1237                 LASSERT(rxpeerni);
1238                 LASSERT(rxpeerni->lpni_peer_net);
1239                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1240
1241                 /* give back peer router credits */
1242                 msg->msg_peerrtrcredit = 0;
1243
1244                 spin_lock(&rxpeerni->lpni_lock);
1245                 rxpeerni->lpni_rtrcredits++;
1246                 spin_unlock(&rxpeerni->lpni_lock);
1247
1248                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1249                 spin_lock(&lp->lp_lock);
1250
1251                 /* drop all messages which are queued to be routed on that
1252                  * peer. */
1253                 if (!the_lnet.ln_routing) {
1254                         LIST_HEAD(drop);
1255                         list_splice_init(&lp->lp_rtrq, &drop);
1256                         spin_unlock(&lp->lp_lock);
1257                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1258                 } else if (!list_empty(&lp->lp_rtrq)) {
1259                         int msg2_cpt;
1260
1261                         msg2 = list_entry(lp->lp_rtrq.next,
1262                                           struct lnet_msg, msg_list);
1263                         list_del(&msg2->msg_list);
1264                         msg2_cpt = msg2->msg_rx_cpt;
1265                         spin_unlock(&lp->lp_lock);
1266                         /*
1267                          * messages on the lp_rtrq can be from any NID in
1268                          * the peer, which means they might have different
1269                          * cpts. We need to make sure we lock the right
1270                          * one.
1271                          */
1272                         if (msg2_cpt != msg->msg_rx_cpt) {
1273                                 lnet_net_unlock(msg->msg_rx_cpt);
1274                                 lnet_net_lock(msg2_cpt);
1275                         }
1276                         (void) lnet_post_routed_recv_locked(msg2, 1);
1277                         if (msg2_cpt != msg->msg_rx_cpt) {
1278                                 lnet_net_unlock(msg2_cpt);
1279                                 lnet_net_lock(msg->msg_rx_cpt);
1280                         }
1281                 } else {
1282                         spin_unlock(&lp->lp_lock);
1283                 }
1284         }
1285         if (rxni != NULL) {
1286                 msg->msg_rxni = NULL;
1287                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1288         }
1289         if (rxpeerni != NULL) {
1290                 msg->msg_rxpeer = NULL;
1291                 lnet_peer_ni_decref_locked(rxpeerni);
1292         }
1293 }
1294
1295 static struct lnet_peer_ni *
1296 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1297                     struct lnet_peer *peer,
1298                     struct lnet_peer_ni *best_lpni,
1299                     struct lnet_peer_net *peer_net)
1300 {
1301         /*
1302          * Look at the peer NIs for the destination peer that connect
1303          * to the chosen net. If a peer_ni is preferred when using the
1304          * best_ni to communicate, we use that one. If there is no
1305          * preferred peer_ni, or there are multiple preferred peer_ni,
1306          * the available transmit credits are used. If the transmit
1307          * credits are equal, we round-robin over the peer_ni.
1308          */
1309         struct lnet_peer_ni *lpni = NULL;
1310         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1311                 INT_MIN;
1312         int best_lpni_healthv = (best_lpni) ?
1313                 atomic_read(&best_lpni->lpni_healthv) : 0;
1314         bool best_lpni_is_preferred = false;
1315         bool lpni_is_preferred;
1316         int lpni_healthv;
1317         __u32 lpni_sel_prio;
1318         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1319
1320         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1321                 /*
1322                  * if the best_ni we've chosen already has this lpni
1323                  * preferred, then let's use it
1324                  */
1325                 if (best_ni) {
1326                         /* FIXME need to handle large-addr nid */
1327                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(
1328                                 lpni, lnet_nid_to_nid4(&best_ni->ni_nid));
1329                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1330                                libcfs_nidstr(&best_ni->ni_nid),
1331                                lpni_is_preferred);
1332                 } else {
1333                         lpni_is_preferred = false;
1334                 }
1335
1336                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1337                 lpni_sel_prio = lpni->lpni_sel_priority;
1338
1339                 if (best_lpni)
1340                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1341                                 libcfs_nid2str(lpni->lpni_nid),
1342                                 libcfs_nid2str(best_lpni->lpni_nid),
1343                                 lpni_healthv, best_lpni_healthv,
1344                                 lpni_sel_prio, best_sel_prio,
1345                                 lpni->lpni_txcredits, best_lpni_credits,
1346                                 lpni->lpni_seq, best_lpni->lpni_seq);
1347                 else
1348                         goto select_lpni;
1349
1350                 /* pick the healthiest peer ni */
1351                 if (lpni_healthv < best_lpni_healthv)
1352                         continue;
1353                 else if (lpni_healthv > best_lpni_healthv) {
1354                         if (best_lpni_is_preferred)
1355                                 best_lpni_is_preferred = false;
1356                         goto select_lpni;
1357                 }
1358
1359                 if (lpni_sel_prio > best_sel_prio)
1360                         continue;
1361                 else if (lpni_sel_prio < best_sel_prio) {
1362                         if (best_lpni_is_preferred)
1363                                 best_lpni_is_preferred = false;
1364                         goto select_lpni;
1365                 }
1366
1367                 /* if this is a preferred peer use it */
1368                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1369                         best_lpni_is_preferred = true;
1370                         goto select_lpni;
1371                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1372                         /* this is not the preferred peer so let's ignore
1373                          * it.
1374                          */
1375                         continue;
1376                 }
1377
1378                 if (lpni->lpni_txcredits < best_lpni_credits)
1379                         /* We already have a peer that has more credits
1380                          * available than this one. No need to consider
1381                          * this peer further.
1382                          */
1383                         continue;
1384                 else if (lpni->lpni_txcredits > best_lpni_credits)
1385                         goto select_lpni;
1386
1387                 /* The best peer found so far and the current peer
1388                  * have the same number of available credits let's
1389                  * make sure to select between them using Round Robin
1390                  */
1391                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1392                         continue;
1393 select_lpni:
1394                 best_lpni_is_preferred = lpni_is_preferred;
1395                 best_lpni_healthv = lpni_healthv;
1396                 best_sel_prio = lpni_sel_prio;
1397                 best_lpni = lpni;
1398                 best_lpni_credits = lpni->lpni_txcredits;
1399         }
1400
1401         /* if we still can't find a peer ni then we can't reach it */
1402         if (!best_lpni) {
1403                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1404                         LNET_NIDNET(dst_nid);
1405                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1406                                 libcfs_net2str(net_id));
1407                 return NULL;
1408         }
1409
1410         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1411                libcfs_nid2str(best_lpni->lpni_nid));
1412
1413         return best_lpni;
1414 }
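/*
 * Selection-order sketch (summary of the loop above): candidates are
 * compared by health first, then selection priority, then whether the
 * lpni is a preferred NI of the chosen best_ni, then available tx
 * credits, with lpni_seq round-robin as the final tie breaker. Worked
 * example (values are assumptions): an lpni with health 100 beats one
 * with health 90 regardless of credits; if health and priority tie, the
 * one with more tx credits wins; if everything ties, the lower lpni_seq
 * (least recently used) is selected.
 */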
1415
1416 /*
1417  * Prerequisite: the best_ni should already be set in the sd.
1418  * Find the best lpni.
1419  * If the net id is provided then restrict lpni selection to
1420  * that particular net.
1421  * Otherwise find any reachable lpni. When dealing with an MR
1422  * gateway that has multiple lpnis we can use,
1423  * we want to select the best one from the list of
1424  * reachable ones.
1425  */
1426 static inline struct lnet_peer_ni *
1427 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1428                     struct lnet_peer *peer, __u32 net_id)
1429 {
1430         struct lnet_peer_net *peer_net;
1431
1432         /* find the best_lpni on any local network */
1433         if (net_id == LNET_NET_ANY) {
1434                 struct lnet_peer_ni *best_lpni = NULL;
1435                 struct lnet_peer_net *lpn;
1436                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1437                         /* no net specified find any reachable peer ni */
1438                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1439                                 continue;
1440                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1441                                                         best_lpni, lpn);
1442                 }
1443
1444                 return best_lpni;
1445         }
1446         /* restrict on the specified net */
1447         peer_net = lnet_peer_get_net_locked(peer, net_id);
1448         if (peer_net)
1449                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1450
1451         return NULL;
1452 }
1453
1454 static int
1455 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1456 {
1457         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1458                 return 1;
1459
1460         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1461                 return -1;
1462
1463         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1464                 return 1;
1465
1466         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1467                 return -1;
1468
1469         return 0;
1470 }
1471
1472 /* Compare route priorities and hop counts */
1473 static int
1474 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1475 {
1476         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1477         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1478
1479         if (r1->lr_priority < r2->lr_priority)
1480                 return 1;
1481
1482         if (r1->lr_priority > r2->lr_priority)
1483                 return -1;
1484
1485         if (r1_hops < r2_hops)
1486                 return 1;
1487
1488         if (r1_hops > r2_hops)
1489                 return -1;
1490
1491         return 0;
1492 }
1493
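/*
 * Select the best route to rnet. Dead routes are skipped. A route whose
 * gateway is on remote_lpni's preferred router list beats any
 * non-preferred route; otherwise routes are compared by priority and
 * hop count, then by comparing the gateway peer NIs reachable from
 * src_net (lnet_compare_gw_lpnis()), and finally round-robined on
 * lr_seq. *gwni is set to the selected gateway peer NI and *prev_route
 * to the route with the highest lr_seq (the most recently used one) so
 * the caller can advance the selected route's sequence past it.
 */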
1494 static struct lnet_route *
1495 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1496                        struct lnet_peer_ni *remote_lpni,
1497                        struct lnet_route **prev_route,
1498                        struct lnet_peer_ni **gwni)
1499 {
1500         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1501         struct lnet_route *best_route;
1502         struct lnet_route *last_route;
1503         struct lnet_route *route;
1504         int rc;
1505         bool best_rte_is_preferred = false;
1506         lnet_nid_t gw_pnid;
1507
1508         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1509                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1510
1511         best_route = last_route = NULL;
1512         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1513                 if (!lnet_is_route_alive(route))
1514                         continue;
1515                 gw_pnid = route->lr_gateway->lp_primary_nid;
1516
1517                 /* no locking protects the fields below, but it's harmless */
1518                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1519                         last_route = route;
1520
1521                 /* if the best route found is in the preferred list then
1522                  * tag it as preferred and use it later on. But if we
1523                  * didn't find any routes which are on the preferred list
1524                  * then just use the best route possible.
1525                  */
1526                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1527
1528                 if (!best_route || (rc && !best_rte_is_preferred)) {
1529                         /* Restrict the selection of the router NI on the
1530                          * src_net provided. If the src_net is LNET_NID_ANY,
1531                          * then select the best interface available.
1532                          */
1533                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1534                                                    route->lr_gateway,
1535                                                    src_net);
1536                         if (!lpni) {
1537                                 CDEBUG(D_NET,
1538                                        "Gateway %s does not have a peer NI on net %s\n",
1539                                        libcfs_nid2str(gw_pnid),
1540                                        libcfs_net2str(src_net));
1541                                 continue;
1542                         }
1543                 }
1544
1545                 if (rc && !best_rte_is_preferred) {
1546                         /* This is the first preferred route we found,
1547                          * so it beats any route found previously
1548                          */
1549                         best_route = route;
1550                         if (!last_route)
1551                                 last_route = route;
1552                         best_gw_ni = lpni;
1553                         best_rte_is_preferred = true;
1554                         CDEBUG(D_NET, "preferred gw = %s\n",
1555                                libcfs_nid2str(gw_pnid));
1556                         continue;
1557                 } else if ((!rc) && best_rte_is_preferred)
1558                         /* The best route we found so far is in the preferred
1559                          * list, so it beats any non-preferred route
1560                          */
1561                         continue;
1562
1563                 if (!best_route) {
1564                         best_route = last_route = route;
1565                         best_gw_ni = lpni;
1566                         continue;
1567                 }
1568
1569                 rc = lnet_compare_routes(route, best_route);
1570                 if (rc == -1)
1571                         continue;
1572
1573                 /* Restrict the selection of the router NI on the
1574                  * src_net provided. If the src_net is LNET_NID_ANY,
1575                  * then select the best interface available.
1576                  */
1577                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1578                                            route->lr_gateway,
1579                                            src_net);
1580                 if (!lpni) {
1581                         CDEBUG(D_NET,
1582                                "Gateway %s does not have a peer NI on net %s\n",
1583                                libcfs_nid2str(gw_pnid),
1584                                libcfs_net2str(src_net));
1585                         continue;
1586                 }
1587
1588                 if (rc == 1) {
1589                         best_route = route;
1590                         best_gw_ni = lpni;
1591                         continue;
1592                 }
1593
1594                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1595                 if (rc == -1)
1596                         continue;
1597
1598                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1599                         best_route = route;
1600                         best_gw_ni = lpni;
1601                         continue;
1602                 }
1603         }
1604
1605         *prev_route = last_route;
1606         *gwni = best_gw_ni;
1607
1608         return best_route;
1609 }
1610
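/*
 * Ask the NI's LND for this NI's device priority relative to the device
 * the MD memory lives on (dev_idx). Returns UINT_MAX when no device
 * index was given or the LND does not implement lnd_get_dev_prio, which
 * is the least preferred value in lnet_get_best_ni().
 */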
1611 static inline unsigned int
1612 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1613 {
1614         if (dev_idx == UINT_MAX)
1615                 return UINT_MAX;
1616
1617         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1618             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1619                 return UINT_MAX;
1620
1621         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1622 }
1623
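/*
 * Select the best local NI on local_net for sending msg to peer. The
 * current best_ni (possibly from another net) is the starting point and
 * is returned unchanged if the peer has no NI we can reach on this net.
 * Candidates are compared, in order, on: health, selection priority,
 * device priority (for RDMA-only pages), CPT distance from the MD
 * memory (distances below lnet_numa_range are treated as equal),
 * available tx credits, and finally round robin on ni_seq.
 */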
1624 static struct lnet_ni *
1625 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1626                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1627                  struct lnet_msg *msg, int md_cpt)
1628 {
1629         struct lnet_libmd *md = msg->msg_md;
1630         unsigned int offset = msg->msg_offset;
1631         unsigned int shortest_distance;
1632         struct lnet_ni *ni = NULL;
1633         int best_credits;
1634         int best_healthv;
1635         __u32 best_sel_prio;
1636         unsigned int best_dev_prio;
1637         unsigned int dev_idx = UINT_MAX;
1638         struct page *page = lnet_get_first_page(md, offset);
1639         msg->msg_rdma_force = lnet_is_rdma_only_page(page);
1640
1641         if (msg->msg_rdma_force)
1642                 dev_idx = lnet_get_dev_idx(page);
1643
1644         /*
1645          * If there is no peer_ni that we can send to on this network,
1646          * then there is no point in looking for a new best_ni here.
1647          */
1648         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1649                 return best_ni;
1650
1651         if (best_ni == NULL) {
1652                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1653                 shortest_distance = UINT_MAX;
1654                 best_dev_prio = UINT_MAX;
1655                 best_credits = INT_MIN;
1656                 best_healthv = 0;
1657         } else {
1658                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1659                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1660                                                      best_ni->ni_dev_cpt);
1661                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1662                 best_healthv = atomic_read(&best_ni->ni_healthv);
1663                 best_sel_prio = best_ni->ni_sel_priority;
1664         }
1665
1666         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1667                 unsigned int distance;
1668                 int ni_credits;
1669                 int ni_healthv;
1670                 int ni_fatal;
1671                 __u32 ni_sel_prio;
1672                 unsigned int ni_dev_prio;
1673
1674                 ni_credits = atomic_read(&ni->ni_tx_credits);
1675                 ni_healthv = atomic_read(&ni->ni_healthv);
1676                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1677                 ni_sel_prio = ni->ni_sel_priority;
1678
1679                 /*
1680                  * calculate the distance from the CPT on which
1681                  * the message memory is allocated to the CPT of
1682                  * the NI's physical device
1683                  */
1684                 distance = cfs_cpt_distance(lnet_cpt_table(),
1685                                             md_cpt,
1686                                             ni->ni_dev_cpt);
1687
1688                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1689
1690                 /*
1691                  * All distances smaller than the NUMA range
1692                  * are treated equally.
1693                  */
1694                 if (distance < lnet_numa_range)
1695                         distance = lnet_numa_range;
1696
1697                 /*
1698                  * Select on health, selection policy, direct dma prio,
1699                  * shorter distance, available credits, then round-robin.
1700                  */
1701                 if (ni_fatal)
1702                         continue;
1703
1704                 if (best_ni)
1705                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u]\n",
1706                                libcfs_nidstr(&ni->ni_nid), ni_credits, distance,
1707                                ni->ni_seq, ni_sel_prio, ni_dev_prio,
1708                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1709                                : "not selected", best_credits, shortest_distance,
1710                                (best_ni) ? best_ni->ni_seq : 0,
1711                                best_sel_prio, best_dev_prio);
1712                 else
1713                         goto select_ni;
1714
1715                 if (ni_healthv < best_healthv)
1716                         continue;
1717                 else if (ni_healthv > best_healthv)
1718                         goto select_ni;
1719
1720                 if (ni_sel_prio > best_sel_prio)
1721                         continue;
1722                 else if (ni_sel_prio < best_sel_prio)
1723                         goto select_ni;
1724
1725                 if (ni_dev_prio > best_dev_prio)
1726                         continue;
1727                 else if (ni_dev_prio < best_dev_prio)
1728                         goto select_ni;
1729
1730                 if (distance > shortest_distance)
1731                         continue;
1732                 else if (distance < shortest_distance)
1733                         goto select_ni;
1734
1735                 if (ni_credits < best_credits)
1736                         continue;
1737                 else if (ni_credits > best_credits)
1738                         goto select_ni;
1739
1740                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1741                         continue;
1742
1743 select_ni:
1744                 best_sel_prio = ni_sel_prio;
1745                 best_dev_prio = ni_dev_prio;
1746                 shortest_distance = distance;
1747                 best_healthv = ni_healthv;
1748                 best_ni = ni;
1749                 best_credits = ni_credits;
1750         }
1751
1752         CDEBUG(D_NET, "selected best_ni %s\n",
1753                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1754
1755         return best_ni;
1756 }
1757
1758 /*
1759  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1760  * because such traffic is required to perform discovery. We therefore
1761  * exclude all GET and PUT on that portal. We also exclude all ACK and
1762  * REPLY traffic, but that is because the portal is not tracked in the
1763  * message structure for these message types. We could restrict this
1764  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1765  */
1766 static bool
1767 lnet_msg_discovery(struct lnet_msg *msg)
1768 {
1769         if (msg->msg_type == LNET_MSG_PUT) {
1770                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1771                         return true;
1772         } else if (msg->msg_type == LNET_MSG_GET) {
1773                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1774                         return true;
1775         }
1776         return false;
1777 }
1778
1779 #define SRC_SPEC        0x0001
1780 #define SRC_ANY         0x0002
1781 #define LOCAL_DST       0x0004
1782 #define REMOTE_DST      0x0008
1783 #define MR_DST          0x0010
1784 #define NMR_DST         0x0020
1785 #define SND_RESP        0x0040
1786
1787 /* The following two defines are used for return codes */
1788 #define REPEAT_SEND     0x1000
1789 #define PASS_THROUGH    0x2000
1790
1791 /* The different cases lnet_select pathway needs to handle */
1792 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1793 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1794 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1795 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1796 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1797 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1798 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1799 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
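/*
 * A send case is the bitwise OR of one bit from each group above, e.g.
 * SRC_SPEC_LOCAL_MR_DST == SRC_SPEC | LOCAL_DST | MR_DST == 0x0015 and
 * SRC_ANY_ROUTER_NMR_DST == SRC_ANY | REMOTE_DST | NMR_DST == 0x002a.
 * SND_RESP additionally marks response (ACK/REPLY) traffic.
 */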
1800
1801 static int
1802 lnet_handle_lo_send(struct lnet_send_data *sd)
1803 {
1804         struct lnet_msg *msg = sd->sd_msg;
1805         int cpt = sd->sd_cpt;
1806
1807         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1808                 return -ESHUTDOWN;
1809
1810         /* No send credit hassles with LOLND */
1811         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1812         msg->msg_hdr.dest_nid =
1813                 cpu_to_le64(lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
1814         if (!msg->msg_routing)
1815                 msg->msg_hdr.src_nid =
1816                         cpu_to_le64(lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
1817         msg->msg_target.nid = lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid);
1818         lnet_msg_commit(msg, cpt);
1819         msg->msg_txni = the_lnet.ln_loni;
1820
1821         return LNET_CREDIT_OK;
1822 }
1823
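/*
 * Commit the message to the NI/peer NI pair selected in sd and post the
 * send. This bumps the round-robin sequence numbers of the chosen local
 * NI, its net, the peer NI and its peer net, switches to the CPT the
 * message must be committed on (returning REPEAT_SEND if the
 * configuration changed while the lock was dropped), fills in the
 * source and destination NIDs in the wire header, and records the next
 * hop in the response tracker if one is attached to the MD.
 */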
1824 static int
1825 lnet_handle_send(struct lnet_send_data *sd)
1826 {
1827         struct lnet_ni *best_ni = sd->sd_best_ni;
1828         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1829         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1830         struct lnet_msg *msg = sd->sd_msg;
1831         int cpt2;
1832         __u32 send_case = sd->sd_send_case;
1833         int rc;
1834         __u32 routing = send_case & REMOTE_DST;
1835         struct lnet_rsp_tracker *rspt;
1836
1837         /* Increment sequence number of the selected peer, peer net,
1838          * local ni and local net so that we pick the next ones
1839          * in Round Robin.
1840          */
1841         best_lpni->lpni_seq++;
1842         best_lpni->lpni_peer_net->lpn_seq++;
1843         best_ni->ni_seq++;
1844         best_ni->ni_net->net_seq++;
1845
1846         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1847                libcfs_nidstr(&best_ni->ni_nid),
1848                best_ni->ni_seq, best_ni->ni_net->net_seq,
1849                atomic_read(&best_ni->ni_tx_credits),
1850                best_ni->ni_sel_priority,
1851                libcfs_nid2str(best_lpni->lpni_nid),
1852                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1853                best_lpni->lpni_txcredits,
1854                best_lpni->lpni_sel_priority);
1855
1856         /*
1857          * grab a reference on the peer_ni so it sticks around even if
1858          * we need to drop and relock the lnet_net_lock below.
1859          */
1860         lnet_peer_ni_addref_locked(best_lpni);
1861
1862         /*
1863          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1864          * message. This ensures that we get a CPT that is correct for
1865          * the NI when the NI has been restricted to a subset of all CPTs.
1866          * If the selected CPT differs from the one currently locked, we
1867          * must unlock and relock the lnet_net_lock(), and then check whether
1868          * the configuration has changed. We don't have a hold on the best_ni
1869          * yet, and it may have vanished.
1870          */
1871         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1872         if (sd->sd_cpt != cpt2) {
1873                 __u32 seq = lnet_get_dlc_seq_locked();
1874                 lnet_net_unlock(sd->sd_cpt);
1875                 sd->sd_cpt = cpt2;
1876                 lnet_net_lock(sd->sd_cpt);
1877                 if (seq != lnet_get_dlc_seq_locked()) {
1878                         lnet_peer_ni_decref_locked(best_lpni);
1879                         return REPEAT_SEND;
1880                 }
1881         }
1882
1883         /*
1884          * store the best_lpni in the message right away to avoid having
1885          * to do the same operation under different conditions
1886          */
1887         msg->msg_txpeer = best_lpni;
1888         msg->msg_txni = best_ni;
1889
1890         /*
1891          * grab a reference for the best_ni since now it's in use in this
1892          * send. The reference will be dropped in lnet_finalize()
1893          */
1894         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1895
1896         /*
1897          * Always set target.nid to the best peer NI picked. The NID will
1898          * either be one of the selected peer's NIDs, the same NID that was
1899          * originally set in the target, or the NID of a router if this
1900          * message is to be routed.
1901          */
1902         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1903
1904         /*
1905          * lnet_msg_commit assigns the correct cpt to the message, which
1906          * is used to decrement the correct refcount on the ni when it's
1907          * time to return the credits
1908          */
1909         lnet_msg_commit(msg, sd->sd_cpt);
1910
1911         /*
1912          * If we are routing the message then we keep the src_nid that was
1913          * set by the originator. If we are not routing then we are the
1914          * originator and set it here.
1915          */
1916         if (!msg->msg_routing)
1917                 msg->msg_hdr.src_nid =
1918                         cpu_to_le64(lnet_nid_to_nid4(&msg->msg_txni->ni_nid));
1919
1920         if (routing) {
1921                 msg->msg_target_is_router = 1;
1922                 msg->msg_target.pid = LNET_PID_LUSTRE;
1923                 /*
1924                  * since we're routing we want to ensure that the
1925                  * msg_hdr.dest_nid is set to the final destination. When
1926                  * the router receives this message it knows how to route
1927                  * it.
1928                  *
1929                  * final_dst_lpni is set at the beginning of the
1930                  * lnet_select_pathway() function and is never changed.
1931                  * It's safe to use it here.
1932                  */
1933                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1934         } else {
1935                 /*
1936                  * if we're not routing set the dest_nid to the best peer
1937                  * ni NID that we picked earlier in the algorithm.
1938                  */
1939                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1940         }
1941
1942         /*
1943          * if we have a response tracker block, update it with the next hop
1944          * nid
1945          */
1946         if (msg->msg_md) {
1947                 rspt = msg->msg_md->md_rspt_ptr;
1948                 if (rspt) {
1949                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1950                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1951                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1952                 }
1953         }
1954
1955         rc = lnet_post_send_locked(msg, 0);
1956
1957         if (!rc)
1958                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1959                        libcfs_nid2str(msg->msg_hdr.src_nid),
1960                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1961                        libcfs_nid2str(sd->sd_src_nid),
1962                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1963                        libcfs_nid2str(sd->sd_dst_nid),
1964                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1965                        libcfs_nid2str(sd->sd_rtr_nid),
1966                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1967
1968         return rc;
1969 }
1970
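/*
 * A non-MR peer must always see the same source NID from us. If the
 * peer NI has no preferred NID recorded yet, and the message is not a
 * response, remember the local NI we are about to send from as that
 * peer NI's preferred NID.
 */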
1971 static inline void
1972 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1973                          struct lnet_msg *msg)
1974 {
1975         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1976             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1977                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1978                        libcfs_nidstr(&lni->ni_nid),
1979                        libcfs_nid2str(lpni->lpni_nid));
1980                 lnet_peer_ni_set_non_mr_pref_nid(
1981                         lpni, lnet_nid_to_nid4(&lni->ni_nid));
1982         }
1983 }
1984
1985 /*
1986  * Source Specified
1987  * Local Destination
1988  * non-mr peer
1989  *
1990  * use the source and destination NIDs as the pathway
1991  */
1992 static int
1993 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1994 {
1995         /* the destination lpni is set before we get here. */
1996
1997         /* find local NI */
1998         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1999         if (!sd->sd_best_ni) {
2000                 CERROR("Can't send to %s: src %s is not a "
2001                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2002                                 libcfs_nid2str(sd->sd_src_nid));
2003                 return -EINVAL;
2004         }
2005
2006         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2007
2008         return lnet_handle_send(sd);
2009 }
2010
2011 /*
2012  * Source Specified
2013  * Local Destination
2014  * MR Peer
2015  *
2016  * Don't run the selection algorithm on the peer NIs. By specifying the
2017  * local NID, we're also saying that we should always use the destination NID
2018  * provided. This handles the case where we should be using the same
2019  * destination NID for all the messages which belong to the same RPC
2020  * request.
2021  */
2022 static int
2023 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2024 {
2025         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2026         if (!sd->sd_best_ni) {
2027                 CERROR("Can't send to %s: src %s is not a "
2028                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2029                                 libcfs_nid2str(sd->sd_src_nid));
2030                 return -EINVAL;
2031         }
2032
2033         if (sd->sd_best_lpni &&
2034             sd->sd_best_lpni->lpni_nid ==
2035             lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid))
2036                 return lnet_handle_lo_send(sd);
2037         else if (sd->sd_best_lpni)
2038                 return lnet_handle_send(sd);
2039
2040         CERROR("can't send to %s. no NI on %s\n",
2041                libcfs_nid2str(sd->sd_dst_nid),
2042                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2043
2044         return -EHOSTUNREACH;
2045 }
2046
2047 struct lnet_ni *
2048 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2049                               struct lnet_peer *peer,
2050                               struct lnet_peer_net *peer_net,
2051                               struct lnet_msg *msg,
2052                               int cpt)
2053 {
2054         struct lnet_net *local_net;
2055         struct lnet_ni *best_ni;
2056
2057         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2058         if (!local_net)
2059                 return NULL;
2060
2061         /*
2062          * Iterate through the NIs in this local Net and select
2063          * the NI to send from. The selection is determined by
2064          * the criteria applied by lnet_get_best_ni(), in priority order:
2065          *      1. NI health and selection/device priority
2066          *      2. NUMA distance and available tx credits
2067          *      3. Round Robin
2068          */
2069         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2070                                    peer, peer_net, msg, cpt);
2071
2072         return best_ni;
2073 }
2074
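/*
 * Kick off discovery of the gateway peer if needed. Discovery is
 * skipped when gateway discovery is already in progress
 * (lnet_peer_gw_discovery()), when the message may not trigger
 * discovery (lnet_msg_discovery()), or when the peer is already up to
 * date. If the peer NI changed during discovery or the peer is still
 * not up to date, the message is queued on the peer and LNET_DC_WAIT is
 * returned so the send resumes once discovery completes.
 */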
2075 static int
2076 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2077                              int cpt)
2078 {
2079         struct lnet_peer *peer;
2080         struct lnet_peer_ni *new_lpni;
2081         int rc;
2082
2083         lnet_peer_ni_addref_locked(lpni);
2084
2085         peer = lpni->lpni_peer_net->lpn_peer;
2086
2087         if (lnet_peer_gw_discovery(peer)) {
2088                 lnet_peer_ni_decref_locked(lpni);
2089                 return 0;
2090         }
2091
2092         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2093                 lnet_peer_ni_decref_locked(lpni);
2094                 return 0;
2095         }
2096
2097         rc = lnet_discover_peer_locked(lpni, cpt, false);
2098         if (rc) {
2099                 lnet_peer_ni_decref_locked(lpni);
2100                 return rc;
2101         }
2102
2103         new_lpni = lnet_find_peer_ni_locked(lpni->lpni_nid);
2104         if (!new_lpni) {
2105                 lnet_peer_ni_decref_locked(lpni);
2106                 return -ENOENT;
2107         }
2108
2109         peer = new_lpni->lpni_peer_net->lpn_peer;
2110         spin_lock(&peer->lp_lock);
2111         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2112                 /* The peer NI did not change and the peer is up to date.
2113                  * Nothing more to do.
2114                  */
2115                 spin_unlock(&peer->lp_lock);
2116                 lnet_peer_ni_decref_locked(lpni);
2117                 lnet_peer_ni_decref_locked(new_lpni);
2118                 return 0;
2119         }
2120         spin_unlock(&peer->lp_lock);
2121
2122         /* Either the peer NI changed during discovery, or the peer isn't up
2123          * to date. In both cases we want to queue the message on the
2124          * (possibly new) peer's pending queue and queue the peer for discovery
2125          */
2126         msg->msg_sending = 0;
2127         msg->msg_txpeer = NULL;
2128         lnet_net_unlock(cpt);
2129         lnet_peer_queue_message(peer, msg);
2130         lnet_net_lock(cpt);
2131
2132         lnet_peer_ni_decref_locked(lpni);
2133         lnet_peer_ni_decref_locked(new_lpni);
2134
2135         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2136                msg, libcfs_nid2str(peer->lp_primary_nid));
2137
2138         return LNET_DC_WAIT;
2139 }
2140
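/*
 * Resolve the route to use for a remote destination. If sd_rtr_nid was
 * specified (we are replying to a GET or sending an ACK) the gateway
 * associated with that NID is used; otherwise the best reachable remote
 * peer net and the best route to it are selected via
 * lnet_find_route_locked() (when forwarding, the remote net is taken
 * directly from the destination NID). Gateway discovery is initiated if
 * required. On success *gw_lpni and *gw_peer are set (and sd->sd_best_ni
 * if it was not already chosen) and 0 is returned; a negative errno is
 * returned on failure, or a positive code if the message was queued
 * pending discovery.
 */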
2141 static int
2142 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2143                              lnet_nid_t dst_nid,
2144                              struct lnet_peer_ni **gw_lpni,
2145                              struct lnet_peer **gw_peer)
2146 {
2147         int rc;
2148         struct lnet_peer *gw;
2149         struct lnet_peer *lp;
2150         struct lnet_peer_net *lpn;
2151         struct lnet_peer_net *best_lpn = NULL;
2152         struct lnet_remotenet *rnet, *best_rnet = NULL;
2153         struct lnet_route *best_route = NULL;
2154         struct lnet_route *last_route = NULL;
2155         struct lnet_peer_ni *lpni = NULL;
2156         struct lnet_peer_ni *gwni = NULL;
2157         bool route_found = false;
2158         lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
2159                 (sd->sd_best_ni != NULL)
2160                 ? lnet_nid_to_nid4(&sd->sd_best_ni->ni_nid)
2161                 : LNET_NID_ANY;
2162         int best_lpn_healthv = 0;
2163         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2164
2165         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2166                libcfs_nid2str(src_nid));
2167
2168         /* If a router nid was specified then we are replying to a GET or
2169          * sending an ACK. In this case we use the gateway associated with the
2170          * specified router nid.
2171          */
2172         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2173                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2174                 if (gwni) {
2175                         gw = gwni->lpni_peer_net->lpn_peer;
2176                         lnet_peer_ni_decref_locked(gwni);
2177                         if (gw->lp_rtr_refcount)
2178                                 route_found = true;
2179                 } else {
2180                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2181                                libcfs_nid2str(sd->sd_rtr_nid));
2182                 }
2183         }
2184
2185         if (!route_found) {
2186                 if (sd->sd_msg->msg_routing) {
2187                         /* If I'm routing this message then I need to find the
2188                          * next hop based on the destination NID
2189                          */
2190                         best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid));
2191                         if (!best_rnet) {
2192                                 CERROR("Unable to route message to %s - Route table may be misconfigured\n",
2193                                        libcfs_nid2str(sd->sd_dst_nid));
2194                                 return -EHOSTUNREACH;
2195                         }
2196                 } else {
2197                         /* we've already looked up the initial lpni using
2198                          * dst_nid
2199                          */
2200                         lpni = sd->sd_best_lpni;
2201                         /* the peer tree must be in existence */
2202                         LASSERT(lpni && lpni->lpni_peer_net &&
2203                                 lpni->lpni_peer_net->lpn_peer);
2204                         lp = lpni->lpni_peer_net->lpn_peer;
2205
2206                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2207                                 /* is this remote network reachable?  */
2208                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2209                                 if (!rnet)
2210                                         continue;
2211
2212                                 if (!best_lpn) {
2213                                         best_lpn = lpn;
2214                                         best_rnet = rnet;
2215                                 }
2216
2217                                 /* select the preferred peer net */
2218                                 if (best_lpn_healthv > lpn->lpn_healthv)
2219                                         continue;
2220                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2221                                         goto use_lpn;
2222
2223                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2224                                         continue;
2225                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2226                                         goto use_lpn;
2227
2228                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2229                                         continue;
2230 use_lpn:
2231                                 best_lpn_healthv = lpn->lpn_healthv;
2232                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2233                                 best_lpn = lpn;
2234                                 best_rnet = rnet;
2235                         }
2236
2237                         if (!best_lpn) {
2238                                 CERROR("peer %s has no available nets\n",
2239                                        libcfs_nid2str(sd->sd_dst_nid));
2240                                 return -EHOSTUNREACH;
2241                         }
2242
2243                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2244                                                                sd->sd_dst_nid,
2245                                                                lp,
2246                                                                best_lpn->lpn_net_id);
2247                         if (!sd->sd_best_lpni) {
2248                                 CERROR("peer %s is unreachable\n",
2249                                        libcfs_nid2str(sd->sd_dst_nid));
2250                                 return -EHOSTUNREACH;
2251                         }
2252
2253                         /* We're attempting to round robin over the remote peer
2254                          * NI's so update the final destination we selected
2255                          */
2256                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2257
2258                         /* Increment the sequence number of the remote lpni so
2259                          * we can round robin over the different interfaces of
2260                          * the remote lpni
2261                          */
2262                         sd->sd_best_lpni->lpni_seq++;
2263                 }
2264
2265                 /*
2266                  * find the best route. Restrict the selection on the net of the
2267                  * local NI if we've already picked the local NI to send from.
2268                  * Otherwise, let's pick any route we can find and then find
2269                  * a local NI we can reach the route's gateway on. Any route we
2270                  * select will be reachable by virtue of the restriction we have
2271                  * when adding a route.
2272                  */
2273                 best_route = lnet_find_route_locked(best_rnet,
2274                                                     LNET_NIDNET(src_nid),
2275                                                     sd->sd_best_lpni,
2276                                                     &last_route, &gwni);
2277
2278                 if (!best_route) {
2279                         CERROR("no route to %s from %s\n",
2280                                libcfs_nid2str(dst_nid),
2281                                libcfs_nid2str(src_nid));
2282                         return -EHOSTUNREACH;
2283                 }
2284
2285                 if (!gwni) {
2286                         CERROR("Internal Error. Route expected to %s from %s\n",
2287                                libcfs_nid2str(dst_nid),
2288                                libcfs_nid2str(src_nid));
2289                         return -EFAULT;
2290                 }
2291
2292                 gw = best_route->lr_gateway;
2293                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2294         }
2295
2296         /*
2297          * Discover this gateway if it hasn't already been discovered.
2298          * This means we might delay the message until discovery has
2299          * completed
2300          */
2301         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2302         if (rc)
2303                 return rc;
2304
2305         if (!sd->sd_best_ni) {
2306                 lpn = gwni->lpni_peer_net;
2307                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2308                                                                sd->sd_msg,
2309                                                                sd->sd_md_cpt);
2310                 if (!sd->sd_best_ni) {
2311                         CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2312                                libcfs_net2str(lpn->lpn_net_id),
2313                                libcfs_nid2str(sd->sd_src_nid));
2314                         return -EFAULT;
2315                 }
2316         }
2317
2318         *gw_lpni = gwni;
2319         *gw_peer = gw;
2320
2321         /*
2322          * increment the sequence numbers since now we're sure we're
2323          * going to use this path
2324          */
2325         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2326                 LASSERT(best_route && last_route);
2327                 best_route->lr_seq = last_route->lr_seq + 1;
2328                 if (best_lpn)
2329                         best_lpn->lpn_seq++;
2330         }
2331
2332         return 0;
2333 }
2334
2335 /*
2336  * Handle two cases:
2337  *
2338  * Case 1:
2339  *  Source specified
2340  *  Remote destination
2341  *  Non-MR destination
2342  *
2343  * Case 2:
2344  *  Source specified
2345  *  Remote destination
2346  *  MR destination
2347  *
2348  * The handling of these two cases is similar. Even though the destination
2349  * can be MR or non-MR, we'll deal directly with the router.
2350  */
2351 static int
2352 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2353 {
2354         int rc;
2355         struct lnet_peer_ni *gw_lpni = NULL;
2356         struct lnet_peer *gw_peer = NULL;
2357
2358         /* find local NI */
2359         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2360         if (!sd->sd_best_ni) {
2361                 CERROR("Can't send to %s: src %s is not a "
2362                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2363                                 libcfs_nid2str(sd->sd_src_nid));
2364                 return -EINVAL;
2365         }
2366
2367         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2368                                           &gw_peer);
2369         if (rc)
2370                 return rc;
2371
2372         if (sd->sd_send_case & NMR_DST)
2373                 /*
2374                  * since the final destination is non-MR let's set its preferred
2375                  * NID before we send
2376                  */
2377                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2378                                          sd->sd_msg);
2379
2380         /*
2381          * We're going to send to the gw found so let's set its
2382          * info
2383          */
2384         sd->sd_peer = gw_peer;
2385         sd->sd_best_lpni = gw_lpni;
2386
2387         return lnet_handle_send(sd);
2388 }
2389
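/*
 * Choose the local NI to use for a peer we can reach directly. Each of
 * the peer's nets that is also a local net is considered; for discovery
 * messages the net recorded in lp_disc_net_id wins outright. Otherwise
 * peer nets are compared on peer net health, peer net selection
 * priority, local net health, local net selection priority, then round
 * robin on the peer net and local net sequence numbers, and the best NI
 * on the winning net is returned.
 */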
2390 struct lnet_ni *
2391 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2392                                struct lnet_msg *msg, bool discovery)
2393 {
2394         struct lnet_peer_net *lpn = NULL;
2395         struct lnet_peer_net *best_lpn = NULL;
2396         struct lnet_net *net = NULL;
2397         struct lnet_net *best_net = NULL;
2398         struct lnet_ni *best_ni = NULL;
2399         int best_lpn_healthv = 0;
2400         int best_net_healthv = 0;
2401         int net_healthv;
2402         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2403         __u32 lpn_sel_prio;
2404         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2405         __u32 net_sel_prio;
2406         bool exit = false;
2407
2408         /*
2409          * The peer can have multiple interfaces, some of them can be on
2410          * the local network and others on a routed network. We should
2411          * prefer the local network. However if the local network is not
2412          * available then we need to try the routed network
2413          */
2414
2415         /* go through all the peer nets and find the best_ni */
2416         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2417                 /*
2418                  * The peer's list of nets can contain non-local nets. We
2419                  * want to only examine the local ones.
2420                  */
2421                 net = lnet_get_net_locked(lpn->lpn_net_id);
2422                 if (!net)
2423                         continue;
2424
2425                 lpn_sel_prio = lpn->lpn_sel_priority;
2426                 net_healthv = lnet_get_net_healthv_locked(net);
2427                 net_sel_prio = net->net_sel_priority;
2428
2429                 /*
2430                  * if this is a discovery message and lp_disc_net_id is
2431                  * specified then use that net to send the discovery on.
2432                  */
2433                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2434                     discovery) {
2435                         exit = true;
2436                         goto select_lpn;
2437                 }
2438
2439                 if (!best_lpn)
2440                         goto select_lpn;
2441
2442                 /* always select the lpn with the best health */
2443                 if (best_lpn_healthv > lpn->lpn_healthv)
2444                         continue;
2445                 else if (best_lpn_healthv < lpn->lpn_healthv)
2446                         goto select_lpn;
2447
2448                 /* select the preferred peer and local nets */
2449                 if (best_lpn_sel_prio < lpn_sel_prio)
2450                         continue;
2451                 else if (best_lpn_sel_prio > lpn_sel_prio)
2452                         goto select_lpn;
2453
2454                 if (best_net_healthv > net_healthv)
2455                         continue;
2456                 else if (best_net_healthv < net_healthv)
2457                         goto select_lpn;
2458
2459                 if (best_net_sel_prio < net_sel_prio)
2460                         continue;
2461                 else if (best_net_sel_prio > net_sel_prio)
2462                         goto select_lpn;
2463
2464                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2465                         continue;
2466                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2467                         goto select_lpn;
2468
2469                 /* round robin over the local networks */
2470                 if (best_net->net_seq <= net->net_seq)
2471                         continue;
2472
2473 select_lpn:
2474                 best_net_healthv = net_healthv;
2475                 best_net_sel_prio = net_sel_prio;
2476                 best_lpn_healthv = lpn->lpn_healthv;
2477                 best_lpn_sel_prio = lpn_sel_prio;
2478                 best_lpn = lpn;
2479                 best_net = net;
2480
2481                 if (exit)
2482                         break;
2483         }
2484
2485         if (best_lpn) {
2486                 /* Select the best NI on the same net as best_lpn chosen
2487                  * above
2488                  */
2489                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2490                                                         msg, md_cpt);
2491         }
2492
2493         return best_ni;
2494 }
2495
2496 static struct lnet_ni *
2497 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2498 {
2499         struct lnet_ni *best_ni = NULL;
2500         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2501         struct lnet_peer_ni *lpni_entry;
2502
2503         /*
2504          * We must use a consistent source address when sending to a
2505          * non-MR peer. However, a non-MR peer can have multiple NIDs
2506          * on multiple networks, and we may even need to talk to this
2507          * peer on multiple networks -- certain types of
2508          * load-balancing configuration do this.
2509          *
2510          * So we need to pick the NI the peer prefers for this
2511          * particular network.
2512          */
2513         LASSERT(peer_net);
2514         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2515                             lpni_peer_nis) {
2516                 if (lpni_entry->lpni_pref_nnids == 0)
2517                         continue;
2518                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2519                 best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt);
2520                 break;
2521         }
2522
2523         return best_ni;
2524 }
2525
2526 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2527 static int
2528 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2529 {
2530         struct lnet_ni *best_ni = NULL;
2531         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2532
2533         /*
2534          * We must use a consistent source address when sending to a
2535          * non-MR peer. However, a non-MR peer can have multiple NIDs
2536          * on multiple networks, and we may even need to talk to this
2537          * peer on multiple networks -- certain types of
2538          * load-balancing configuration do this.
2539          *
2540          * So we need to pick the NI the peer prefers for this
2541          * particular network.
2542          */
2543
2544         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2545                                                        sd->sd_cpt);
2546
2547         /* if best_ni is still not set just pick one */
2548         if (!best_ni) {
2549                 best_ni =
2550                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2551                                                 sd->sd_best_lpni->lpni_peer_net,
2552                                                 sd->sd_msg,
2553                                                 sd->sd_md_cpt);
2554                 /* If there is no best_ni we don't have a route */
2555                 if (!best_ni) {
2556                         CERROR("no path to %s from net %s\n",
2557                                 libcfs_nid2str(best_lpni->lpni_nid),
2558                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2559                         return -EHOSTUNREACH;
2560                 }
2561         }
2562
2563         sd->sd_best_ni = best_ni;
2564
2565         /* Set preferred NI if necessary. */
2566         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2567
2568         return 0;
2569 }
2570
2571
2572 /*
2573  * Source not specified
2574  * Local destination
2575  * Non-MR Peer
2576  *
2577  * always use the same source NID for NMR peers
2578  * If we've talked to that peer before then we already have a preferred
2579  * source NI associated with it. Otherwise, we select a preferred local NI
2580  * and store it in the peer
2581  */
2582 static int
2583 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2584 {
2585         int rc = 0;
2586
2587         /* sd->sd_best_lpni is already set to the final destination */
2588
2589         /*
2590          * At this point we should've created the peer ni and peer. If we
2591          * can't find it, then something went wrong. Instead of asserting,
2592          * output a relevant message and fail the send.
2593          */
2594         if (!sd->sd_best_lpni) {
2595                 CERROR("Internal fault. Unable to send msg %s to %s. "
2596                        "NID not known\n",
2597                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2598                        libcfs_nid2str(sd->sd_dst_nid));
2599                 return -EFAULT;
2600         }
2601
2602         if (sd->sd_msg->msg_routing) {
2603                 /* If I'm forwarding this message then I can choose any NI
2604                  * on the destination peer net
2605                  */
2606                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2607                                                                sd->sd_peer,
2608                                                                sd->sd_best_lpni->lpni_peer_net,
2609                                                                sd->sd_msg,
2610                                                                sd->sd_md_cpt);
2611                 if (!sd->sd_best_ni) {
2612                         CERROR("Unable to forward message to %s. No local NI available\n",
2613                                libcfs_nid2str(sd->sd_dst_nid));
2614                         rc = -EHOSTUNREACH;
2615                 }
2616         } else
2617                 rc = lnet_select_preferred_best_ni(sd);
2618
2619         if (!rc)
2620                 rc = lnet_handle_send(sd);
2621
2622         return rc;
2623 }
2624
2625 static int
2626 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2627 {
2628         /*
2629          * NOTE we've already handled the remote peer case. So we only
2630          * need to worry about the local case here.
2631          *
2632          * if we're sending a response, ACK or reply, we need to send it
2633          * to the destination NID given to us. At this point we already
2634          * have the peer_ni we're supposed to send to, so just find the
2635          * best_ni on the peer net and use that. Since we're sending to an
2636          * MR peer then we can just run the selection algorithm on our
2637          * local NIs and pick the best one.
2638          */
2639         if (sd->sd_send_case & SND_RESP) {
2640                 sd->sd_best_ni =
2641                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2642                                                 sd->sd_best_lpni->lpni_peer_net,
2643                                                 sd->sd_msg,
2644                                                 sd->sd_md_cpt);
2645
2646                 if (!sd->sd_best_ni) {
2647                         /*
2648                          * We're not going to deal with not able to send
2649                          * a response to the provided final destination
2650                          */
2651                         CERROR("Can't send response to %s. "
2652                                "No local NI available\n",
2653                                 libcfs_nid2str(sd->sd_dst_nid));
2654                         return -EHOSTUNREACH;
2655                 }
2656
2657                 return lnet_handle_send(sd);
2658         }
2659
2660         /*
2661          * If we get here that means we're sending a fresh request, PUT or
2662          * GET, so we need to run our standard selection algorithm.
2663          * First find the best local interface that's on any of the peer's
2664          * networks.
2665          */
2666         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2667                                         sd->sd_md_cpt,
2668                                         sd->sd_msg,
2669                                         lnet_msg_discovery(sd->sd_msg));
2670         if (sd->sd_best_ni) {
2671                 sd->sd_best_lpni =
2672                   lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid,
2673                                       sd->sd_peer,
2674                                       sd->sd_best_ni->ni_net->net_id);
2675
2676                 /*
2677                  * if we're successful in selecting a peer_ni on the local
2678                  * network, then send to it. Otherwise fall through and
2679                  * try and see if we can reach it over another routed
2680                  * network
2681                  */
2682                 if (sd->sd_best_lpni &&
2683                     sd->sd_best_lpni->lpni_nid ==
2684                     lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid)) {
2685                         /*
2686                          * in case we initially started with a routed
2687                          * destination, let's reset to local
2688                          */
2689                         sd->sd_send_case &= ~REMOTE_DST;
2690                         sd->sd_send_case |= LOCAL_DST;
2691                         return lnet_handle_lo_send(sd);
2692                 } else if (sd->sd_best_lpni) {
2693                         /*
2694                          * in case we initially started with a routed
2695                          * destination, let's reset to local
2696                          */
2697                         sd->sd_send_case &= ~REMOTE_DST;
2698                         sd->sd_send_case |= LOCAL_DST;
2699                         return lnet_handle_send(sd);
2700                 }
2701
2702                 CERROR("Internal Error. Expected to have a best_lpni: "
2703                        "%s -> %s\n",
2704                        libcfs_nid2str(sd->sd_src_nid),
2705                        libcfs_nid2str(sd->sd_dst_nid));
2706
2707                 return -EFAULT;
2708         }
2709
2710         /*
2711          * Peer doesn't have a local network. Let's see if there is
2712          * a remote network we can reach it on.
2713          */
2714         return PASS_THROUGH;
2715 }
2716
2717 /*
2718  * Case 1:
2719  *      Source NID not specified
2720  *      Local destination
2721  *      MR peer
2722  *
2723  * Case 2:
2724  *      Source NID not specified
2725  *      Remote destination
2726  *      MR peer
2727  *
2728  * In both of these cases if we're sending a response, ACK or REPLY, then
2729  * we need to send to the destination NID provided.
2730  *
2731  * In the remote case let's deal with MR routers.
2732  *
2733  */
2734
2735 static int
2736 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2737 {
2738         int rc = 0;
2739         struct lnet_peer *gw_peer = NULL;
2740         struct lnet_peer_ni *gw_lpni = NULL;
2741
2742         /*
2743          * handle sending a response to a remote peer here so we don't
2744          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2745          */
2746         if (sd->sd_send_case & REMOTE_DST &&
2747             sd->sd_send_case & SND_RESP) {
2748                 struct lnet_peer_ni *gw;
2749                 struct lnet_peer *gw_peer;
2750
2751                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2752                                                   &gw_peer);
2753                 if (rc < 0) {
2754                         CERROR("Can't send response to %s. "
2755                                "No route available\n",
2756                                 libcfs_nid2str(sd->sd_dst_nid));
2757                         return -EHOSTUNREACH;
2758                 } else if (rc > 0) {
2759                         return rc;
2760                 }
2761
2762                 sd->sd_best_lpni = gw;
2763                 sd->sd_peer = gw_peer;
2764
2765                 return lnet_handle_send(sd);
2766         }
2767
2768         /*
2769          * Even though the NID for the peer might not be on a local network,
2770          * since the peer is MR there could be other interfaces on the
2771          * local network. In that case we'd still like to prefer the local
2772          * network over the routed network. If we're unable to do that
2773          * then we select the best router among the different routed networks,
2774          * and if the router is MR then we can deal with it as such.
2775          */
2776         rc = lnet_handle_any_mr_dsta(sd);
2777         if (rc != PASS_THROUGH)
2778                 return rc;
2779
2780         /*
2781          * Now that we must route to the destination, we must consider the
2782          * MR case, where the destination has multiple interfaces, some of
2783          * which we can route to and others we do not. For this reason we
2784          * need to select the destination which we can route to and if
2785          * there are multiple, we need to round robin.
2786          */
2787         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2788                                           &gw_peer);
2789         if (rc)
2790                 return rc;
2791
2792         sd->sd_send_case &= ~LOCAL_DST;
2793         sd->sd_send_case |= REMOTE_DST;
2794
2795         sd->sd_peer = gw_peer;
2796         sd->sd_best_lpni = gw_lpni;
2797
2798         return lnet_handle_send(sd);
2799 }
2800
2801 /*
2802  * Source not specified
2803  * Remote destination
2804  * Non-MR peer
2805  *
2806  * Must send to the specified peer NID using the same source NID that
2807  * we've used before. If this is the first time we talk to that peer,
2808  * then find the source NI and assign it as the preferred NI for that peer.
2809  */
2810 static int
2811 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2812 {
2813         int rc;
2814         struct lnet_peer_ni *gw_lpni = NULL;
2815         struct lnet_peer *gw_peer = NULL;
2816
2817         /*
2818          * Let's see if we have a preferred NI to talk to this NMR peer
2819          */
2820         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2821                                                               sd->sd_cpt);
2822
2823         /*
2824          * find the router and that'll find the best NI if we didn't find
2825          * it already.
2826          */
2827         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2828                                           &gw_peer);
2829         if (rc)
2830                 return rc;
2831
2832         /*
2833          * set the best_ni we've chosen as the preferred one for
2834          * this peer
2835          */
2836         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2837
2838         /* we'll be sending to the gw */
2839         sd->sd_best_lpni = gw_lpni;
2840         sd->sd_peer = gw_peer;
2841
2842         return lnet_handle_send(sd);
2843 }
2844
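/*
 * Dispatch on the send case bitmask (source specified or any, local or
 * routed destination, MR or NMR peer) assembled by lnet_select_pathway()
 * and invoke the matching handler above.
 */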
2845 static int
2846 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2847 {
2848         /*
2849          * turn off the SND_RESP bit.
2850          * It will be checked in the case handling
2851          */
2852         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2853
2854         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2855                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2856                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2857                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2858                 libcfs_nid2str(sd->sd_dst_nid),
2859                 (send_case & LOCAL_DST) ? "local" : "routed");
2860
2861         switch (send_case) {
2862         /*
2863          * For all cases where the source is specified, we should always
2864          * use the destination NID, whether it's an MR destination or not,
2865          * since we're continuing a series of related messages for the
2866          * same RPC
2867          */
2868         case SRC_SPEC_LOCAL_NMR_DST:
2869                 return lnet_handle_spec_local_nmr_dst(sd);
2870         case SRC_SPEC_LOCAL_MR_DST:
2871                 return lnet_handle_spec_local_mr_dst(sd);
2872         case SRC_SPEC_ROUTER_NMR_DST:
2873         case SRC_SPEC_ROUTER_MR_DST:
2874                 return lnet_handle_spec_router_dst(sd);
2875         case SRC_ANY_LOCAL_NMR_DST:
2876                 return lnet_handle_any_local_nmr_dst(sd);
2877         case SRC_ANY_LOCAL_MR_DST:
2878         case SRC_ANY_ROUTER_MR_DST:
2879                 return lnet_handle_any_mr_dst(sd);
2880         case SRC_ANY_ROUTER_NMR_DST:
2881                 return lnet_handle_any_router_nmr_dst(sd);
2882         default:
2883                 CERROR("Unknown send case\n");
2884                 return -1;
2885         }
2886 }
2887
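/*
 * Select the pathway for the message: look up (or create) the peer NI for
 * dst_nid, optionally trigger discovery, classify the send case, and hand
 * the message to lnet_handle_send_case_locked(). If the handler returns
 * REPEAT_SEND (e.g. the configuration changed under us), the selection is
 * restarted.
 */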
2888 static int
2889 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2890                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2891 {
2892         struct lnet_peer_ni *lpni;
2893         struct lnet_peer *peer;
2894         struct lnet_send_data send_data;
2895         int cpt, rc;
2896         int md_cpt;
2897         __u32 send_case = 0;
2898         bool final_hop;
2899         bool mr_forwarding_allowed;
2900
2901         memset(&send_data, 0, sizeof(send_data));
2902
2903         /*
2904          * get an initial CPT to use for locking. The idea here is not to
2905          * serialize the calls to select_pathway, so that as many
2906          * operations can run concurrently as possible. To do that we use
2907          * the CPT where this call is being executed. Later on when we
2908          * determine the CPT to use in lnet_message_commit, we switch the
2909          * lock and check if there was any configuration change.  If none,
2910          * then we proceed, if there is, then we restart the operation.
2911          */
2912         cpt = lnet_net_lock_current();
2913
2914         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2915         if (md_cpt == CFS_CPT_ANY)
2916                 md_cpt = cpt;
2917
2918 again:
2919
2920         /*
2921          * If we're being asked to send to the loopback interface, there
2922          * is no need to go through any selection. We can just shortcut
2923          * the entire process and send over lolnd
2924          */
2925         send_data.sd_msg = msg;
2926         send_data.sd_cpt = cpt;
2927         if (dst_nid == LNET_NID_LO_0) {
2928                 rc = lnet_handle_lo_send(&send_data);
2929                 lnet_net_unlock(cpt);
2930                 return rc;
2931         }
2932
2933         /*
2934          * find an existing peer_ni, or create one and mark it as having been
2935          * created due to network traffic. This call will create the
2936          * peer->peer_net->peer_ni tree.
2937          */
2938         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2939         if (IS_ERR(lpni)) {
2940                 lnet_net_unlock(cpt);
2941                 return PTR_ERR(lpni);
2942         }
2943
2944         /*
2945          * Cache the original src_nid and rtr_nid. If we need to resend the
2946          * message then we'll need to know whether the src_nid was originally
2947          * specified for this message. If it was originally specified,
2948          * then we need to keep using the same src_nid since it's
2949          * continuing the same sequence of messages. Similarly, rtr_nid will
2950          * affect our choice of next hop.
2951          */
2952         msg->msg_src_nid_param = src_nid;
2953         msg->msg_rtr_nid_param = rtr_nid;
2954
2955         /*
2956          * If necessary, perform discovery on the peer that owns this peer_ni.
2957          * Note, this can result in the ownership of this peer_ni changing
2958          * to another peer object.
2959          */
2960         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2961         if (rc) {
2962                 lnet_peer_ni_decref_locked(lpni);
2963                 lnet_net_unlock(cpt);
2964                 return rc;
2965         }
2966         lnet_peer_ni_decref_locked(lpni);
2967
2968         peer = lpni->lpni_peer_net->lpn_peer;
2969
2970         /*
2971          * Identify the different send cases
2972          */
2973         if (src_nid == LNET_NID_ANY)
2974                 send_case |= SRC_ANY;
2975         else
2976                 send_case |= SRC_SPEC;
2977
2978         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2979                 send_case |= LOCAL_DST;
2980         else
2981                 send_case |= REMOTE_DST;
2982
2983         final_hop = false;
2984         if (msg->msg_routing && (send_case & LOCAL_DST))
2985                 final_hop = true;
2986
2987         /* Determine whether to allow MR forwarding for this message.
2988          * NB: MR forwarding is allowed if the message originator and the
2989          * destination are both MR capable, and the destination lpni that was
2990          * originally chosen by the originator is unhealthy or down.
2991          * We check the MR capability of the destination further below
2992          */
2993         mr_forwarding_allowed = false;
2994         if (final_hop) {
2995                 struct lnet_peer *src_lp;
2996                 struct lnet_peer_ni *src_lpni;
2997
2998                 src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid,
2999                                                   LNET_NID_ANY, cpt);
3000                 /* We don't fail the send if we hit any errors here. We'll just
3001                  * try to send it via non-multi-rail criteria
3002                  */
3003                 if (!IS_ERR(src_lpni)) {
3004                         /* Drop ref taken by lnet_nid2peerni_locked() */
3005                         lnet_peer_ni_decref_locked(src_lpni);
3006                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3007                         if (lnet_peer_is_multi_rail(src_lp) &&
3008                             !lnet_is_peer_ni_alive(lpni))
3009                                 mr_forwarding_allowed = true;
3010
3011                 }
3012                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3013                        mr_forwarding_allowed ? "allowed" : "not allowed");
3014         }
3015
3016         /*
3017          * Deal with the peer as NMR in the following cases:
3018          * 1. the peer is NMR
3019          * 2. We're trying to recover a specific peer NI
3020          * 3. I'm a router sending to the final destination and MR forwarding is
3021          *    not allowed for this message (as determined above).
3022          *    In this case the source of the message would've
3023          *    already selected the final destination so my job
3024          *    is to honor the selection.
3025          */
3026         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3027             (final_hop && !mr_forwarding_allowed))
3028                 send_case |= NMR_DST;
3029         else
3030                 send_case |= MR_DST;
3031
3032         if (lnet_msg_is_response(msg))
3033                 send_case |= SND_RESP;
3034
3035         /* assign parameters to the send_data */
3036         send_data.sd_rtr_nid = rtr_nid;
3037         send_data.sd_src_nid = src_nid;
3038         send_data.sd_dst_nid = dst_nid;
3039         send_data.sd_best_lpni = lpni;
3040         /*
3041          * keep a pointer to the final destination in case we're going to
3042          * route, so we'll need to access it later
3043          */
3044         send_data.sd_final_dst_lpni = lpni;
3045         send_data.sd_peer = peer;
3046         send_data.sd_md_cpt = md_cpt;
3047         send_data.sd_send_case = send_case;
3048
3049         rc = lnet_handle_send_case_locked(&send_data);
3050
3051         /*
3052          * Update the local cpt since send_data.sd_cpt might've been
3053          * updated as a result of calling lnet_handle_send_case_locked().
3054          */
3055         cpt = send_data.sd_cpt;
3056
3057         if (rc == REPEAT_SEND)
3058                 goto again;
3059
3060         lnet_net_unlock(cpt);
3061
3062         return rc;
3063 }
3064
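/*
 * Entry point for sending a fully formed message: select the pathway and,
 * if a send credit was obtained immediately (LNET_CREDIT_OK), pass the
 * message to the LND via lnet_ni_send(). Otherwise the message waits for
 * credits or for discovery to complete.
 */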
3065 int
3066 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
3067 {
3068         lnet_nid_t              dst_nid = msg->msg_target.nid;
3069         int                     rc;
3070
3071         /*
3072          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
3073          * but we might want to use pre-determined router for ACK/REPLY
3074          * in the future
3075          */
3076         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3077         LASSERT(msg->msg_txpeer == NULL);
3078         LASSERT(msg->msg_txni == NULL);
3079         LASSERT(!msg->msg_sending);
3080         LASSERT(!msg->msg_target_is_router);
3081         LASSERT(!msg->msg_receiving);
3082
3083         msg->msg_sending = 1;
3084
3085         LASSERT(!msg->msg_tx_committed);
3086
3087         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3088         if (rc < 0) {
3089                 if (rc == -EHOSTUNREACH)
3090                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3091                 else
3092                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3093                 return rc;
3094         }
3095
3096         if (rc == LNET_CREDIT_OK)
3097                 lnet_ni_send(msg->msg_txni, msg);
3098
3099         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3100         return 0;
3101 }
3102
3103 enum lnet_mt_event_type {
3104         MT_TYPE_LOCAL_NI = 0,
3105         MT_TYPE_PEER_NI
3106 };
3107
3108 struct lnet_mt_event_info {
3109         enum lnet_mt_event_type mt_type;
3110         lnet_nid_t mt_nid;
3111 };
3112
3113 /* called with res_lock held */
3114 void
3115 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3116 {
3117         struct lnet_rsp_tracker *rspt;
3118
3119         /*
3120          * msg has a refcount on the MD so the MD is not going away.
3121          * The rspt queue for the cpt is protected by
3122          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3123          */
3124         if (!md->md_rspt_ptr)
3125                 return;
3126
3127         rspt = md->md_rspt_ptr;
3128
3129         /* debug code */
3130         LASSERT(rspt->rspt_cpt == cpt);
3131
3132         md->md_rspt_ptr = NULL;
3133
3134         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3135                 /*
3136                  * The monitor thread has invalidated this handle because the
3137                  * response timed out, but it failed to lookup the MD. That
3138                  * means this response tracker is on the zombie list. We can
3139                  * safely remove it under the resource lock (held by caller) and
3140                  * free the response tracker block.
3141                  */
3142                 list_del(&rspt->rspt_on_list);
3143                 lnet_rspt_free(rspt, cpt);
3144         } else {
3145                 /*
3146                  * invalidate the handle to indicate that a response has been
3147                  * received, which will then lead the monitor thread to clean up
3148                  * the rspt block.
3149                  */
3150                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3151         }
3152 }
3153
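/*
 * Free any response trackers left on the per-CPT zombie queues. Called
 * once LNet is shutting down and the LND nets have stopped, so no further
 * events can reference these blocks.
 */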
3154 void
3155 lnet_clean_zombie_rstqs(void)
3156 {
3157         struct lnet_rsp_tracker *rspt, *tmp;
3158         int i;
3159
3160         cfs_cpt_for_each(i, lnet_cpt_table()) {
3161                 list_for_each_entry_safe(rspt, tmp,
3162                                          the_lnet.ln_mt_zombie_rstqs[i],
3163                                          rspt_on_list) {
3164                         list_del(&rspt->rspt_on_list);
3165                         lnet_rspt_free(rspt, i);
3166                 }
3167         }
3168
3169         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3170 }
3171
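/*
 * Walk the per-CPT response tracker queues and unlink the MD of every
 * tracker whose REPLY/ACK deadline has passed (or unconditionally on
 * shutdown). A timeout is counted in the CPT's health stats and the
 * next-hop peer NI's health value is decremented so it is avoided for
 * subsequent sends.
 */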
3172 static void
3173 lnet_finalize_expired_responses(void)
3174 {
3175         struct lnet_libmd *md;
3176         struct lnet_rsp_tracker *rspt, *tmp;
3177         ktime_t now;
3178         int i;
3179
3180         if (the_lnet.ln_mt_rstq == NULL)
3181                 return;
3182
3183         cfs_cpt_for_each(i, lnet_cpt_table()) {
3184                 LIST_HEAD(local_queue);
3185
3186                 lnet_net_lock(i);
3187                 if (!the_lnet.ln_mt_rstq[i]) {
3188                         lnet_net_unlock(i);
3189                         continue;
3190                 }
3191                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3192                 lnet_net_unlock(i);
3193
3194                 now = ktime_get();
3195
3196                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3197                         /*
3198                          * The rspt mdh will be invalidated when a response
3199                          * is received or whenever we want to discard the
3200                          * block. The monitor thread will walk the queue
3201                          * and clean up any rspts with an invalid mdh.
3202                          * The monitor thread will walk the queue until
3203                          * the first unexpired rspt block. This means that
3204                          * some rspt blocks which received their
3205                          * corresponding responses will linger in the
3206                          * queue until they are cleaned up eventually.
3207                          */
3208                         lnet_res_lock(i);
3209                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3210                                 lnet_res_unlock(i);
3211                                 list_del(&rspt->rspt_on_list);
3212                                 lnet_rspt_free(rspt, i);
3213                                 continue;
3214                         }
3215
3216                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3217                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3218                                 struct lnet_peer_ni *lpni;
3219                                 lnet_nid_t nid;
3220
3221                                 md = lnet_handle2md(&rspt->rspt_mdh);
3222                                 if (!md) {
3223                                         /* MD has been queued for unlink, but
3224                                          * rspt hasn't been detached (Note we've
3225                                          * checked above that the rspt_mdh is
3226                                          * valid). Since we cannot lookup the MD
3227                                          * we're unable to detach the rspt
3228                                          * ourselves. Thus, move the rspt to the
3229                                          * zombie list where we'll wait for
3230                                          * either:
3231                                          *   1. The remaining operations on the
3232                                          *   MD to complete. In this case the
3233                                          *   final operation will result in
3234                                          *   lnet_msg_detach_md()->
3235                                          *   lnet_detach_rsp_tracker() where
3236                                          *   we will clean up this response
3237                                          *   tracker.
3238                                          *   2. LNet to shutdown. In this case
3239                                          *   we'll wait until after all LND Nets
3240                                          *   have shutdown and then we can
3241                                          *   safely free any remaining response
3242                                          *   tracker blocks on the zombie list.
3243                                          * Note: We need to hold the resource
3244                                          * lock when adding to the zombie list
3245                                          * because we may have concurrent access
3246                                          * with lnet_detach_rsp_tracker().
3247                                          */
3248                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3249                                         list_move(&rspt->rspt_on_list,
3250                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3251                                         lnet_res_unlock(i);
3252                                         continue;
3253                                 }
3254                                 LASSERT(md->md_rspt_ptr == rspt);
3255                                 md->md_rspt_ptr = NULL;
3256                                 lnet_res_unlock(i);
3257
3258                                 LNetMDUnlink(rspt->rspt_mdh);
3259
3260                                 nid = rspt->rspt_next_hop_nid;
3261
3262                                 list_del(&rspt->rspt_on_list);
3263                                 lnet_rspt_free(rspt, i);
3264
3265                                 /* If we're shutting down we just want to clean
3266                                  * up the rspt blocks
3267                                  */
3268                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3269                                         continue;
3270
3271                                 lnet_net_lock(i);
3272                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3273                                 lnet_net_unlock(i);
3274
3275                                 CDEBUG(D_NET,
3276                                        "Response timeout: md = %p: nid = %s\n",
3277                                        md, libcfs_nid2str(nid));
3278
3279                                 /*
3280                                  * If there is a timeout on the response
3281                                  * from the next hop decrement its health
3282                                  * value so that we don't use it
3283                                  */
3284                                 lnet_net_lock(0);
3285                                 lpni = lnet_find_peer_ni_locked(nid);
3286                                 if (lpni) {
3287                                         lnet_handle_remote_failure_locked(lpni);
3288                                         lnet_peer_ni_decref_locked(lpni);
3289                                 }
3290                                 lnet_net_unlock(0);
3291                         } else {
3292                                 lnet_res_unlock(i);
3293                                 break;
3294                         }
3295                 }
3296
3297                 if (!list_empty(&local_queue)) {
3298                         lnet_net_lock(i);
3299                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3300                         lnet_net_unlock(i);
3301                 }
3302         }
3303 }
3304
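/*
 * Drain the resend queue for this CPT, resending each message with its
 * original source NID and router NID. Messages whose destination peer can
 * no longer be found are finalized with -EFAULT.
 */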
3305 static void
3306 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3307 {
3308         struct lnet_msg *msg;
3309
3310         while (!list_empty(resendq)) {
3311                 struct lnet_peer_ni *lpni;
3312
3313                 msg = list_entry(resendq->next, struct lnet_msg,
3314                                  msg_list);
3315
3316                 list_del_init(&msg->msg_list);
3317
3318                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3319                 if (!lpni) {
3320                         lnet_net_unlock(cpt);
3321                         CERROR("Expected that a peer is already created for %s\n",
3322                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3323                         msg->msg_no_resend = true;
3324                         lnet_finalize(msg, -EFAULT);
3325                         lnet_net_lock(cpt);
3326                 } else {
3327                         int rc;
3328
3329                         lnet_peer_ni_decref_locked(lpni);
3330
3331                         lnet_net_unlock(cpt);
3332                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3333                                libcfs_nid2str(msg->msg_src_nid_param),
3334                                libcfs_id2str(msg->msg_target),
3335                                lnet_msgtyp2str(msg->msg_type),
3336                                msg->msg_recovery,
3337                                msg->msg_retry_count);
3338                         rc = lnet_send(msg->msg_src_nid_param, msg,
3339                                        msg->msg_rtr_nid_param);
3340                         if (rc) {
3341                                 CERROR("Error sending %s to %s: %d\n",
3342                                        lnet_msgtyp2str(msg->msg_type),
3343                                        libcfs_id2str(msg->msg_target), rc);
3344                                 msg->msg_no_resend = true;
3345                                 lnet_finalize(msg, rc);
3346                         }
3347                         lnet_net_lock(cpt);
3348                         if (!rc)
3349                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3350                 }
3351         }
3352 }
3353
3354 static void
3355 lnet_resend_pending_msgs(void)
3356 {
3357         int i;
3358
3359         cfs_cpt_for_each(i, lnet_cpt_table()) {
3360                 lnet_net_lock(i);
3361                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3362                 lnet_net_unlock(i);
3363         }
3364 }
3365
3366 /* called with cpt and ni_lock held */
3367 static void
3368 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3369 {
3370         struct lnet_handle_md recovery_mdh;
3371
3372         LNetInvalidateMDHandle(&recovery_mdh);
3373
3374         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3375             force) {
3376                 recovery_mdh = ni->ni_ping_mdh;
3377                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3378         }
3379         lnet_ni_unlock(ni);
3380         lnet_net_unlock(cpt);
3381         if (!LNetMDHandleIsInvalid(recovery_mdh))
3382                 LNetMDUnlink(recovery_mdh);
3383         lnet_net_lock(cpt);
3384         lnet_ni_lock(ni);
3385 }
3386
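/*
 * Attempt to recover unhealthy local NIs by sending them a recovery ping.
 * NIs that became healthy or are being deleted are dropped from the
 * recovery queue; the rest are pinged (at most once per ping interval) and
 * requeued for the next pass of the monitor thread.
 */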
3387 static void
3388 lnet_recover_local_nis(void)
3389 {
3390         struct lnet_mt_event_info *ev_info;
3391         LIST_HEAD(processed_list);
3392         LIST_HEAD(local_queue);
3393         struct lnet_handle_md mdh;
3394         struct lnet_ni *tmp;
3395         struct lnet_ni *ni;
3396         lnet_nid_t nid;
3397         int healthv;
3398         int rc;
3399         time64_t now;
3400
3401         /*
3402          * splice the recovery queue on a local queue. We will iterate
3403          * through the local queue and update it as needed. Once we're
3404          * done with the traversal, we'll splice the local queue back on
3405          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3406          * will be traversed in the next iteration.
3407          */
3408         lnet_net_lock(0);
3409         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3410                          &local_queue);
3411         lnet_net_unlock(0);
3412
3413         now = ktime_get_seconds();
3414
3415         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3416                 /*
3417                  * if an NI is being deleted or it is now healthy, there
3418                  * is no need to keep it around in the recovery queue.
3419                  * The monitor thread is the only thread responsible for
3420                  * removing the NI from the recovery queue.
3421                  * Multiple threads can be adding NIs to the recovery
3422                  * queue.
3423                  */
3424                 healthv = atomic_read(&ni->ni_healthv);
3425
3426                 lnet_net_lock(0);
3427                 lnet_ni_lock(ni);
3428                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3429                     healthv == LNET_MAX_HEALTH_VALUE) {
3430                         list_del_init(&ni->ni_recovery);
3431                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3432                         lnet_ni_unlock(ni);
3433                         lnet_ni_decref_locked(ni, 0);
3434                         lnet_net_unlock(0);
3435                         continue;
3436                 }
3437
3438                 /*
3439                  * if the local NI failed recovery we must unlink the md.
3440                  * But we want to keep the local_ni on the recovery queue
3441                  * so we can continue the attempts to recover it.
3442                  */
3443                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3444                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3445                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3446                 }
3447
3448
3449                 lnet_ni_unlock(ni);
3450
3451                 if (now < ni->ni_next_ping) {
3452                         lnet_net_unlock(0);
3453                         continue;
3454                 }
3455
3456                 lnet_net_unlock(0);
3457
3458                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3459                        libcfs_nidstr(&ni->ni_nid));
3460
3461                 lnet_ni_lock(ni);
3462                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3463                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3464                         lnet_ni_unlock(ni);
3465
3466                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3467                         if (!ev_info) {
3468                                 CERROR("out of memory. Can't recover %s\n",
3469                                        libcfs_nidstr(&ni->ni_nid));
3470                                 lnet_ni_lock(ni);
3471                                 ni->ni_recovery_state &=
3472                                   ~LNET_NI_RECOVERY_PENDING;
3473                                 lnet_ni_unlock(ni);
3474                                 continue;
3475                         }
3476
3477                         mdh = ni->ni_ping_mdh;
3478                         /*
3479                          * Invalidate the ni mdh in case it's deleted.
3480                          * We'll unlink the mdh in this case below.
3481                          */
3482                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3483                         /* FIXME need to handle large-addr nid */
3484                         nid = lnet_nid_to_nid4(&ni->ni_nid);
3485
3486                         /*
3487                          * remove the NI from the local queue and drop the
3488                          * reference count to it while we're recovering
3489                          * it. The reason for that is that the NI could
3490                          * be deleted, and the way the code is structured
3491                          * is if we don't drop the NI, then the deletion
3492                          * code will enter a loop waiting for the
3493                          * reference count to be removed while holding the
3494                          * ln_mutex_lock(). When we look up the peer to
3495                          * send to in lnet_select_pathway() we will try to
3496                          * lock the ln_mutex_lock() as well, leading to
3497                          * a deadlock. By dropping the refcount and
3498                          * removing it from the list, we allow for the NI
3499                          * to be removed, then we use the cached NID to
3500                          * look it up again. If it's gone, then we just
3501                          * continue examining the rest of the queue.
3502                          */
3503                         lnet_net_lock(0);
3504                         list_del_init(&ni->ni_recovery);
3505                         lnet_ni_decref_locked(ni, 0);
3506                         lnet_net_unlock(0);
3507
3508                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3509                         ev_info->mt_nid = nid;
3510                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3511                                             ev_info, the_lnet.ln_mt_handler,
3512                                             true);
3513                         /* lookup the nid again */
3514                         lnet_net_lock(0);
3515                         ni = lnet_nid2ni_locked(nid, 0);
3516                         if (!ni) {
3517                                 /*
3518                                  * the NI has been deleted when we dropped
3519                                  * the ref count
3520                                  */
3521                                 lnet_net_unlock(0);
3522                                 LNetMDUnlink(mdh);
3523                                 continue;
3524                         }
3525                         ni->ni_ping_count++;
3526
3527                         ni->ni_ping_mdh = mdh;
3528                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3529                                                         now);
3530
3531                         if (rc) {
3532                                 lnet_ni_lock(ni);
3533                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3534                                 lnet_ni_unlock(ni);
3535                         }
3536                         lnet_net_unlock(0);
3537                 } else
3538                         lnet_ni_unlock(ni);
3539         }
3540
3541         /*
3542          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3543          * reexamined in the next iteration.
3544          */
3545         list_splice_init(&processed_list, &local_queue);
3546         lnet_net_lock(0);
3547         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3548         lnet_net_unlock(0);
3549 }
3550
3551 static int
3552 lnet_resendqs_create(void)
3553 {
3554         struct list_head **resendqs;
3555         resendqs = lnet_create_array_of_queues();
3556
3557         if (!resendqs)
3558                 return -ENOMEM;
3559
3560         lnet_net_lock(LNET_LOCK_EX);
3561         the_lnet.ln_mt_resendqs = resendqs;
3562         lnet_net_unlock(LNET_LOCK_EX);
3563
3564         return 0;
3565 }
3566
3567 static void
3568 lnet_clean_local_ni_recoveryq(void)
3569 {
3570         struct lnet_ni *ni;
3571
3572         /* This is only called when the monitor thread has stopped */
3573         lnet_net_lock(0);
3574
3575         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3576                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3577                                 struct lnet_ni, ni_recovery);
3578                 list_del_init(&ni->ni_recovery);
3579                 lnet_ni_lock(ni);
3580                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3581                 lnet_ni_unlock(ni);
3582                 lnet_ni_decref_locked(ni, 0);
3583         }
3584
3585         lnet_net_unlock(0);
3586 }
3587
3588 static void
3589 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3590                                      bool force)
3591 {
3592         struct lnet_handle_md recovery_mdh;
3593
3594         LNetInvalidateMDHandle(&recovery_mdh);
3595
3596         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3597                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3598                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3599         }
3600         spin_unlock(&lpni->lpni_lock);
3601         lnet_net_unlock(cpt);
3602         if (!LNetMDHandleIsInvalid(recovery_mdh))
3603                 LNetMDUnlink(recovery_mdh);
3604         lnet_net_lock(cpt);
3605         spin_lock(&lpni->lpni_lock);
3606 }
3607
3608 static void
3609 lnet_clean_peer_ni_recoveryq(void)
3610 {
3611         struct lnet_peer_ni *lpni, *tmp;
3612
3613         lnet_net_lock(LNET_LOCK_EX);
3614
3615         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3616                                  lpni_recovery) {
3617                 list_del_init(&lpni->lpni_recovery);
3618                 spin_lock(&lpni->lpni_lock);
3619                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3620                 spin_unlock(&lpni->lpni_lock);
3621                 lnet_peer_ni_decref_locked(lpni);
3622         }
3623
3624         lnet_net_unlock(LNET_LOCK_EX);
3625 }
3626
3627 static void
3628 lnet_clean_resendqs(void)
3629 {
3630         struct lnet_msg *msg, *tmp;
3631         LIST_HEAD(msgs);
3632         int i;
3633
3634         cfs_cpt_for_each(i, lnet_cpt_table()) {
3635                 lnet_net_lock(i);
3636                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3637                 lnet_net_unlock(i);
3638                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3639                         list_del_init(&msg->msg_list);
3640                         msg->msg_no_resend = true;
3641                         lnet_finalize(msg, -ESHUTDOWN);
3642                 }
3643         }
3644
3645         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3646 }
3647
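/*
 * Peer NI counterpart of lnet_recover_local_nis(): ping unhealthy peer NIs
 * on ln_mt_peerNIRecovq and requeue them until they either recover to full
 * health or are deleted.
 */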
3648 static void
3649 lnet_recover_peer_nis(void)
3650 {
3651         struct lnet_mt_event_info *ev_info;
3652         LIST_HEAD(processed_list);
3653         LIST_HEAD(local_queue);
3654         struct lnet_handle_md mdh;
3655         struct lnet_peer_ni *lpni;
3656         struct lnet_peer_ni *tmp;
3657         lnet_nid_t nid;
3658         int healthv;
3659         int rc;
3660         time64_t now;
3661
3662         /*
3663          * Always use cpt 0 for locking across all interactions with
3664          * ln_mt_peerNIRecovq
3665          */
3666         lnet_net_lock(0);
3667         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3668                          &local_queue);
3669         lnet_net_unlock(0);
3670
3671         now = ktime_get_seconds();
3672
3673         list_for_each_entry_safe(lpni, tmp, &local_queue,
3674                                  lpni_recovery) {
3675                 /*
3676                  * The same protection strategy is used here as is in the
3677                  * local recovery case.
3678                  */
3679                 lnet_net_lock(0);
3680                 healthv = atomic_read(&lpni->lpni_healthv);
3681                 spin_lock(&lpni->lpni_lock);
3682                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3683                     healthv == LNET_MAX_HEALTH_VALUE) {
3684                         list_del_init(&lpni->lpni_recovery);
3685                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3686                         spin_unlock(&lpni->lpni_lock);
3687                         lnet_peer_ni_decref_locked(lpni);
3688                         lnet_net_unlock(0);
3689                         continue;
3690                 }
3691
3692                 /*
3693                  * If the peer NI has failed recovery we must unlink the
3694                  * md. But we want to keep the peer ni on the recovery
3695                  * queue so we can try to continue recovering it
3696                  */
3697                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3698                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3699                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3700                 }
3701
3702                 spin_unlock(&lpni->lpni_lock);
3703
3704                 if (now < lpni->lpni_next_ping) {
3705                         lnet_net_unlock(0);
3706                         continue;
3707                 }
3708
3709                 lnet_net_unlock(0);
3710
3711                 /*
3712                  * NOTE: we're racing with peer deletion from user space.
3713                  * It's possible that a peer is deleted after we check its
3714                  * state. In this case the recovery can create a new peer
3715                  */
3716                 spin_lock(&lpni->lpni_lock);
3717                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3718                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3719                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3720                         spin_unlock(&lpni->lpni_lock);
3721
3722                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3723                         if (!ev_info) {
3724                                 CERROR("out of memory. Can't recover %s\n",
3725                                        libcfs_nid2str(lpni->lpni_nid));
3726                                 spin_lock(&lpni->lpni_lock);
3727                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3728                                 spin_unlock(&lpni->lpni_lock);
3729                                 continue;
3730                         }
3731
3732                         /* look at the comments in lnet_recover_local_nis() */
3733                         mdh = lpni->lpni_recovery_ping_mdh;
3734                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3735                         nid = lpni->lpni_nid;
3736                         lnet_net_lock(0);
3737                         list_del_init(&lpni->lpni_recovery);
3738                         lnet_peer_ni_decref_locked(lpni);
3739                         lnet_net_unlock(0);
3740
3741                         ev_info->mt_type = MT_TYPE_PEER_NI;
3742                         ev_info->mt_nid = nid;
3743                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3744                                             ev_info, the_lnet.ln_mt_handler,
3745                                             true);
3746                         lnet_net_lock(0);
3747                         /*
3748                          * lnet_find_peer_ni_locked() grabs a refcount for
3749                          * us. No need to take it explicitly.
3750                          */
3751                         lpni = lnet_find_peer_ni_locked(nid);
3752                         if (!lpni) {
3753                                 lnet_net_unlock(0);
3754                                 LNetMDUnlink(mdh);
3755                                 continue;
3756                         }
3757
3758                         lpni->lpni_ping_count++;
3759
3760                         lpni->lpni_recovery_ping_mdh = mdh;
3761
3762                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3763                                                              &processed_list,
3764                                                              now);
3765                         if (rc) {
3766                                 spin_lock(&lpni->lpni_lock);
3767                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3768                                 spin_unlock(&lpni->lpni_lock);
3769                         }
3770
3771                         /* Drop the ref taken by lnet_find_peer_ni_locked() */
3772                         lnet_peer_ni_decref_locked(lpni);
3773                         lnet_net_unlock(0);
3774                 } else
3775                         spin_unlock(&lpni->lpni_lock);
3776         }
3777
3778         list_splice_init(&processed_list, &local_queue);
3779         lnet_net_lock(0);
3780         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3781         lnet_net_unlock(0);
3782 }
3783
3784 static int
3785 lnet_monitor_thread(void *arg)
3786 {
3787         time64_t rsp_timeout = 0;
3788         time64_t now;
3789
3790         wait_for_completion(&the_lnet.ln_started);
3791         /*
3792          * The monitor thread takes care of the following:
3793          *  1. Checks the aliveness of routers
3794          *  2. Checks if there are messages on the resend queue to resend
3795          *     them.
3796          *  3. Check if there are any NIs on the local recovery queue and
3797          *     pings them
3798          *  4. Checks if there are any NIs on the remote recovery queue
3799          *     and pings them.
3800          */
3801         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3802                 now = ktime_get_real_seconds();
3803
3804                 if (lnet_router_checker_active())
3805                         lnet_check_routers();
3806
3807                 lnet_resend_pending_msgs();
3808
3809                 if (now >= rsp_timeout) {
3810                         lnet_finalize_expired_responses();
3811                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3812                 }
3813
3814                 lnet_recover_local_nis();
3815                 lnet_recover_peer_nis();
3816
3817                 /*
3818                  * TODO do we need to check if we should sleep without
3819                  * timeout?  Technically, an active system will always
3820                  * have messages in flight so this check will always
3821                  * evaluate to false. And on an idle system do we care
3822                  * if we wake up every 1 second? Although, we've seen
3823                  * cases where we get a complaint that an idle thread
3824                  * is waking up unnecessarily.
3825                  */
3826                 wait_for_completion_interruptible_timeout(
3827                         &the_lnet.ln_mt_wait_complete,
3828                         cfs_time_seconds(1));
3829                 /* Must re-init the completion before testing anything,
3830                  * including ln_mt_state.
3831                  */
3832                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3833         }
3834
3835         /* Shutting down */
3836         lnet_net_lock(LNET_LOCK_EX);
3837         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3838         lnet_net_unlock(LNET_LOCK_EX);
3839
3840         /* signal that the monitor thread is exiting */
3841         up(&the_lnet.ln_mt_signal);
3842
3843         return 0;
3844 }
3845
3846 /*
3847  * lnet_send_ping
3848  * Sends a ping.
3849  * Returns == 0 if success
3850  * Returns > 0 if LNetMDBind or prior fails
3851  * Returns < 0 if LNetGet fails
3852  */
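/*
 * For example, the recovery paths above issue a ping as:
 *
 *	rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
 *			    ev_info, the_lnet.ln_mt_handler, true);
 *
 * with the resulting SEND/REPLY/UNLINK events delivered to
 * lnet_mt_event_handler() below.
 */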
3853 int
3854 lnet_send_ping(lnet_nid_t dest_nid,
3855                struct lnet_handle_md *mdh, int nnis,
3856                void *user_data, lnet_handler_t handler, bool recovery)
3857 {
3858         struct lnet_md md = { NULL };
3859         struct lnet_process_id id;
3860         struct lnet_ping_buffer *pbuf;
3861         int rc;
3862
3863         if (dest_nid == LNET_NID_ANY) {
3864                 rc = -EHOSTUNREACH;
3865                 goto fail_error;
3866         }
3867
3868         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3869         if (!pbuf) {
3870                 rc = ENOMEM;
3871                 goto fail_error;
3872         }
3873
3874         /* initialize md content */
3875         md.start     = &pbuf->pb_info;
3876         md.length    = LNET_PING_INFO_SIZE(nnis);
3877         md.threshold = 2; /* GET/REPLY */
3878         md.max_size  = 0;
3879         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3880         md.user_ptr  = user_data;
3881         md.handler   = handler;
3882
3883         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3884         if (rc) {
3885                 lnet_ping_buffer_decref(pbuf);
3886                 CERROR("Can't bind MD: %d\n", rc);
3887                 rc = -rc; /* change the rc to positive */
3888                 goto fail_error;
3889         }
3890         id.pid = LNET_PID_LUSTRE;
3891         id.nid = dest_nid;
3892
3893         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3894                      LNET_RESERVED_PORTAL,
3895                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3896
3897         if (rc)
3898                 goto fail_unlink_md;
3899
3900         return 0;
3901
3902 fail_unlink_md:
3903         LNetMDUnlink(*mdh);
3904         LNetInvalidateMDHandle(mdh);
3905 fail_error:
3906         return rc;
3907 }
3908
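/*
 * Process the outcome of a recovery ping for either a local NI or a peer
 * NI: clear the RECOVERY_PENDING state when appropriate, record a failed
 * recovery on error, and (for local NIs) bump the health value on a
 * successful reply.
 */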
3909 static void
3910 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3911                            int status, bool send, bool unlink_event)
3912 {
3913         lnet_nid_t nid = ev_info->mt_nid;
3914
3915         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3916                 struct lnet_ni *ni;
3917
3918                 lnet_net_lock(0);
3919                 ni = lnet_nid2ni_locked(nid, 0);
3920                 if (!ni) {
3921                         lnet_net_unlock(0);
3922                         return;
3923                 }
3924                 lnet_ni_lock(ni);
3925                 if (!send || (send && status != 0))
3926                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3927                 if (status)
3928                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3929                 lnet_ni_unlock(ni);
3930                 lnet_net_unlock(0);
3931
3932                 if (status != 0) {
3933                         CERROR("local NI (%s) recovery failed with %d\n",
3934                                libcfs_nid2str(nid), status);
3935                         return;
3936                 }
3937                 /*
3938                  * need to increment healthv for the ni here, because in
3939                  * the lnet_finalize() path we don't have access to this
3940                  * NI. And in order to get access to it, we'll need to
3941                  * carry forward too much information.
3942                  * In the peer case, it'll naturally be incremented
3943                  */
3944                 if (!unlink_event)
3945                         lnet_inc_healthv(&ni->ni_healthv,
3946                                          lnet_health_sensitivity);
3947         } else {
3948                 struct lnet_peer_ni *lpni;
3949                 int cpt;
3950
3951                 cpt = lnet_net_lock_current();
3952                 lpni = lnet_find_peer_ni_locked(nid);
3953                 if (!lpni) {
3954                         lnet_net_unlock(cpt);
3955                         return;
3956                 }
3957                 spin_lock(&lpni->lpni_lock);
3958                 if (!send || (send && status != 0))
3959                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3960                 if (status)
3961                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3962                 spin_unlock(&lpni->lpni_lock);
3963                 lnet_peer_ni_decref_locked(lpni);
3964                 lnet_net_unlock(cpt);
3965
3966                 if (status != 0)
3967                         CERROR("peer NI (%s) recovery failed with %d\n",
3968                                libcfs_nid2str(nid), status);
3969         }
3970 }
3971
3972 void
3973 lnet_mt_event_handler(struct lnet_event *event)
3974 {
3975         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
3976         struct lnet_ping_buffer *pbuf;
3977
3978         /* TODO: remove assert */
3979         LASSERT(event->type == LNET_EVENT_REPLY ||
3980                 event->type == LNET_EVENT_SEND ||
3981                 event->type == LNET_EVENT_UNLINK);
3982
3983         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3984                event->status);
3985
3986         switch (event->type) {
3987         case LNET_EVENT_UNLINK:
3988                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3989                        libcfs_nid2str(ev_info->mt_nid));
3990                 /* fallthrough */
3991         case LNET_EVENT_REPLY:
3992                 lnet_handle_recovery_reply(ev_info, event->status, false,
3993                                            event->type == LNET_EVENT_UNLINK);
3994                 break;
3995         case LNET_EVENT_SEND:
3996                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3997                                libcfs_nid2str(ev_info->mt_nid),
3998                                (event->status) ? "unsuccessfully" :
3999                                "successfully", event->status);
4000                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4001                 break;
4002         default:
4003                 CERROR("Unexpected event: %d\n", event->type);
4004                 break;
4005         }
4006         if (event->unlinked) {
4007                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4008                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4009                 lnet_ping_buffer_decref(pbuf);
4010         }
4011 }
4012
4013 static int
4014 lnet_rsp_tracker_create(void)
4015 {
4016         struct list_head **rstqs;
4017         rstqs = lnet_create_array_of_queues();
4018
4019         if (!rstqs)
4020                 return -ENOMEM;
4021
4022         the_lnet.ln_mt_rstq = rstqs;
4023
4024         return 0;
4025 }
4026
4027 static void
4028 lnet_rsp_tracker_clean(void)
4029 {
4030         lnet_finalize_expired_responses();
4031
4032         cfs_percpt_free(the_lnet.ln_mt_rstq);
4033         the_lnet.ln_mt_rstq = NULL;
4034 }
4035
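/*
 * Create the resend queues and response tracker queues, then start the
 * monitor thread. On failure everything allocated here is torn down again
 * before returning the error.
 */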
4036 int lnet_monitor_thr_start(void)
4037 {
4038         int rc = 0;
4039         struct task_struct *task;
4040
4041         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4042                 return -EALREADY;
4043
4044         rc = lnet_resendqs_create();
4045         if (rc)
4046                 return rc;
4047
4048         rc = lnet_rsp_tracker_create();
4049         if (rc)
4050                 goto clean_queues;
4051
4052         sema_init(&the_lnet.ln_mt_signal, 0);
4053
4054         lnet_net_lock(LNET_LOCK_EX);
4055         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4056         lnet_net_unlock(LNET_LOCK_EX);
4057         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4058         if (IS_ERR(task)) {
4059                 rc = PTR_ERR(task);
4060                 CERROR("Can't start monitor thread: %d\n", rc);
4061                 goto clean_thread;
4062         }
4063
4064         return 0;
4065
4066 clean_thread:
4067         lnet_net_lock(LNET_LOCK_EX);
4068         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4069         lnet_net_unlock(LNET_LOCK_EX);
4070         /* block until event callback signals exit */
4071         down(&the_lnet.ln_mt_signal);
4072         /* clean up */
4073         lnet_net_lock(LNET_LOCK_EX);
4074         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4075         lnet_net_unlock(LNET_LOCK_EX);
4076         lnet_rsp_tracker_clean();
4077         lnet_clean_local_ni_recoveryq();
4078         lnet_clean_peer_ni_recoveryq();
4079         lnet_clean_resendqs();
4080         the_lnet.ln_mt_handler = NULL;
4081         return rc;
4082 clean_queues:
4083         lnet_rsp_tracker_clean();
4084         lnet_clean_local_ni_recoveryq();
4085         lnet_clean_peer_ni_recoveryq();
4086         lnet_clean_resendqs();
4087         return rc;
4088 }
4089
4090 void lnet_monitor_thr_stop(void)
4091 {
4092         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4093                 return;
4094
4095         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4096         lnet_net_lock(LNET_LOCK_EX);
4097         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4098         lnet_net_unlock(LNET_LOCK_EX);
4099
4100         /* tell the monitor thread that we're shutting down */
4101         complete(&the_lnet.ln_mt_wait_complete);
4102
4103         /* block until monitor thread signals that it's done */
4104         down(&the_lnet.ln_mt_signal);
4105         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4106
4107         /* perform cleanup tasks */
4108         lnet_rsp_tracker_clean();
4109         lnet_clean_local_ni_recoveryq();
4110         lnet_clean_peer_ni_recoveryq();
4111         lnet_clean_resendqs();
4112 }
4113
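/*
 * Account a dropped message in the NI statistics and the per-CPT drop
 * counters, then tell the LND to consume and discard the payload
 * (lnet_ni_recv() with a NULL msg).
 */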
4114 void
4115 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4116                   __u32 msg_type)
4117 {
4118         lnet_net_lock(cpt);
4119         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4120         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4121         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4122         lnet_net_unlock(cpt);
4123
4124         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4125 }
4126
4127 static void
4128 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4129 {
4130         struct lnet_hdr *hdr = &msg->msg_hdr;
4131
4132         if (msg->msg_wanted != 0)
4133                 lnet_setpayloadbuffer(msg);
4134
4135         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4136
4137         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4138          * it back into the ACK during lnet_finalize() */
4139         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4140                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4141
4142         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4143                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4144 }
4145
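/*
 * Handle an incoming PUT: convert the header fields to host byte order,
 * try to match an MD on the target portal, and if no match is found yet
 * call the LND's eager receive once before either delaying or dropping
 * the message.
 */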
4146 static int
4147 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4148 {
4149         struct lnet_hdr         *hdr = &msg->msg_hdr;
4150         struct lnet_match_info  info;
4151         int                     rc;
4152         bool                    ready_delay;
4153
4154         /* Convert put fields to host byte order */
4155         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4156         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4157         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4158
4159         /* Primary peer NID. */
4160         info.mi_id.nid  = msg->msg_initiator;
4161         info.mi_id.pid  = hdr->src_pid;
4162         info.mi_opc     = LNET_MD_OP_PUT;
4163         info.mi_portal  = hdr->msg.put.ptl_index;
4164         info.mi_rlength = hdr->payload_length;
4165         info.mi_roffset = hdr->msg.put.offset;
4166         info.mi_mbits   = hdr->msg.put.match_bits;
4167         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4168
4169         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4170         ready_delay = msg->msg_rx_ready_delay;
4171
4172  again:
4173         rc = lnet_ptl_match_md(&info, msg);
4174         switch (rc) {
4175         default:
4176                 LBUG();
4177
4178         case LNET_MATCHMD_OK:
4179                 lnet_recv_put(ni, msg);
4180                 return 0;
4181
4182         case LNET_MATCHMD_NONE:
4183                 if (ready_delay)
4184                         /* no eager_recv, or it has already been called;
4185                          * the message should have been attached to the delayed list */
4186                         return 0;
4187
4188                 rc = lnet_ni_eager_recv(ni, msg);
4189                 if (rc == 0) {
4190                         ready_delay = true;
4191                         goto again;
4192                 }
4193                 /* fall through */
4194
4195         case LNET_MATCHMD_DROP:
4196                 CNETERR("Dropping PUT from %s portal %d match %llu"
4197                         " offset %d length %d: %d\n",
4198                         libcfs_id2str(info.mi_id), info.mi_portal,
4199                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4200
4201                 return -ENOENT; /* -ve: OK but no match */
4202         }
4203 }
4204
4205 static int
4206 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4207 {
4208         struct lnet_match_info info;
4209         struct lnet_hdr *hdr = &msg->msg_hdr;
4210         struct lnet_process_id source_id;
4211         struct lnet_handle_wire reply_wmd;
4212         int rc;
4213
4214         /* Convert get fields to host byte order */
4215         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4216         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4217         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4218         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4219
4220         source_id.nid = hdr->src_nid;
4221         source_id.pid = hdr->src_pid;
4222         /* Primary peer NID */
4223         info.mi_id.nid  = msg->msg_initiator;
4224         info.mi_id.pid  = hdr->src_pid;
4225         info.mi_opc     = LNET_MD_OP_GET;
4226         info.mi_portal  = hdr->msg.get.ptl_index;
4227         info.mi_rlength = hdr->msg.get.sink_length;
4228         info.mi_roffset = hdr->msg.get.src_offset;
4229         info.mi_mbits   = hdr->msg.get.match_bits;
4230         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4231
4232         rc = lnet_ptl_match_md(&info, msg);
4233         if (rc == LNET_MATCHMD_DROP) {
4234                 CNETERR("Dropping GET from %s portal %d match %llu"
4235                         " offset %d length %d\n",
4236                         libcfs_id2str(info.mi_id), info.mi_portal,
4237                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4238                 return -ENOENT; /* -ve: OK but no match */
4239         }
4240
4241         LASSERT(rc == LNET_MATCHMD_OK);
4242
4243         lnet_build_msg_event(msg, LNET_EVENT_GET);
4244
4245         reply_wmd = hdr->msg.get.return_wmd;
4246
4247         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4248                        msg->msg_offset, msg->msg_wanted);
4249
4250         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4251
4252         if (rdma_get) {
4253                 /* The LND completes the REPLY from her recv procedure */
4254                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4255                              msg->msg_offset, msg->msg_len, msg->msg_len);
4256                 return 0;
4257         }
4258
4259         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4260         msg->msg_receiving = 0;
4261
4262         /* FIXME need to handle large-addr nid */
4263         rc = lnet_send(lnet_nid_to_nid4(&ni->ni_nid), msg, msg->msg_from);
4264         if (rc < 0) {
4265                 /* didn't get as far as lnet_ni_send() */
4266                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4267                        libcfs_nidstr(&ni->ni_nid),
4268                        libcfs_id2str(info.mi_id), rc);
4269
4270                 lnet_finalize(msg, rc);
4271         }
4272
4273         return 0;
4274 }
4275
4276 static int
4277 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4278 {
4279         void *private = msg->msg_private;
4280         struct lnet_hdr *hdr = &msg->msg_hdr;
4281         struct lnet_process_id src = {0};
4282         struct lnet_libmd *md;
4283         unsigned int rlength;
4284         unsigned int mlength;
4285         int cpt;
4286
4287         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4288         lnet_res_lock(cpt);
4289
4290         src.nid = hdr->src_nid;
4291         src.pid = hdr->src_pid;
4292
4293         /* NB handles only looked up by creator (no flips) */
4294         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4295         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4296                 CNETERR("%s: Dropping REPLY from %s for %s "
4297                         "MD %#llx.%#llx\n",
4298                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4299                         (md == NULL) ? "invalid" : "inactive",
4300                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4301                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4302                 if (md != NULL && md->md_me != NULL)
4303                         CERROR("REPLY MD also attached to portal %d\n",
4304                                md->md_me->me_portal);
4305
4306                 lnet_res_unlock(cpt);
4307                 return -ENOENT; /* -ve: OK but no match */
4308         }
4309
4310         LASSERT(md->md_offset == 0);
4311
4312         rlength = hdr->payload_length;
4313         mlength = min(rlength, md->md_length);
4314
4315         if (mlength < rlength &&
4316             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4317                 CNETERR("%s: Dropping REPLY from %s length %d "
4318                         "for MD %#llx would overflow (%d)\n",
4319                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4320                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4321                         mlength);
4322                 lnet_res_unlock(cpt);
4323                 return -ENOENT; /* -ve: OK but no match */
4324         }
4325
4326         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4327                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4328                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4329
4330         lnet_msg_attach_md(msg, md, 0, mlength);
4331
4332         if (mlength != 0)
4333                 lnet_setpayloadbuffer(msg);
4334
4335         lnet_res_unlock(cpt);
4336
4337         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4338
4339         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4340         return 0;
4341 }
4342
4343 static int
4344 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4345 {
4346         struct lnet_hdr *hdr = &msg->msg_hdr;
4347         struct lnet_process_id src = {0};
4348         struct lnet_libmd *md;
4349         int cpt;
4350
4351         src.nid = hdr->src_nid;
4352         src.pid = hdr->src_pid;
4353
4354         /* Convert ack fields to host byte order */
4355         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4356         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4357
4358         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4359         lnet_res_lock(cpt);
4360
4361         /* NB handles only looked up by creator (no flips) */
4362         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4363         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4364                 /* Don't moan; this is expected */
4365                 CDEBUG(D_NET,
4366                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4367                        libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4368                        (md == NULL) ? "invalid" : "inactive",
4369                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4370                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4371                 if (md != NULL && md->md_me != NULL)
4372                         CERROR("Source MD also attached to portal %d\n",
4373                                md->md_me->me_portal);
4374
4375                 lnet_res_unlock(cpt);
4376                 return -ENOENT;                  /* -ve! */
4377         }
4378
4379         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4380                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4381                hdr->msg.ack.dst_wmd.wh_object_cookie);
4382
4383         lnet_msg_attach_md(msg, md, 0, 0);
4384
4385         lnet_res_unlock(cpt);
4386
4387         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4388
4389         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4390         return 0;
4391 }
4392
4393 /**
4394  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4395  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4396  * \retval -ve                  error code
4397  */
4398 int
4399 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4400 {
4401         int     rc = 0;
4402
4403         if (!the_lnet.ln_routing)
4404                 return -ECANCELED;
4405
4406         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4407             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4408                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4409                         msg->msg_rx_ready_delay = 1;
4410                 } else {
4411                         lnet_net_unlock(msg->msg_rx_cpt);
4412                         rc = lnet_ni_eager_recv(ni, msg);
4413                         lnet_net_lock(msg->msg_rx_cpt);
4414                 }
4415         }
4416
4417         if (rc == 0)
4418                 rc = lnet_post_routed_recv_locked(msg, 0);
4419         return rc;
4420 }
4421
4422 int
4423 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4424 {
4425         int     rc;
4426
4427         switch (msg->msg_type) {
4428         case LNET_MSG_ACK:
4429                 rc = lnet_parse_ack(ni, msg);
4430                 break;
4431         case LNET_MSG_PUT:
4432                 rc = lnet_parse_put(ni, msg);
4433                 break;
4434         case LNET_MSG_GET:
4435                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4436                 break;
4437         case LNET_MSG_REPLY:
4438                 rc = lnet_parse_reply(ni, msg);
4439                 break;
4440         default: /* prevent an unused label if !kernel */
4441                 LASSERT(0);
4442                 return -EPROTO;
4443         }
4444
4445         LASSERT(rc == 0 || rc == -ENOENT);
4446         return rc;
4447 }
4448
4449 char *
4450 lnet_msgtyp2str (int type)
4451 {
4452         switch (type) {
4453         case LNET_MSG_ACK:
4454                 return ("ACK");
4455         case LNET_MSG_PUT:
4456                 return ("PUT");
4457         case LNET_MSG_GET:
4458                 return ("GET");
4459         case LNET_MSG_REPLY:
4460                 return ("REPLY");
4461         case LNET_MSG_HELLO:
4462                 return ("HELLO");
4463         default:
4464                 return ("<UNKNOWN>");
4465         }
4466 }
4467
4468 int
4469 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4470            void *private, int rdma_req)
4471 {
4472         struct lnet_peer_ni *lpni;
4473         struct lnet_msg *msg;
4474         __u32 payload_length;
4475         lnet_pid_t dest_pid;
4476         lnet_nid_t dest_nid;
4477         lnet_nid_t src_nid;
4478         bool push = false;
4479         int for_me;
4480         __u32 type;
4481         int rc = 0;
4482         int cpt;
4483
4484         LASSERT (!in_interrupt ());
4485
4486         type = le32_to_cpu(hdr->type);
4487         src_nid = le64_to_cpu(hdr->src_nid);
4488         dest_nid = le64_to_cpu(hdr->dest_nid);
4489         dest_pid = le32_to_cpu(hdr->dest_pid);
4490         payload_length = le32_to_cpu(hdr->payload_length);
4491
4492         /* FIXME handle large-addr nids */
4493         for_me = (lnet_nid_to_nid4(&ni->ni_nid) == dest_nid);
4494         cpt = lnet_cpt_of_nid(from_nid, ni);
4495
4496         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4497                 libcfs_nid2str(dest_nid),
4498                 libcfs_nidstr(&ni->ni_nid),
4499                 libcfs_nid2str(src_nid),
4500                 lnet_msgtyp2str(type),
4501                 (for_me) ? "for me" : "routed");
4502
4503         switch (type) {
4504         case LNET_MSG_ACK:
4505         case LNET_MSG_GET:
4506                 if (payload_length > 0) {
4507                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4508                                libcfs_nid2str(from_nid),
4509                                libcfs_nid2str(src_nid),
4510                                lnet_msgtyp2str(type), payload_length);
4511                         return -EPROTO;
4512                 }
4513                 break;
4514
4515         case LNET_MSG_PUT:
4516         case LNET_MSG_REPLY:
4517                 if (payload_length >
4518                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4519                         CERROR("%s, src %s: bad %s payload %d "
4520                                "(%d max expected)\n",
4521                                libcfs_nid2str(from_nid),
4522                                libcfs_nid2str(src_nid),
4523                                lnet_msgtyp2str(type),
4524                                payload_length,
4525                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4526                         return -EPROTO;
4527                 }
4528                 break;
4529
4530         default:
4531                 CERROR("%s, src %s: Bad message type 0x%x\n",
4532                        libcfs_nid2str(from_nid),
4533                        libcfs_nid2str(src_nid), type);
4534                 return -EPROTO;
4535         }
4536
4537         if (the_lnet.ln_routing &&
4538             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4539                 lnet_ni_lock(ni);
4540                 spin_lock(&ni->ni_net->net_lock);
4541                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4542                 spin_unlock(&ni->ni_net->net_lock);
4543                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4544                 lnet_ni_unlock(ni);
4545         }
4546
4547         if (push)
4548                 lnet_push_update_to_peers(1);
4549
4550         /* Regard a bad destination NID as a protocol error.  Senders should
4551          * know what they're doing; if they don't they're misconfigured, buggy
4552          * or malicious so we chop them off at the knees :) */
4553
4554         if (!for_me) {
4555                 if (LNET_NIDNET(dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4556                         /* should have gone direct */
4557                         CERROR("%s, src %s: Bad dest nid %s "
4558                                "(should have been sent direct)\n",
4559                                 libcfs_nid2str(from_nid),
4560                                 libcfs_nid2str(src_nid),
4561                                 libcfs_nid2str(dest_nid));
4562                         return -EPROTO;
4563                 }
4564
4565                 if (lnet_islocalnid(dest_nid)) {
4566                         /* dest is another local NI; sender should have used
4567                          * this node's NID on its own network */
4568                         CERROR("%s, src %s: Bad dest nid %s "
4569                                "(it's my nid but on a different network)\n",
4570                                 libcfs_nid2str(from_nid),
4571                                 libcfs_nid2str(src_nid),
4572                                 libcfs_nid2str(dest_nid));
4573                         return -EPROTO;
4574                 }
4575
4576                 if (rdma_req && type == LNET_MSG_GET) {
4577                         CERROR("%s, src %s: Bad optimized GET for %s "
4578                                "(final destination must be me)\n",
4579                                 libcfs_nid2str(from_nid),
4580                                 libcfs_nid2str(src_nid),
4581                                 libcfs_nid2str(dest_nid));
4582                         return -EPROTO;
4583                 }
4584
4585                 if (!the_lnet.ln_routing) {
4586                         CERROR("%s, src %s: Dropping message for %s "
4587                                "(routing not enabled)\n",
4588                                 libcfs_nid2str(from_nid),
4589                                 libcfs_nid2str(src_nid),
4590                                 libcfs_nid2str(dest_nid));
4591                         goto drop;
4592                 }
4593         }
4594
4595         /* Message looks OK; we're not going to return an error, so we MUST
4596          * call back lnd_recv() come what may... */
4597
4598         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4599             fail_peer(src_nid, 0)) {                    /* shall we now? */
4600                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4601                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4602                        lnet_msgtyp2str(type));
4603                 goto drop;
4604         }
4605
4606         /* FIXME need to support large-addr nid */
4607         if (!list_empty(&the_lnet.ln_drop_rules) &&
4608             lnet_drop_rule_match(hdr, lnet_nid_to_nid4(&ni->ni_nid), NULL)) {
4609                 CDEBUG(D_NET,
4610                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4611                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4612                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4613                 goto drop;
4614         }
4615
4616         msg = lnet_msg_alloc();
4617         if (msg == NULL) {
4618                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4619                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4620                        lnet_msgtyp2str(type));
4621                 goto drop;
4622         }
4623
4624         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4625          * pointers NULL etc */
4626
4627         msg->msg_type = type;
4628         msg->msg_private = private;
4629         msg->msg_receiving = 1;
4630         msg->msg_rdma_get = rdma_req;
4631         msg->msg_len = msg->msg_wanted = payload_length;
4632         msg->msg_offset = 0;
4633         msg->msg_hdr = *hdr;
4634         /* for building message event */
4635         msg->msg_from = from_nid;
4636         if (!for_me) {
4637                 msg->msg_target.pid     = dest_pid;
4638                 msg->msg_target.nid     = dest_nid;
4639                 msg->msg_routing        = 1;
4640
4641         } else {
4642                 /* convert common msg->hdr fields to host byteorder */
4643                 msg->msg_hdr.type       = type;
4644                 msg->msg_hdr.src_nid    = src_nid;
4645                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4646                 msg->msg_hdr.dest_nid   = dest_nid;
4647                 msg->msg_hdr.dest_pid   = dest_pid;
4648                 msg->msg_hdr.payload_length = payload_length;
4649         }
4650
4651         lnet_net_lock(cpt);
4652         /* FIXME support large-addr nid */
4653         lpni = lnet_nid2peerni_locked(from_nid, lnet_nid_to_nid4(&ni->ni_nid),
4654                                       cpt);
4655         if (IS_ERR(lpni)) {
4656                 lnet_net_unlock(cpt);
4657                 CERROR("%s, src %s: Dropping %s "
4658                        "(error %ld looking up sender)\n",
4659                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4660                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4661                 lnet_msg_free(msg);
4662         if (PTR_ERR(lpni) == -ESHUTDOWN)
4663                         /* We are shutting down.  Don't do anything more */
4664                         return 0;
4665                 goto drop;
4666         }
4667
4668         /* If this message was forwarded to us from a router then we may need
4669          * to update router aliveness or check for an asymmetrical route
4670          * (or both)
4671          */
4672         if (((lnet_drop_asym_route && for_me) ||
4673              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4674             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4675                 __u32 src_net_id = LNET_NIDNET(src_nid);
4676                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4677                 struct lnet_route *route;
4678                 bool found = false;
4679
4680                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4681                         if (route->lr_net == src_net_id) {
4682                                 found = true;
4683                                 /* If we're transitioning the gateway from
4684                                  * dead -> alive, and discovery is disabled
4685                                  * locally or on the gateway, then we need to
4686                                  * update the cached route aliveness for each
4687                                  * route to the src_nid's net.
4688                                  *
4689                                  * Otherwise, we're only checking for a
4690                                  * symmetrical route, and we can break out
4691                                  * of the loop.
4692                                  */
4693                                 if (!gw->lp_alive &&
4694                                     lnet_is_discovery_disabled(gw))
4695                                         lnet_set_route_aliveness(route, true);
4696                                 else
4697                                         break;
4698                         }
4699                 }
4700                 if (lnet_drop_asym_route && for_me && !found) {
4701                         lnet_net_unlock(cpt);
4702                         /* we would not use from_nid to route a message to
4703                          * src_nid
4704                          * => asymmetric routing detected but forbidden
4705                          */
4706                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4707                                libcfs_nid2str(from_nid),
4708                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4709                         lnet_msg_free(msg);
4710                         goto drop;
4711                 }
4712                 if (!gw->lp_alive) {
4713                         struct lnet_peer_net *lpn;
4714                         struct lnet_peer_ni *lpni2;
4715
4716                         gw->lp_alive = true;
4717                         /* Mark all remote NIs on src_nid's net UP */
4718                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4719                         if (lpn)
4720                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4721                                                     lpni_peer_nis)
4722                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4723                 }
4724         }
4725
4726         lpni->lpni_last_alive = ktime_get_seconds();
4727
4728         msg->msg_rxpeer = lpni;
4729         msg->msg_rxni = ni;
4730         lnet_ni_addref_locked(ni, cpt);
4731         /* Multi-Rail: Primary NID of source. */
4732         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4733
4734         /*
4735          * mark the status of this lpni as UP since we received a message
4736          * from it. The ping response reports back the ns_status which is
4737          * marked on the remote as up or down and we cache it here.
4738          */
4739         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4740
4741         lnet_msg_commit(msg, cpt);
4742
4743         /* message delay simulation */
4744         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4745                      lnet_delay_rule_match_locked(hdr, msg))) {
4746                 lnet_net_unlock(cpt);
4747                 return 0;
4748         }
4749
4750         if (!for_me) {
4751                 rc = lnet_parse_forward_locked(ni, msg);
4752                 lnet_net_unlock(cpt);
4753
4754                 if (rc < 0)
4755                         goto free_drop;
4756
4757                 if (rc == LNET_CREDIT_OK) {
4758                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4759                                      0, payload_length, payload_length);
4760                 }
4761                 return 0;
4762         }
4763
4764         lnet_net_unlock(cpt);
4765
4766         rc = lnet_parse_local(ni, msg);
4767         if (rc != 0)
4768                 goto free_drop;
4769         return 0;
4770
4771  free_drop:
4772         LASSERT(msg->msg_md == NULL);
4773         lnet_finalize(msg, rc);
4774
4775  drop:
4776         lnet_drop_message(ni, cpt, private, payload_length, type);
4777         return 0;
4778 }
4779 EXPORT_SYMBOL(lnet_parse);
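
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * an LND receive path typically hands each incoming header to lnet_parse()
 * and treats a negative return as a protocol error, in which case LNet will
 * not call back lnd_recv() for that message.  The 'private' cookie is
 * whatever the LND wants handed back in its lnd_recv(); it is left NULL
 * here purely for illustration.
 */
static int __maybe_unused
lnd_example_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
                  lnet_nid_t from_nid)
{
        int rc;

        rc = lnet_parse(ni, hdr, from_nid, NULL, 0);
        if (rc < 0)
                CNETERR("dropping message from %s: %d\n",
                        libcfs_nid2str(from_nid), rc);
        return rc;
}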
4780
4781 void
4782 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4783 {
4784         while (!list_empty(head)) {
4785                 struct lnet_process_id id = {0};
4786                 struct lnet_msg *msg;
4787
4788                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4789                 list_del(&msg->msg_list);
4790
4791                 id.nid = msg->msg_hdr.src_nid;
4792                 id.pid = msg->msg_hdr.src_pid;
4793
4794                 LASSERT(msg->msg_md == NULL);
4795                 LASSERT(msg->msg_rx_delayed);
4796                 LASSERT(msg->msg_rxpeer != NULL);
4797                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4798
4799                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4800                       " offset %d length %d: %s\n",
4801                       libcfs_id2str(id),
4802                       msg->msg_hdr.msg.put.ptl_index,
4803                       msg->msg_hdr.msg.put.match_bits,
4804                       msg->msg_hdr.msg.put.offset,
4805                       msg->msg_hdr.payload_length, reason);
4806
4807                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4808                  * called lnet_drop_message(), so I just hang onto msg as well
4809                  * until that's done */
4810
4811                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4812                                   msg->msg_private, msg->msg_len,
4813                                   msg->msg_type);
4814
4815                 msg->msg_no_resend = true;
4816                 /*
4817                  * NB: the message will not generate an event because it has no
4818                  * attached MD, but we should still give an error code so that
4819                  * lnet_msg_decommit() can skip counter operations and other checks.
4820                  */
4821                 lnet_finalize(msg, -ENOENT);
4822         }
4823 }
4824
4825 void
4826 lnet_recv_delayed_msg_list(struct list_head *head)
4827 {
4828         while (!list_empty(head)) {
4829                 struct lnet_msg *msg;
4830                 struct lnet_process_id id;
4831
4832                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4833                 list_del(&msg->msg_list);
4834
4835                 /* md won't disappear under me, since each msg
4836                  * holds a ref on it */
4837
4838                 id.nid = msg->msg_hdr.src_nid;
4839                 id.pid = msg->msg_hdr.src_pid;
4840
4841                 LASSERT(msg->msg_rx_delayed);
4842                 LASSERT(msg->msg_md != NULL);
4843                 LASSERT(msg->msg_rxpeer != NULL);
4844                 LASSERT(msg->msg_rxni != NULL);
4845                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4846
4847                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4848                        "match %llu offset %d length %d.\n",
4849                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4850                         msg->msg_hdr.msg.put.match_bits,
4851                         msg->msg_hdr.msg.put.offset,
4852                         msg->msg_hdr.payload_length);
4853
4854                 lnet_recv_put(msg->msg_rxni, msg);
4855         }
4856 }
4857
4858 static void
4859 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4860                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4861 {
4862         s64 timeout_ns;
4863         struct lnet_rsp_tracker *local_rspt;
4864
4865         /*
4866          * The MD has a refcount taken by the message, so it's not going away.
4867          * The MD can, however, still be looked up, so we need to secure access
4868          * to md_rspt_ptr by taking the res_lock.
4869          * The rspt can be accessed without protection up until it is added
4870          * to the list.
4871          */
4872
4873         lnet_res_lock(cpt);
4874         local_rspt = md->md_rspt_ptr;
4875         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4876         if (local_rspt != NULL) {
4877                 /*
4878                  * we already have an rspt attached to the md, so we'll
4879                  * update the deadline on that one.
4880                  */
4881                 lnet_rspt_free(rspt, cpt);
4882         } else {
4883                 /* new md */
4884                 rspt->rspt_mdh = mdh;
4885                 rspt->rspt_cpt = cpt;
4886                 /* store the rspt so we can access it when we get the REPLY */
4887                 md->md_rspt_ptr = rspt;
4888                 local_rspt = rspt;
4889         }
4890         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4891
4892         /*
4893          * add to the list of tracked responses. It's added to tail of the
4894          * list in order to expire all the older entries first.
4895          */
4896         lnet_net_lock(cpt);
4897         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4898         lnet_net_unlock(cpt);
4899         lnet_res_unlock(cpt);
4900 }
4901
4902 /**
4903  * Initiate an asynchronous PUT operation.
4904  *
4905  * There are several events associated with a PUT: completion of the send on
4906  * the initiator node (LNET_EVENT_SEND), and when the send completes
4907  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4908  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4909  * used at the target node to indicate the completion of incoming data
4910  * delivery.
4911  *
4912  * The local events will be logged in the EQ associated with the MD pointed to
4913  * by the \a mdh handle. Using an MD without an associated EQ results in these
4914  * events being discarded. In this case, the caller must have another
4915  * mechanism (e.g., a higher level protocol) for determining when it is safe
4916  * to modify the memory region associated with the MD.
4917  *
4918  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4919  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4920  *
4921  * \param self Indicates the NID of a local interface through which to send
4922  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4923  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4924  * must be "free floating" (See LNetMDBind()).
4925  * \param ack Controls whether an acknowledgment is requested.
4926  * Acknowledgments are only sent when they are requested by the initiating
4927  * process and the target MD enables them.
4928  * \param target A process identifier for the target process.
4929  * \param portal The index in the \a target's portal table.
4930  * \param match_bits The match bits to use for MD selection at the target
4931  * process.
4932  * \param offset The offset into the target MD (only used when the target
4933  * MD has the LNET_MD_MANAGE_REMOTE option set).
4934  * \param hdr_data 64 bits of user data that can be included in the message
4935  * header. This data is written to an event queue entry at the target if an
4936  * EQ is present on the matching MD.
4937  *
4938  * \retval  0      Success, and only in this case events will be generated
4939  * and logged to EQ (if it exists).
4940  * \retval -EIO    Simulated failure.
4941  * \retval -ENOMEM Memory allocation failure.
4942  * \retval -ENOENT Invalid MD object.
4943  *
4944  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4945  */
4946 int
4947 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4948         struct lnet_process_id target, unsigned int portal,
4949         __u64 match_bits, unsigned int offset,
4950         __u64 hdr_data)
4951 {
4952         struct lnet_msg *msg;
4953         struct lnet_libmd *md;
4954         int cpt;
4955         int rc;
4956         struct lnet_rsp_tracker *rspt = NULL;
4957
4958         LASSERT(the_lnet.ln_refcount > 0);
4959
4960         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4961             fail_peer(target.nid, 1)) {                 /* shall we now? */
4962                 CERROR("Dropping PUT to %s: simulated failure\n",
4963                        libcfs_id2str(target));
4964                 return -EIO;
4965         }
4966
4967         msg = lnet_msg_alloc();
4968         if (msg == NULL) {
4969                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4970                        libcfs_id2str(target));
4971                 return -ENOMEM;
4972         }
4973         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
4974
4975         cpt = lnet_cpt_of_cookie(mdh.cookie);
4976
4977         if (ack == LNET_ACK_REQ) {
4978                 rspt = lnet_rspt_alloc(cpt);
4979                 if (!rspt) {
4980                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4981                                 libcfs_id2str(target));
4982                         return -ENOMEM;
                             lnet_msg_free(msg);
4983                         return -ENOMEM;
4984                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4985         }
4986
4987         lnet_res_lock(cpt);
4988
4989         md = lnet_handle2md(&mdh);
4990         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4991                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4992                        match_bits, portal, libcfs_id2str(target),
4993                        md == NULL ? -1 : md->md_threshold);
4994                 if (md != NULL && md->md_me != NULL)
4995                         CERROR("Source MD also attached to portal %d\n",
4996                                md->md_me->me_portal);
4997                 lnet_res_unlock(cpt);
4998
4999                 if (rspt)
5000                         lnet_rspt_free(rspt, cpt);
5001
5002                 lnet_msg_free(msg);
5003                 return -ENOENT;
5004         }
5005
5006         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
5007
5008         lnet_msg_attach_md(msg, md, 0, 0);
5009
5010         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5011
5012         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5013         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5014         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5015         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5016
5017         /* NB handles only looked up by creator (no flips) */
5018         if (ack == LNET_ACK_REQ) {
5019                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5020                         the_lnet.ln_interface_cookie;
5021                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5022                         md->md_lh.lh_cookie;
5023         } else {
5024                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5025                         LNET_WIRE_HANDLE_COOKIE_NONE;
5026                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5027                         LNET_WIRE_HANDLE_COOKIE_NONE;
5028         }
5029
5030         lnet_res_unlock(cpt);
5031
5032         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5033
5034         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5035                                                    md->md_options))
5036                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5037         else if (rspt)
5038                 lnet_rspt_free(rspt, cpt);
5039
5040         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5041                                  CFS_FAIL_ONCE))
5042                 rc = -EIO;
5043         else
5044                 rc = lnet_send(self, msg, LNET_NID_ANY);
5045
5046         if (rc != 0) {
5047                 CNETERR("Error sending PUT to %s: %d\n",
5048                         libcfs_id2str(target), rc);
5049                 msg->msg_no_resend = true;
5050                 lnet_finalize(msg, rc);
5051         }
5052
5053         /* completion will be signalled by an event */
5054         return 0;
5055 }
5056 EXPORT_SYMBOL(LNetPut);
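
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that has already bound a memory descriptor with LNetMDBind()
 * could issue an acknowledged PUT roughly as follows.  The portal index and
 * match bits are hypothetical example values.
 */
static inline int __maybe_unused
lnet_example_put(struct lnet_handle_md mdh, struct lnet_process_id target)
{
        /* let LNet pick the local NI, request an ACK, portal 4,
         * match bits 0x1234, offset 0, no header data */
        return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
                       4, 0x1234ULL, 0, 0);
}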
5057
5058 /*
5059  * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
5060  * returns a msg for the LND to pass to lnet_finalize() when the sink
5061  * data has been received.
5062  *
5063  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5064  * lnet_finalize() is called on it, so the LND must call this first
5065  */
5066 struct lnet_msg *
5067 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5068 {
5069         struct lnet_msg *msg = lnet_msg_alloc();
5070         struct lnet_libmd *getmd = getmsg->msg_md;
5071         struct lnet_process_id peer_id = getmsg->msg_target;
5072         int cpt;
5073
5074         LASSERT(!getmsg->msg_target_is_router);
5075         LASSERT(!getmsg->msg_routing);
5076
5077         if (msg == NULL) {
5078                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5079                        libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id));
5080                 goto drop;
5081         }
5082
5083         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5084         lnet_res_lock(cpt);
5085
5086         LASSERT(getmd->md_refcount > 0);
5087
5088         if (getmd->md_threshold == 0) {
5089                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5090                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id),
5091                         getmd);
5092                 lnet_res_unlock(cpt);
5093                 goto drop;
5094         }
5095
5096         LASSERT(getmd->md_offset == 0);
5097
5098         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5099                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id), getmd);
5100
5101         /* setup information for lnet_build_msg_event */
5102         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5103         msg->msg_from = peer_id.nid;
5104         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5105         msg->msg_hdr.src_nid = peer_id.nid;
5106         msg->msg_hdr.payload_length = getmd->md_length;
5107         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5108
5109         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5110         lnet_res_unlock(cpt);
5111
5112         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5113
5114         lnet_net_lock(cpt);
5115         lnet_msg_commit(msg, cpt);
5116         lnet_net_unlock(cpt);
5117
5118         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5119
5120         return msg;
5121
5122  drop:
5123         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5124
5125         lnet_net_lock(cpt);
5126         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5127         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5128         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5129                 getmd->md_length;
5130         lnet_net_unlock(cpt);
5131
5132         if (msg != NULL)
5133                 lnet_msg_free(msg);
5134
5135         return NULL;
5136 }
5137 EXPORT_SYMBOL(lnet_create_reply_msg);
5138
5139 void
5140 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5141                        unsigned int len)
5142 {
5143         /* Set the REPLY length, now that the RDMA that elides the REPLY message
5144          * has completed and the length is known. */
5145         LASSERT(reply != NULL);
5146         LASSERT(reply->msg_type == LNET_MSG_GET);
5147         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5148
5149         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5150          * the end of my buffer, I might as well be dead. */
5151         LASSERT(len <= reply->msg_ev.mlength);
5152
5153         reply->msg_ev.mlength = len;
5154 }
5155 EXPORT_SYMBOL(lnet_set_reply_msg_len);
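
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * an LND that satisfies a GET by RDMAing directly into the sink (so no
 * REPLY travels on the wire) pairs lnet_create_reply_msg() and
 * lnet_set_reply_msg_len() roughly as follows once the transfer completes.
 * 'getmsg' is the original GET handed to the LND and 'nob' is the number of
 * bytes actually transferred; error handling is omitted for brevity.
 */
static void __maybe_unused
lnd_example_rdma_get_done(struct lnet_ni *ni, struct lnet_msg *getmsg,
                          unsigned int nob, int status)
{
        struct lnet_msg *reply;

        /* must be created before lnet_finalize() frees getmsg */
        reply = lnet_create_reply_msg(ni, getmsg);
        lnet_finalize(getmsg, status);

        if (reply != NULL) {
                lnet_set_reply_msg_len(ni, reply, nob);
                lnet_finalize(reply, status);
        }
}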
5156
5157 /**
5158  * Initiate an asynchronous GET operation.
5159  *
5160  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5161  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5162  * the target node in the REPLY has been written to local MD.
5163  *
5164  * On the target node, an LNET_EVENT_GET is logged when the GET request
5165  * arrives and is accepted into a MD.
5166  *
5167  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5168  * \param mdh A handle for the MD that describes the memory into which the
5169  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5170  *
5171  * \retval  0      Success, and only in this case events will be generated
5172  * and logged to EQ (if it exists) of the MD.
5173  * \retval -EIO    Simulated failure.
5174  * \retval -ENOMEM Memory allocation failure.
5175  * \retval -ENOENT Invalid MD object.
5176  */
5177 int
5178 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
5179         struct lnet_process_id target, unsigned int portal,
5180         __u64 match_bits, unsigned int offset, bool recovery)
5181 {
5182         struct lnet_msg *msg;
5183         struct lnet_libmd *md;
5184         struct lnet_rsp_tracker *rspt;
5185         int cpt;
5186         int rc;
5187
5188         LASSERT(the_lnet.ln_refcount > 0);
5189
5190         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5191             fail_peer(target.nid, 1))                   /* shall we now? */
5192         {
5193                 CERROR("Dropping GET to %s: simulated failure\n",
5194                        libcfs_id2str(target));
5195                 return -EIO;
5196         }
5197
5198         msg = lnet_msg_alloc();
5199         if (!msg) {
5200                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5201                        libcfs_id2str(target));
5202                 return -ENOMEM;
5203         }
5204
5205         cpt = lnet_cpt_of_cookie(mdh.cookie);
5206
5207         rspt = lnet_rspt_alloc(cpt);
5208         if (!rspt) {
5209                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5210                        libcfs_id2str(target));
                     lnet_msg_free(msg);
5211                 return -ENOMEM;
5212         }
5213         INIT_LIST_HEAD(&rspt->rspt_on_list);
5214
5215         msg->msg_recovery = recovery;
5216
5217         lnet_res_lock(cpt);
5218
5219         md = lnet_handle2md(&mdh);
5220         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5221                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5222                        match_bits, portal, libcfs_id2str(target),
5223                        md == NULL ? -1 : md->md_threshold);
5224                 if (md != NULL && md->md_me != NULL)
5225                         CERROR("REPLY MD also attached to portal %d\n",
5226                                md->md_me->me_portal);
5227
5228                 lnet_res_unlock(cpt);
5229
5230                 lnet_msg_free(msg);
5231                 lnet_rspt_free(rspt, cpt);
5232                 return -ENOENT;
5233         }
5234
5235         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5236
5237         lnet_msg_attach_md(msg, md, 0, 0);
5238
5239         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5240
5241         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5242         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5243         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5244         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5245
5246         /* NB handles only looked up by creator (no flips) */
5247         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5248                 the_lnet.ln_interface_cookie;
5249         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5250                 md->md_lh.lh_cookie;
5251
5252         lnet_res_unlock(cpt);
5253
5254         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5255
5256         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5257                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5258         else
5259                 lnet_rspt_free(rspt, cpt);
5260
5261         rc = lnet_send(self, msg, LNET_NID_ANY);
5262         if (rc < 0) {
5263                 CNETERR("Error sending GET to %s: %d\n",
5264                         libcfs_id2str(target), rc);
5265                 msg->msg_no_resend = true;
5266                 lnet_finalize(msg, rc);
5267         }
5268
5269         /* completion will be signalled by an event */
5270         return 0;
5271 }
5272 EXPORT_SYMBOL(LNetGet);
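
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * fetching remote data into a locally bound MD with a GET.  The portal index
 * and match bits are hypothetical example values; arrival of the returned
 * data is signalled by an LNET_EVENT_REPLY on the MD's handler.
 */
static inline int __maybe_unused
lnet_example_get(struct lnet_handle_md mdh, struct lnet_process_id target)
{
        /* portal 4, match bits 0x1234, source offset 0, not a recovery ping */
        return LNetGet(LNET_NID_ANY, mdh, target, 4, 0x1234ULL, 0, false);
}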
5273
5274 /**
5275  * Calculate distance to node at \a dstnid.
5276  *
5277  * \param dstnid Target NID.
5278  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5279  * is saved here.
5280  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5281  * here.
5282  *
5283  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5284  * local_nid_dist_zero is set, which is the default.
5285  * \retval positive Distance to the target NID, i.e. the number of hops plus one.
5286  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5287  */
5288 int
5289 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5290 {
5291         struct list_head *e;
5292         struct lnet_ni *ni = NULL;
5293         struct lnet_remotenet *rnet;
5294         __u32 dstnet = LNET_NIDNET(dstnid);
5295         int hops;
5296         int cpt;
5297         __u32 order = 2;
5298         struct list_head *rn_list;
5299         bool matched_dstnet = false;
5300
5301         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5302          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5303          * keep order 0 free for 0@lo and order 1 free for a local NID
5304          * match */
5305
5306         LASSERT(the_lnet.ln_refcount > 0);
5307
5308         cpt = lnet_net_lock_current();
5309
5310         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5311                 /* FIXME support large-addr nid */
5312                 if (lnet_nid_to_nid4(&ni->ni_nid) == dstnid) {
5313                         if (srcnidp != NULL)
5314                                 *srcnidp = dstnid;
5315                         if (orderp != NULL) {
5316                                 if (dstnid == LNET_NID_LO_0)
5317                                         *orderp = 0;
5318                                 else
5319                                         *orderp = 1;
5320                         }
5321                         lnet_net_unlock(cpt);
5322
5323                         return local_nid_dist_zero ? 0 : 1;
5324                 }
5325
5326                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5327                         matched_dstnet = true;
5328                         /* We matched the destination net, but we may have
5329                          * additional local NIs to inspect.
5330                          *
5331                          * We record the nid and order as appropriate, but
5332                          * they may be overwritten if we match local NI above.
5333                          */
5334                         if (srcnidp)
5335                                 /* FIXME support large-addr nids */
5336                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5337
5338                         if (orderp) {
5339                                 /* Check if ni was originally created in
5340                                  * current net namespace.
5341                                  * If not, assign order above 0xffff0000,
5342                                  * to make this ni not a priority.
5343                                  */
5344                                 if (current->nsproxy &&
5345                                     !net_eq(ni->ni_net_ns,
5346                                             current->nsproxy->net_ns))
5347                                         *orderp = order + 0xffff0000;
5348                                 else
5349                                         *orderp = order;
5350                         }
5351                 }
5352
5353                 order++;
5354         }
5355
5356         if (matched_dstnet) {
5357                 lnet_net_unlock(cpt);
5358                 return 1;
5359         }
5360
5361         rn_list = lnet_net2rnethash(dstnet);
5362         list_for_each(e, rn_list) {
5363                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5364
5365                 if (rnet->lrn_net == dstnet) {
5366                         struct lnet_route *route;
5367                         struct lnet_route *shortest = NULL;
5368                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5369                         __u32 route_hops;
5370
5371                         LASSERT(!list_empty(&rnet->lrn_routes));
5372
5373                         list_for_each_entry(route, &rnet->lrn_routes,
5374                                             lr_list) {
5375                                 route_hops = route->lr_hops;
5376                                 if (route_hops == LNET_UNDEFINED_HOPS)
5377                                         route_hops = 1;
5378                                 if (shortest == NULL ||
5379                                     route_hops < shortest_hops) {
5380                                         shortest = route;
5381                                         shortest_hops = route_hops;
5382                                 }
5383                         }
5384
5385                         LASSERT(shortest != NULL);
5386                         hops = shortest_hops;
5387                         if (srcnidp != NULL) {
5388                                 struct lnet_net *net;
5389                                 net = lnet_get_net_locked(shortest->lr_lnet);
5390                                 LASSERT(net);
5391                                 ni = lnet_get_next_ni_locked(net, NULL);
5392                                 /* FIXME support large-addr nids */
5393                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5394                         }
5395                         if (orderp != NULL)
5396                                 *orderp = order;
5397                         lnet_net_unlock(cpt);
5398                         return hops + 1;
5399                 }
5400                 order++;
5401         }
5402
5403         lnet_net_unlock(cpt);
5404         return -EHOSTUNREACH;
5405 }
5406 EXPORT_SYMBOL(LNetDist);
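
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * querying how far away a peer NID is and which local NID would be used to
 * reach it.  A negative return means the NID is unreachable; 0 means it is
 * one of our own NIDs (with local_nid_dist_zero set, the default).
 */
static int __maybe_unused
lnet_example_dist(lnet_nid_t dstnid)
{
        lnet_nid_t srcnid = LNET_NID_ANY;
        __u32 order = 0;
        int dist;

        dist = LNetDist(dstnid, &srcnid, &order);
        if (dist < 0)
                return dist;    /* e.g. -EHOSTUNREACH */

        CDEBUG(D_NET, "%s is at distance %d via %s (order %u)\n",
               libcfs_nid2str(dstnid), dist, libcfs_nid2str(srcnid), order);
        return 0;
}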