LU-10391 lnet: change lnet_hdr to store large nids.
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <lnet/lnet_rdma.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

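/* State carried through source NI / destination peer NI selection for a
 * single send; filled in incrementally by lnet_select_pathway() and its
 * helper routines. */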
struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        struct lnet_nid sd_dst_nid;
        struct lnet_nid sd_src_nid;
        struct lnet_nid sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline bool
lnet_msg_is_response(struct lnet_msg *msg)
{
        return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
}

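/* Decide whether the response (REPLY or ACK) to a message should be
 * tracked for timeout purposes. Per-MD flags take precedence over the
 * global lnet_response_tracking setting, which this code interprets as:
 * 1 = track GET responses, 2 = track PUT responses, 3 = track responses
 * to all message types. */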
static inline bool
lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
{
        if (md_options & LNET_MD_NO_TRACK_RESPONSE)
                /* Explicitly disabled in MD options */
                return false;

        if (md_options & LNET_MD_TRACK_RESPONSE)
                /* Explicitly enabled in MD options */
                return true;

        if (lnet_response_tracking == 3)
                /* Enabled for all message types */
                return true;

        if (msg_type == LNET_MSG_PUT)
                return lnet_response_tracking == 2;

        if (msg_type == LNET_MSG_GET)
                return lnet_response_tracking == 1;

        return false;
}

static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

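/* Configure fault injection for the given NID. A non-zero threshold adds
 * a test peer entry that makes the next 'threshold' messages to/from
 * that NID fail (see fail_peer() below); a threshold of 0 removes
 * matching entries (all entries if the NID is the wildcard). */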
int
lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct lnet_nid nid;
        LIST_HEAD(cull);

        lnet_nid4_to_nid(nid4, &nid);
        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
                    nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}

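/* Check whether a message to/from 'nid4' should be dropped under the
 * fault-injection entries installed by lnet_fail_nid(). Returns 1 if the
 * message should fail, 0 otherwise, decrementing the matching entry's
 * threshold as a side effect. */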
static int
fail_peer(lnet_nid_t nid4, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        struct lnet_nid nid;
        LIST_HEAD(cull);
        int fail = 0;

        lnet_nid4_to_nid(nid4, &nid);
        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
                    nid_same(&nid, &tp->tp_nid)) {      /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_iov_nob);

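/* Copy 'nob' bytes from the source kvec array (starting 'soffset' bytes
 * in) to the destination kvec array (starting at 'doffset'), walking
 * fragment boundaries on both sides as needed. */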
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

unsigned int
lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
        unsigned int  nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->bv_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);

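/* As lnet_copy_iov2iov(), but both source and destination are bio_vec
 * (page) fragments, so each page is kmap()ed before it is copied. */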
void
lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
                    unsigned int doffset,
                    unsigned int nsiov, struct bio_vec *siov,
                    unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int    this_nob;
        char           *daddr = NULL;
        char           *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->bv_len) {
                doffset -= diov->bv_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->bv_len) {
                soffset -= siov->bv_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->bv_len - doffset,
                                siov->bv_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->bv_page)) +
                                diov->bv_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->bv_page)) +
                                siov->bv_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->bv_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->bv_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->bv_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->bv_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->bv_page);
        if (saddr != NULL)
                kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                    unsigned int nkiov, struct bio_vec *kiov,
                    unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->bv_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
                  int src_niov, struct bio_vec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->bv_len) {      /* skip initial frags */
                offset -= src->bv_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->bv_len - offset;
                dst->bv_page = src->bv_page;
                dst->bv_offset = src->bv_offset + offset;

                if (len <= frag_len) {
                        dst->bv_len = len;
                        LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
                        return niov;
                }

                dst->bv_len = frag_len;
                LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);

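/* Hand an incoming message to the NI's LND for reception. 'mlen' bytes
 * are delivered into the message's buffers starting at 'offset'; 'rlen'
 * is the total length on the wire. On LND failure the message is
 * finalized immediately with the error. */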
void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        struct bio_vec  *kiov = NULL;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

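/* Attach the MD's buffer fragments to the message as its payload. */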
static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        msg->msg_kiov = md->md_kiov;
}

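/* Initialize an outgoing message of the given type for 'target': attach
 * the payload buffers and fill in the wire header. The destination NID
 * in the header may still be rewritten during pathway selection, and
 * the source NID is filled in later. */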
void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target.pid = target.pid;
        lnet_nid4_to_nid(target.nid, &msg->msg_target.nid);
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = type;
        /* dest_nid will be overwritten by lnet_select_pathway() */
        lnet_nid4_to_nid(target.nid, &msg->msg_hdr.dest_nid);
        msg->msg_hdr.dest_pid       = target.pid;
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = the_lnet.ln_pid;
        msg->msg_hdr.payload_length = len;
}

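/* Pass a fully credited message to the NI's LND for transmission; on
 * LND failure finalize it immediately without attempting a resend. */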
void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(nid_is_lo0(&ni->ni_nid) ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

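/* Ask the LND to accept the message payload now (eager receive) so that
 * reception can be deferred, e.g. while the message waits for router
 * buffers or credits. Returns 0 on success, negative errno on failure. */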
static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int     rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                  &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
                       libcfs_idstr(&msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

static bool
lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
{
        time64_t deadline;

        deadline = lpni->lpni_last_alive +
                   lpni->lpni_net->net_tunables.lct_peer_timeout;

        /*
         * assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (deadline > now)
                return false;

        return true;
}

/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
{
        time64_t now = ktime_get_seconds();

        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;

        /*
         * If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 1;

        /* try to send recovery messages regardless */
        if (msg->msg_recovery)
                return 1;

        /* always send any responses */
        if (lnet_msg_is_response(msg))
                return 1;

        if (!lnet_is_peer_deadline_passed(lpni, now))
                return 1;

        return lnet_is_peer_ni_alive(lpni);
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets \a do_send FALSE and skips the unlock/send/lock sequence.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni     *lp = msg->msg_txpeer;
        struct lnet_ni          *ni = msg->msg_txni;
        int                     cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* can't get here if we're sending to the loopback interface */
        if (the_lnet.ln_loni)
                LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp, msg) == 0) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);
                if (msg->msg_txni)
                        lnet_incr_stats(&msg->msg_txni->ni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_idstr(&msg->msg_target));
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                if (do_send)
                        lnet_finalize(msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_idstr(&msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_idstr(&msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
            lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
                msg->msg_tx_delayed = 1;
                return LNET_CREDIT_WAIT;
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}


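/* Pick the smallest router buffer pool whose buffers can hold this
 * message's payload. */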
static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool  *rbp;
        int                     cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        spin_unlock(&lpni->lpni_lock);
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        /* lp_lock protects the lp_rtrq */
                        spin_lock(&lp->lp_lock);
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

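/* Return the NI and peer tx credits a finalized message was holding and,
 * while a credit count is still not positive, kick off the next delayed
 * message waiting on that credit. */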
void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
        struct lnet_ni          *txni = msg->msg_txni;
        struct lnet_msg         *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -=  msg->msg_len +
                                        sizeof(struct lnet_hdr_nid4);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_entry(txpeer->lpni_txq.next,
                                              struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

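/* Receive the next routed message that blocked waiting for a buffer
 * from this pool, if any. */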
void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_entry(rbp->rbp_msgs.next,
                         struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

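/* Abort a list of queued routed messages: consume their payloads from
 * the LND and finalize each with -ECANCELED. Called with the net lock
 * held for 'cpt'; the lock is dropped while finalizing. */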
void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

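/* Return the router buffer and peer router credits held by a finalized
 * routed message, recycling (or discarding) the buffer and scheduling
 * any messages that were blocked on those credits. */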
void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits.  */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeerni->lpni_lock);
                rxpeerni->lpni_rtrcredits++;
                spin_unlock(&rxpeerni->lpni_lock);

                lp = rxpeerni->lpni_peer_net->lpn_peer;
                spin_lock(&lp->lp_lock);

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);
                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_entry(lp->lp_rtrq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_ni *best_lpni,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
                INT_MIN;
        int best_lpni_healthv = (best_lpni) ?
                atomic_read(&best_lpni->lpni_healthv) : 0;
        bool best_lpni_is_preferred = false;
        bool lpni_is_preferred;
        int lpni_healthv;
        __u32 lpni_sel_prio;
        __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        lpni_is_preferred = lnet_peer_is_pref_nid_locked(
                                lpni, &best_ni->ni_nid);
                        CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
                               libcfs_nidstr(&best_ni->ni_nid),
                               lpni_is_preferred);
                } else {
                        lpni_is_preferred = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);
                lpni_sel_prio = lpni->lpni_sel_priority;

                if (best_lpni)
                        CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
                                libcfs_nidstr(&lpni->lpni_nid),
                                libcfs_nidstr(&best_lpni->lpni_nid),
                                lpni_healthv, best_lpni_healthv,
                                lpni_sel_prio, best_sel_prio,
                                lpni->lpni_txcredits, best_lpni_credits,
                                lpni->lpni_seq, best_lpni->lpni_seq);
                else
                        goto select_lpni;

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv)
                        continue;
                else if (lpni_healthv > best_lpni_healthv) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                if (lpni_sel_prio > best_sel_prio)
                        continue;
                else if (lpni_sel_prio < best_sel_prio) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                /* if this is a preferred peer use it */
                if (!best_lpni_is_preferred && lpni_is_preferred) {
                        best_lpni_is_preferred = true;
                        goto select_lpni;
                } else if (best_lpni_is_preferred && !lpni_is_preferred) {
                        /* this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                }

                if (lpni->lpni_txcredits < best_lpni_credits)
                        /* We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                else if (lpni->lpni_txcredits > best_lpni_credits)
                        goto select_lpni;

                /* The best peer found so far and the current peer have
                 * the same number of available credits, so make sure to
                 * select between them using Round Robin
                 */
                if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
                        continue;
select_lpni:
                best_lpni_is_preferred = lpni_is_preferred;
                best_lpni_healthv = lpni_healthv;
                best_sel_prio = lpni_sel_prio;
                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);
                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                                libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nidstr(&best_lpni->lpni_nid));

        return best_lpni;
}

/*
 * Prerequisite: the best_ni should already be set in the sd.
 * Find the best lpni.
 * If the net id is provided then restrict lpni selection to
 * that particular net.
 * Otherwise find any reachable lpni. When dealing with an MR
 * gateway that has multiple lpnis we can use, we want to select
 * the best one from the list of reachable ones.
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /* find the best_lpni on any local network */
        if (net_id == LNET_NET_ANY) {
                struct lnet_peer_ni *best_lpni = NULL;
                struct lnet_peer_net *lpn;
                list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
                        /* no net specified, find any reachable peer ni */
                        if (!lnet_islocalnet_locked(lpn->lpn_net_id))
                                continue;
                        best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
                                                        best_lpni, lpn);
                }

                return best_lpni;
        }
        /* restrict on the specified net */
        peer_net = lnet_peer_get_net_locked(peer, net_id);
        if (peer_net)
                return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);

        return NULL;
}

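/* Compare two gateway peer NIs by queued bytes, then by available tx
 * credits. Returns 1 if lpni1 is the better choice, -1 if lpni2 is,
 * and 0 if they are equivalent. */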
1458 static int
1459 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1460 {
1461         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1462                 return 1;
1463
1464         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1465                 return -1;
1466
1467         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1468                 return 1;
1469
1470         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1471                 return -1;
1472
1473         return 0;
1474 }
1475
1476 /* Compare route priorities and hop counts */
1477 static int
1478 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1479 {
1480         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1481         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1482
1483         if (r1->lr_priority < r2->lr_priority)
1484                 return 1;
1485
1486         if (r1->lr_priority > r2->lr_priority)
1487                 return -1;
1488
1489         if (r1_hops < r2_hops)
1490                 return 1;
1491
1492         if (r1_hops > r2_hops)
1493                 return -1;
1494
1495         return 0;
1496 }
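/*
 * Editor's note: lnet_compare_gw_lpnis() and lnet_compare_routes()
 * share one convention: return 1 when the first argument is strictly
 * better, -1 when the second is, and 0 on a tie so the caller can fall
 * through to the next, weaker tie-breaker. Sketch of the chaining
 * (route/best_route/lpni/best_gw_ni are placeholders):
 */
#if 0
        rc = lnet_compare_routes(route, best_route);
        if (rc == -1)                   /* strictly worse: skip it */
                continue;
        if (rc == 0)                    /* tie: compare the gateway NIs */
                rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
#endif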
1497
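/*
 * Editor's summary (added for clarity): select the best alive route to
 * @rnet, preferring gateways on @remote_lpni's preferred-router list,
 * then route priority, hop count, gateway NI quality and round robin on
 * lr_seq. *prev_route returns the most recently used route so the
 * caller can advance the round-robin sequence; *gwni returns the
 * gateway peer NI reachable from @src_net.
 */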
1498 static struct lnet_route *
1499 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1500                        struct lnet_peer_ni *remote_lpni,
1501                        struct lnet_route **prev_route,
1502                        struct lnet_peer_ni **gwni)
1503 {
1504         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1505         struct lnet_route *best_route;
1506         struct lnet_route *last_route;
1507         struct lnet_route *route;
1508         int rc;
1509         bool best_rte_is_preferred = false;
1510         struct lnet_nid *gw_pnid;
1511
1512         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1513                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1514
1515         best_route = last_route = NULL;
1516         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1517                 if (!lnet_is_route_alive(route))
1518                         continue;
1519                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1520
1521                 /* no protection on the fields below, but it's harmless */
1522                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1523                         last_route = route;
1524
1525                 /* if the best route found is in the preferred list then
1526                  * tag it as preferred and use it later on. But if we
1527                  * didn't find any routes which are on the preferred list
1528                  * then just use the best route possible.
1529                  */
1530                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1531
1532                 if (!best_route || (rc && !best_rte_is_preferred)) {
1533                         /* Restrict the selection of the router NI on the
1534                          * src_net provided. If the src_net is LNET_NET_ANY,
1535                          * then select the best interface available.
1536                          */
1537                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1538                                                    route->lr_gateway,
1539                                                    src_net);
1540                         if (!lpni) {
1541                                 CDEBUG(D_NET,
1542                                        "Gateway %s does not have a peer NI on net %s\n",
1543                                        libcfs_nidstr(gw_pnid),
1544                                        libcfs_net2str(src_net));
1545                                 continue;
1546                         }
1547                 }
1548
1549                 if (rc && !best_rte_is_preferred) {
1550                         /* This is the first preferred route we found,
1551                          * so it beats any route found previously
1552                          */
1553                         best_route = route;
1554                         if (!last_route)
1555                                 last_route = route;
1556                         best_gw_ni = lpni;
1557                         best_rte_is_preferred = true;
1558                         CDEBUG(D_NET, "preferred gw = %s\n",
1559                                libcfs_nidstr(gw_pnid));
1560                         continue;
1561                 } else if ((!rc) && best_rte_is_preferred)
1562                         /* The best route we found so far is in the preferred
1563                          * list, so it beats any non-preferred route
1564                          */
1565                         continue;
1566
1567                 if (!best_route) {
1568                         best_route = last_route = route;
1569                         best_gw_ni = lpni;
1570                         continue;
1571                 }
1572
1573                 rc = lnet_compare_routes(route, best_route);
1574                 if (rc == -1)
1575                         continue;
1576
1577                 /* Restrict the selection of the router NI on the
1578                          * src_net provided. If the src_net is LNET_NET_ANY,
1579                  * then select the best interface available.
1580                  */
1581                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1582                                            route->lr_gateway,
1583                                            src_net);
1584                 if (!lpni) {
1585                         CDEBUG(D_NET,
1586                                "Gateway %s does not have a peer NI on net %s\n",
1587                                libcfs_nidstr(gw_pnid),
1588                                libcfs_net2str(src_net));
1589                         continue;
1590                 }
1591
1592                 if (rc == 1) {
1593                         best_route = route;
1594                         best_gw_ni = lpni;
1595                         continue;
1596                 }
1597
1598                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1599                 if (rc == -1)
1600                         continue;
1601
1602                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1603                         best_route = route;
1604                         best_gw_ni = lpni;
1605                         continue;
1606                 }
1607         }
1608
1609         *prev_route = last_route;
1610         *gwni = best_gw_ni;
1611
1612         return best_route;
1613 }
1614
1615 static inline unsigned int
1616 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1617 {
1618         if (dev_idx == UINT_MAX)
1619                 return UINT_MAX;
1620
1621         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1622             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1623                 return UINT_MAX;
1624
1625         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1626 }
1627
1628 static struct lnet_ni *
1629 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1630                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1631                  struct lnet_msg *msg, int md_cpt)
1632 {
1633         struct lnet_libmd *md = msg->msg_md;
1634         unsigned int offset = msg->msg_offset;
1635         unsigned int shortest_distance;
1636         struct lnet_ni *ni = NULL;
1637         int best_credits;
1638         int best_healthv;
1639         __u32 best_sel_prio;
1640         unsigned int best_dev_prio;
1641         unsigned int dev_idx = UINT_MAX;
1642         struct page *page = lnet_get_first_page(md, offset);
1643         msg->msg_rdma_force = lnet_is_rdma_only_page(page);
1644
1645         if (msg->msg_rdma_force)
1646                 dev_idx = lnet_get_dev_idx(page);
1647
1648         /*
1649          * If there is no peer_ni that we can send to on this network,
1650          * then there is no point in looking for a new best_ni here.
1651          */
1652         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1653                 return best_ni;
1654
1655         if (best_ni == NULL) {
1656                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1657                 shortest_distance = UINT_MAX;
1658                 best_dev_prio = UINT_MAX;
1659                 best_credits = INT_MIN;
1660                 best_healthv = 0;
1661         } else {
1662                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1663                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1664                                                      best_ni->ni_dev_cpt);
1665                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1666                 best_healthv = atomic_read(&best_ni->ni_healthv);
1667                 best_sel_prio = best_ni->ni_sel_priority;
1668         }
1669
1670         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1671                 unsigned int distance;
1672                 int ni_credits;
1673                 int ni_healthv;
1674                 int ni_fatal;
1675                 __u32 ni_sel_prio;
1676                 unsigned int ni_dev_prio;
1677
1678                 ni_credits = atomic_read(&ni->ni_tx_credits);
1679                 ni_healthv = atomic_read(&ni->ni_healthv);
1680                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1681                 ni_sel_prio = ni->ni_sel_priority;
1682
1683                 /*
1684                  * calculate the distance from the CPT on which
1685                  * the message memory is allocated to the CPT of
1686                  * the NI's physical device
1687                  */
1688                 distance = cfs_cpt_distance(lnet_cpt_table(),
1689                                             md_cpt,
1690                                             ni->ni_dev_cpt);
1691
1692                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1693
1694                 /*
1695                  * All distances smaller than the NUMA range
1696                  * are treated equally.
1697                  */
1698                 if (distance < lnet_numa_range)
1699                         distance = lnet_numa_range;
1700
1701                 /*
1702                  * Select on health, selection policy, direct dma prio,
1703                  * shorter distance, available credits, then round-robin.
1704                  */
1705                 if (ni_fatal)
1706                         continue;
1707
1708                 if (best_ni)
1709                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u]\n",
1710                                libcfs_nidstr(&ni->ni_nid), ni_credits, distance,
1711                                ni->ni_seq, ni_sel_prio, ni_dev_prio,
1712                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1713                                : "not selected", best_credits, shortest_distance,
1714                                (best_ni) ? best_ni->ni_seq : 0,
1715                                best_sel_prio, best_dev_prio);
1716                 else
1717                         goto select_ni;
1718
1719                 if (ni_healthv < best_healthv)
1720                         continue;
1721                 else if (ni_healthv > best_healthv)
1722                         goto select_ni;
1723
1724                 if (ni_sel_prio > best_sel_prio)
1725                         continue;
1726                 else if (ni_sel_prio < best_sel_prio)
1727                         goto select_ni;
1728
1729                 if (ni_dev_prio > best_dev_prio)
1730                         continue;
1731                 else if (ni_dev_prio < best_dev_prio)
1732                         goto select_ni;
1733
1734                 if (distance > shortest_distance)
1735                         continue;
1736                 else if (distance < shortest_distance)
1737                         goto select_ni;
1738
1739                 if (ni_credits < best_credits)
1740                         continue;
1741                 else if (ni_credits > best_credits)
1742                         goto select_ni;
1743
1744                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1745                         continue;
1746
1747 select_ni:
1748                 best_sel_prio = ni_sel_prio;
1749                 best_dev_prio = ni_dev_prio;
1750                 shortest_distance = distance;
1751                 best_healthv = ni_healthv;
1752                 best_ni = ni;
1753                 best_credits = ni_credits;
1754         }
1755
1756         CDEBUG(D_NET, "selected best_ni %s\n",
1757                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1758
1759         return best_ni;
1760 }
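/*
 * Editor's note: the continue/goto ladder above is the selection idiom
 * used throughout this file: each criterion either rejects the
 * candidate, accepts it outright, or falls through to the next, weaker
 * criterion, ending in round robin. Skeleton of the pattern (the
 * predicate names are placeholders):
 */
#if 0
        if (candidate_is_worse)
                continue;
        else if (candidate_is_better)
                goto select;
        /* tie: fall through to the next criterion */
#endif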
1761
1762 /*
1763  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1764  * because such traffic is required to perform discovery. We therefore
1765  * exclude all GET and PUT on that portal. We also exclude all ACK and
1766  * REPLY traffic, but that is because the portal is not tracked in the
1767  * message structure for these message types. We could restrict this
1768  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1769  */
1770 static bool
1771 lnet_msg_discovery(struct lnet_msg *msg)
1772 {
1773         if (msg->msg_type == LNET_MSG_PUT) {
1774                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1775                         return true;
1776         } else if (msg->msg_type == LNET_MSG_GET) {
1777                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1778                         return true;
1779         }
1780         return false;
1781 }
1782
1783 #define SRC_SPEC        0x0001
1784 #define SRC_ANY         0x0002
1785 #define LOCAL_DST       0x0004
1786 #define REMOTE_DST      0x0008
1787 #define MR_DST          0x0010
1788 #define NMR_DST         0x0020
1789 #define SND_RESP        0x0040
1790
1791 /* The following two defines are used for return codes */
1792 #define REPEAT_SEND     0x1000
1793 #define PASS_THROUGH    0x2000
1794
1795 /* The different cases lnet_select pathway needs to handle */
1796 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1797 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1798 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1799 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1800 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1801 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1802 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1803 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
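/*
 * Editor's sketch of how the flag bits above compose into the named
 * cases (illustrative only; the real composition happens later in the
 * selection path, and "local_found" is a placeholder):
 */
#if 0
        __u32 send_case = 0;

        send_case |= LNET_NID_IS_ANY(&sd->sd_src_nid) ? SRC_ANY : SRC_SPEC;
        send_case |= local_found ? LOCAL_DST : REMOTE_DST;
        send_case |= lnet_peer_is_multi_rail(peer) ? MR_DST : NMR_DST;

        switch (send_case) {
        case SRC_SPEC_LOCAL_MR_DST:
                return lnet_handle_spec_local_mr_dst(sd);
        /* ... one handler per case defined above ... */
        }
#endif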
1804
1805 static int
1806 lnet_handle_lo_send(struct lnet_send_data *sd)
1807 {
1808         struct lnet_msg *msg = sd->sd_msg;
1809         int cpt = sd->sd_cpt;
1810
1811         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1812                 return -ESHUTDOWN;
1813
1814         /* No send credit hassles with LOLND */
1815         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1816         msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid;
1817         if (!msg->msg_routing)
1818                 msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid;
1819         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1820         lnet_msg_commit(msg, cpt);
1821         msg->msg_txni = the_lnet.ln_loni;
1822
1823         return LNET_CREDIT_OK;
1824 }
1825
1826 static int
1827 lnet_handle_send(struct lnet_send_data *sd)
1828 {
1829         struct lnet_ni *best_ni = sd->sd_best_ni;
1830         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1831         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1832         struct lnet_msg *msg = sd->sd_msg;
1833         int cpt2;
1834         __u32 send_case = sd->sd_send_case;
1835         int rc;
1836         __u32 routing = send_case & REMOTE_DST;
1837         struct lnet_rsp_tracker *rspt;
1838
1839         /* Increment sequence number of the selected peer, peer net,
1840          * local ni and local net so that we pick the next ones
1841          * in Round Robin.
1842          */
1843         best_lpni->lpni_peer_net->lpn_seq++;
1844         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1845         best_ni->ni_net->net_seq++;
1846         best_ni->ni_seq = best_ni->ni_net->net_seq;
1847
1848         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1849                libcfs_nidstr(&best_ni->ni_nid),
1850                best_ni->ni_seq, best_ni->ni_net->net_seq,
1851                atomic_read(&best_ni->ni_tx_credits),
1852                best_ni->ni_sel_priority,
1853                libcfs_nidstr(&best_lpni->lpni_nid),
1854                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1855                best_lpni->lpni_txcredits,
1856                best_lpni->lpni_sel_priority);
1857
1858         /*
1859          * grab a reference on the peer_ni so it sticks around even if
1860          * we need to drop and relock the lnet_net_lock below.
1861          */
1862         lnet_peer_ni_addref_locked(best_lpni);
1863
1864         /*
1865          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1866          * message. This ensures that we get a CPT that is correct for
1867          * the NI when the NI has been restricted to a subset of all CPTs.
1868          * If the selected CPT differs from the one currently locked, we
1869          * must unlock and relock the lnet_net_lock(), and then check whether
1870          * the configuration has changed. We don't have a hold on the best_ni
1871          * yet, and it may have vanished.
1872          */
1873         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1874         if (sd->sd_cpt != cpt2) {
1875                 __u32 seq = lnet_get_dlc_seq_locked();
1876                 lnet_net_unlock(sd->sd_cpt);
1877                 sd->sd_cpt = cpt2;
1878                 lnet_net_lock(sd->sd_cpt);
1879                 if (seq != lnet_get_dlc_seq_locked()) {
1880                         lnet_peer_ni_decref_locked(best_lpni);
1881                         return REPEAT_SEND;
1882                 }
1883         }
1884
1885         /*
1886          * store the best_lpni in the message right away to avoid having
1887          * to do the same operation under different conditions
1888          */
1889         msg->msg_txpeer = best_lpni;
1890         msg->msg_txni = best_ni;
1891
1892         /*
1893          * grab a reference for the best_ni since now it's in use in this
1894          * send. The reference will be dropped in lnet_finalize()
1895          */
1896         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1897
1898         /*
1899          * Always set the target.nid to the best peer picked. Either the
1900          * NID will be one of the peer NIDs selected, or the same NID as
1901          * what was originally set in the target or it will be the NID of
1902          * a router if this message should be routed
1903          */
1904         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1905
1906         /*
1907          * lnet_msg_commit assigns the correct cpt to the message, which
1908          * is used to decrement the correct refcount on the ni when it's
1909          * time to return the credits
1910          */
1911         lnet_msg_commit(msg, sd->sd_cpt);
1912
1913         /*
1914          * If we are routing the message then we keep the src_nid that was
1915          * set by the originator. If we are not routing then we are the
1916          * originator and set it here.
1917          */
1918         if (!msg->msg_routing)
1919                 msg->msg_hdr.src_nid = msg->msg_txni->ni_nid;
1920
1921         if (routing) {
1922                 msg->msg_target_is_router = 1;
1923                 msg->msg_target.pid = LNET_PID_LUSTRE;
1924                 /*
1925                  * since we're routing we want to ensure that the
1926                  * msg_hdr.dest_nid is set to the final destination. When
1927                  * the router receives this message it knows how to route
1928                  * it.
1929                  *
1930                  * final_dst_lpni is set at the beginning of the
1931                  * lnet_select_pathway() function and is never changed.
1932                  * It's safe to use it here.
1933                  */
1934                 msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid;
1935         } else {
1936                 /*
1937                  * if we're not routing set the dest_nid to the best peer
1938                  * ni NID that we picked earlier in the algorithm.
1939                  */
1940                 msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid;
1941         }
1942
1943         /*
1944          * if we have response tracker block update it with the next hop
1945          * nid
1946          */
1947         if (msg->msg_md) {
1948                 rspt = msg->msg_md->md_rspt_ptr;
1949                 if (rspt) {
1950                         rspt->rspt_next_hop_nid =
1951                                 msg->msg_txpeer->lpni_nid;
1952                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1953                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1954                 }
1955         }
1956
1957         rc = lnet_post_send_locked(msg, 0);
1958
1959         if (!rc)
1960                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1961                        libcfs_nidstr(&msg->msg_hdr.src_nid),
1962                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1963                        libcfs_nidstr(&sd->sd_src_nid),
1964                        libcfs_nidstr(&msg->msg_hdr.dest_nid),
1965                        libcfs_nidstr(&sd->sd_dst_nid),
1966                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1967                        libcfs_nidstr(&sd->sd_rtr_nid),
1968                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1969
1970         return rc;
1971 }
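/*
 * Editor's note: the REPEAT_SEND path above is an optimistic
 * revalidation: sample the DLC configuration sequence, drop the net
 * lock, retake it on the CPT that matches the chosen NID, and restart
 * the whole selection if the configuration changed in between. Minimal
 * sketch (old_cpt/new_cpt are placeholders):
 */
#if 0
        __u32 seq = lnet_get_dlc_seq_locked();

        lnet_net_unlock(old_cpt);
        lnet_net_lock(new_cpt);
        if (seq != lnet_get_dlc_seq_locked())
                return REPEAT_SEND;     /* caller re-runs the selection */
#endif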
1972
1973 static inline void
1974 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1975                          struct lnet_msg *msg)
1976 {
1977         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1978             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1979                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1980                        libcfs_nidstr(&lni->ni_nid),
1981                        libcfs_nidstr(&lpni->lpni_nid));
1982                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
1983         }
1984 }
1985
1986 /*
1987  * Source Specified
1988  * Local Destination
1989  * non-mr peer
1990  *
1991  * use the source and destination NIDs as the pathway
1992  */
1993 static int
1994 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1995 {
1996         /* the destination lpni is set before we get here. */
1997
1998         /* find local NI */
1999         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2000         if (!sd->sd_best_ni) {
2001                 CERROR("Can't send to %s: src %s is not a local nid\n",
2002                        libcfs_nidstr(&sd->sd_dst_nid),
2003                        libcfs_nidstr(&sd->sd_src_nid));
2004                 return -EINVAL;
2005         }
2006
2007         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2008
2009         return lnet_handle_send(sd);
2010 }
2011
2012 /*
2013  * Source Specified
2014  * Local Destination
2015  * MR Peer
2016  *
2017  * Don't run the selection algorithm on the peer NIs. By specifying the
2018  * local NID, we're also saying that we should always use the destination NID
2019  * provided. This handles the case where we should be using the same
2020  * destination NID for the all the messages which belong to the same RPC
2021  * request.
2022  */
2023 static int
2024 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2025 {
2026         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2027         if (!sd->sd_best_ni) {
2028                 CERROR("Can't send to %s: src %s is not a local nid\n",
2029                        libcfs_nidstr(&sd->sd_dst_nid),
2030                        libcfs_nidstr(&sd->sd_src_nid));
2031                 return -EINVAL;
2032         }
2033
2034         if (sd->sd_best_lpni &&
2035             nid_same(&sd->sd_best_lpni->lpni_nid,
2036                       &the_lnet.ln_loni->ni_nid))
2037                 return lnet_handle_lo_send(sd);
2038         else if (sd->sd_best_lpni)
2039                 return lnet_handle_send(sd);
2040
2041         CERROR("can't send to %s. no NI on %s\n",
2042                libcfs_nidstr(&sd->sd_dst_nid),
2043                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2044
2045         return -EHOSTUNREACH;
2046 }
2047
2048 struct lnet_ni *
2049 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2050                               struct lnet_peer *peer,
2051                               struct lnet_peer_net *peer_net,
2052                               struct lnet_msg *msg,
2053                               int cpt)
2054 {
2055         struct lnet_net *local_net;
2056         struct lnet_ni *best_ni;
2057
2058         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2059         if (!local_net)
2060                 return NULL;
2061
2062         /*
2063          * Iterate through the NIs in this local Net and select
2064          * the NI to send from. The selection is determined by
2065          * these criteria in the following priority:
2066          *      1. NI health and selection priority
2067          *      2. device (DMA) priority and NUMA distance
2068          *      3. NI available credits, then Round Robin
2069          */
2070         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2071                                    peer, peer_net, msg, cpt);
2072
2073         return best_ni;
2074 }
2075
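/*
 * Editor's summary (added for clarity): trigger discovery on a gateway
 * peer when the message is allowed to do so. Returns 0 when discovery
 * is not needed or is already being handled, LNET_DC_WAIT when the
 * message was queued on the peer pending discovery, or a negative errno
 * on failure.
 */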
2076 static int
2077 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2078                              int cpt)
2079 {
2080         struct lnet_peer *peer;
2081         struct lnet_peer_ni *new_lpni;
2082         int rc;
2083
2084         lnet_peer_ni_addref_locked(lpni);
2085
2086         peer = lpni->lpni_peer_net->lpn_peer;
2087
2088         if (lnet_peer_gw_discovery(peer)) {
2089                 lnet_peer_ni_decref_locked(lpni);
2090                 return 0;
2091         }
2092
2093         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2094                 lnet_peer_ni_decref_locked(lpni);
2095                 return 0;
2096         }
2097
2098         rc = lnet_discover_peer_locked(lpni, cpt, false);
2099         if (rc) {
2100                 lnet_peer_ni_decref_locked(lpni);
2101                 return rc;
2102         }
2103
2104         new_lpni = lnet_find_peer_ni_locked(lnet_nid_to_nid4(&lpni->lpni_nid));
2105         if (!new_lpni) {
2106                 lnet_peer_ni_decref_locked(lpni);
2107                 return -ENOENT;
2108         }
2109
2110         peer = new_lpni->lpni_peer_net->lpn_peer;
2111         spin_lock(&peer->lp_lock);
2112         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2113                 /* The peer NI did not change and the peer is up to date.
2114                  * Nothing more to do.
2115                  */
2116                 spin_unlock(&peer->lp_lock);
2117                 lnet_peer_ni_decref_locked(lpni);
2118                 lnet_peer_ni_decref_locked(new_lpni);
2119                 return 0;
2120         }
2121         spin_unlock(&peer->lp_lock);
2122
2123         /* Either the peer NI changed during discovery, or the peer isn't up
2124          * to date. In both cases we want to queue the message on the
2125          * (possibly new) peer's pending queue and queue the peer for discovery
2126          */
2127         msg->msg_sending = 0;
2128         msg->msg_txpeer = NULL;
2129         lnet_net_unlock(cpt);
2130         lnet_peer_queue_message(peer, msg);
2131         lnet_net_lock(cpt);
2132
2133         lnet_peer_ni_decref_locked(lpni);
2134         lnet_peer_ni_decref_locked(new_lpni);
2135
2136         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2137                msg, libcfs_nidstr(&peer->lp_primary_nid));
2138
2139         return LNET_DC_WAIT;
2140 }
2141
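/*
 * Editor's summary (added for clarity): resolve the next hop for a
 * routed send. On success *gw_lpni/*gw_peer identify the gateway, and
 * sd->sd_best_ni is set if it wasn't already. Returns 0 on success,
 * -EHOSTUNREACH when no usable route exists, -EFAULT on internal
 * inconsistency, or LNET_DC_WAIT if the message was queued pending
 * gateway discovery.
 */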
2142 static int
2143 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2144                              struct lnet_nid *dst_nid,
2145                              struct lnet_peer_ni **gw_lpni,
2146                              struct lnet_peer **gw_peer)
2147 {
2148         int rc;
2149         struct lnet_peer *gw;
2150         struct lnet_peer *lp;
2151         struct lnet_peer_net *lpn;
2152         struct lnet_peer_net *best_lpn = NULL;
2153         struct lnet_remotenet *rnet, *best_rnet = NULL;
2154         struct lnet_route *best_route = NULL;
2155         struct lnet_route *last_route = NULL;
2156         struct lnet_peer_ni *lpni = NULL;
2157         struct lnet_peer_ni *gwni = NULL;
2158         bool route_found = false;
2159         struct lnet_nid *src_nid =
2160                 !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni
2161                 ? &sd->sd_src_nid
2162                 : &sd->sd_best_ni->ni_nid;
2163         int best_lpn_healthv = 0;
2164         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2165
2166         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2167                src_nid ? libcfs_nidstr(src_nid) : "ANY");
2168
2169         /* If a router nid was specified then we are replying to a GET or
2170          * sending an ACK. In this case we use the gateway associated with the
2171          * specified router nid.
2172          */
2173         if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2174                 gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid);
2175                 if (gwni) {
2176                         gw = gwni->lpni_peer_net->lpn_peer;
2177                         lnet_peer_ni_decref_locked(gwni);
2178                         if (gw->lp_rtr_refcount)
2179                                 route_found = true;
2180                 } else {
2181                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2182                                libcfs_nidstr(&sd->sd_rtr_nid));
2183                 }
2184         }
2185
2186         if (!route_found) {
2187                 if (sd->sd_msg->msg_routing || (src_nid && !LNET_NID_IS_ANY(src_nid))) {
2188                         /* If I'm routing this message then I need to find the
2189                          * next hop based on the destination NID
2190                          *
2191                          * We also find next hop based on the destination NID
2192                          * if the source NI was specified
2193                          */
2194                         best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid));
2195                         if (!best_rnet) {
2196                                 CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n",
2197                                        (src_nid && LNET_NID_IS_ANY(src_nid)) ?
2198                                                 "any local NI" :
2199                                                 libcfs_nidstr(src_nid),
2200                                        libcfs_nidstr(&sd->sd_dst_nid));
2201                                 return -EHOSTUNREACH;
2202                         }
2203                 } else {
2204                         /* we've already looked up the initial lpni using
2205                          * dst_nid
2206                          */
2207                         lpni = sd->sd_best_lpni;
2208                         /* the peer tree must be in existence */
2209                         LASSERT(lpni && lpni->lpni_peer_net &&
2210                                 lpni->lpni_peer_net->lpn_peer);
2211                         lp = lpni->lpni_peer_net->lpn_peer;
2212
2213                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2214                                 /* is this remote network reachable?  */
2215                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2216                                 if (!rnet)
2217                                         continue;
2218
2219                                 if (!best_lpn) {
2220                                         best_lpn = lpn;
2221                                         best_rnet = rnet;
2222                                 }
2223
2224                                 /* select the preferred peer net */
2225                                 if (best_lpn_healthv > lpn->lpn_healthv)
2226                                         continue;
2227                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2228                                         goto use_lpn;
2229
2230                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2231                                         continue;
2232                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2233                                         goto use_lpn;
2234
2235                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2236                                         continue;
2237 use_lpn:
2238                                 best_lpn_healthv = lpn->lpn_healthv;
2239                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2240                                 best_lpn = lpn;
2241                                 best_rnet = rnet;
2242                         }
2243
2244                         if (!best_lpn) {
2245                                 CERROR("peer %s has no available nets\n",
2246                                        libcfs_nidstr(&sd->sd_dst_nid));
2247                                 return -EHOSTUNREACH;
2248                         }
2249
2250                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2251                                                                lnet_nid_to_nid4(&sd->sd_dst_nid),
2252                                                                lp,
2253                                                                best_lpn->lpn_net_id);
2254                         if (!sd->sd_best_lpni) {
2255                                 CERROR("peer %s is unreachable\n",
2256                                        libcfs_nidstr(&sd->sd_dst_nid));
2257                                 return -EHOSTUNREACH;
2258                         }
2259
2260                         /* We're attempting to round robin over the remote peer
2261                          * NI's so update the final destination we selected
2262                          */
2263                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2264
2265                         /* Increment the sequence number of the remote lpni so
2266                          * we can round robin over the different interfaces of
2267                          * the remote lpni
2268                          */
2269                         sd->sd_best_lpni->lpni_seq++;
2270                 }
2271
2272                 /*
2273                  * find the best route. Restrict the selection on the net of the
2274                  * local NI if we've already picked the local NI to send from.
2275                  * Otherwise, let's pick any route we can find and then find
2276                  * a local NI we can reach the route's gateway on. Any route we
2277                  * select will be reachable by virtue of the restriction we have
2278                  * when adding a route.
2279                  */
2280                 best_route = lnet_find_route_locked(best_rnet,
2281                                                     LNET_NID_NET(src_nid),
2282                                                     sd->sd_best_lpni,
2283                                                     &last_route, &gwni);
2284
2285                 if (!best_route) {
2286                         CERROR("no route to %s from %s\n",
2287                                libcfs_nidstr(dst_nid),
2288                                libcfs_nidstr(src_nid));
2289                         return -EHOSTUNREACH;
2290                 }
2291
2292                 if (!gwni) {
2293                         CERROR("Internal Error. Route expected to %s from %s\n",
2294                                libcfs_nidstr(dst_nid),
2295                                libcfs_nidstr(src_nid));
2296                         return -EFAULT;
2297                 }
2298
2299                 gw = best_route->lr_gateway;
2300                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2301         }
2302
2303         /*
2304          * If the router checker is not active then discover the gateway here.
2305          * This ensures we are able to take advantage of multi-rail routing, but
2306          * if the router checker is active then we do not unnecessarily delay
2307          * messages while the gateway is being checked by the dedicated monitor
2308          * thread.
2309          *
2310          * NB: We're only checking the alive_router_check_interval here, rather
2311          * than calling lnet_router_checker_active(), because the other
2312          * conditions that are checked by that function are either
2313          * irrelevant (the_lnet.ln_routing) or must be true (list of routers
2314          * is not empty)
2315          */
2316         if (alive_router_check_interval <= 0) {
2317                 rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2318                 if (rc)
2319                         return rc;
2320         }
2321
2322         if (!sd->sd_best_ni) {
2323                 lpn = gwni->lpni_peer_net;
2324                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2325                                                                sd->sd_msg,
2326                                                                sd->sd_md_cpt);
2327                 if (!sd->sd_best_ni) {
2328                        CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2329                                libcfs_net2str(lpn->lpn_net_id),
2330                                libcfs_nidstr(&sd->sd_src_nid));
2331                         return -EFAULT;
2332                 }
2333         }
2334
2335         *gw_lpni = gwni;
2336         *gw_peer = gw;
2337
2338         /*
2339          * increment the sequence numbers since now we're sure we're
2340          * going to use this path
2341          */
2342         if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) {
2343                 LASSERT(best_route && last_route);
2344                 best_route->lr_seq = last_route->lr_seq + 1;
2345                 if (best_lpn)
2346                         best_lpn->lpn_seq++;
2347         }
2348
2349         return 0;
2350 }
2351
2352 /*
2353  * Handle two cases:
2354  *
2355  * Case 1:
2356  *  Source specified
2357  *  Remote destination
2358  *  Non-MR destination
2359  *
2360  * Case 2:
2361  *  Source specified
2362  *  Remote destination
2363  *  MR destination
2364  *
2365  * The handling of these two cases is similar. Even though the destination
2366  * can be MR or non-MR, we'll deal directly with the router.
2367  */
2368 static int
2369 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2370 {
2371         int rc;
2372         struct lnet_peer_ni *gw_lpni = NULL;
2373         struct lnet_peer *gw_peer = NULL;
2374
2375         /* find local NI */
2376         sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt);
2377         if (!sd->sd_best_ni) {
2378                 CERROR("Can't send to %s: src %s is not a local nid\n",
2379                        libcfs_nidstr(&sd->sd_dst_nid),
2380                        libcfs_nidstr(&sd->sd_src_nid));
2381                 return -EINVAL;
2382         }
2383
2384         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2385                                           &gw_lpni, &gw_peer);
2386         if (rc)
2387                 return rc;
2388
2389         if (sd->sd_send_case & NMR_DST)
2390                 /*
2391                  * since the final destination is non-MR let's set its preferred
2392                  * NID before we send
2393                  */
2394                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2395                                          sd->sd_msg);
2396
2397         /*
2398          * We're going to send to the gw found so let's set its
2399          * info
2400          */
2401         sd->sd_peer = gw_peer;
2402         sd->sd_best_lpni = gw_lpni;
2403
2404         return lnet_handle_send(sd);
2405 }
2406
2407 struct lnet_ni *
2408 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2409                                struct lnet_msg *msg, bool discovery)
2410 {
2411         struct lnet_peer_net *lpn = NULL;
2412         struct lnet_peer_net *best_lpn = NULL;
2413         struct lnet_net *net = NULL;
2414         struct lnet_net *best_net = NULL;
2415         struct lnet_ni *best_ni = NULL;
2416         int best_lpn_healthv = 0;
2417         int best_net_healthv = 0;
2418         int net_healthv;
2419         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2420         __u32 lpn_sel_prio;
2421         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2422         __u32 net_sel_prio;
2423         bool exit = false;
2424
2425         /*
2426          * The peer can have multiple interfaces, some of them can be on
2427          * the local network and others on a routed network. We should
2428          * prefer the local network. However if the local network is not
2429          * available then we need to try the routed network
2430          */
2431
2432         /* go through all the peer nets and find the best_ni */
2433         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2434                 /*
2435                  * The peer's list of nets can contain non-local nets. We
2436                  * want to only examine the local ones.
2437                  */
2438                 net = lnet_get_net_locked(lpn->lpn_net_id);
2439                 if (!net)
2440                         continue;
2441
2442                 lpn_sel_prio = lpn->lpn_sel_priority;
2443                 net_healthv = lnet_get_net_healthv_locked(net);
2444                 net_sel_prio = net->net_sel_priority;
2445
2446                 /*
2447                  * if this is a discovery message and lp_disc_net_id is
2448                  * specified then use that net to send the discovery on.
2449                  */
2450                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2451                     discovery) {
2452                         exit = true;
2453                         goto select_lpn;
2454                 }
2455
2456                 if (!best_lpn)
2457                         goto select_lpn;
2458
2459                 /* always select the lpn with the best health */
2460                 if (best_lpn_healthv > lpn->lpn_healthv)
2461                         continue;
2462                 else if (best_lpn_healthv < lpn->lpn_healthv)
2463                         goto select_lpn;
2464
2465                 /* select the preferred peer and local nets */
2466                 if (best_lpn_sel_prio < lpn_sel_prio)
2467                         continue;
2468                 else if (best_lpn_sel_prio > lpn_sel_prio)
2469                         goto select_lpn;
2470
2471                 if (best_net_healthv > net_healthv)
2472                         continue;
2473                 else if (best_net_healthv < net_healthv)
2474                         goto select_lpn;
2475
2476                 if (best_net_sel_prio < net_sel_prio)
2477                         continue;
2478                 else if (best_net_sel_prio > net_sel_prio)
2479                         goto select_lpn;
2480
2481                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2482                         continue;
2483                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2484                         goto select_lpn;
2485
2486                 /* round robin over the local networks */
2487                 if (best_net->net_seq <= net->net_seq)
2488                         continue;
2489
2490 select_lpn:
2491                 best_net_healthv = net_healthv;
2492                 best_net_sel_prio = net_sel_prio;
2493                 best_lpn_healthv = lpn->lpn_healthv;
2494                 best_lpn_sel_prio = lpn_sel_prio;
2495                 best_lpn = lpn;
2496                 best_net = net;
2497
2498                 if (exit)
2499                         break;
2500         }
2501
2502         if (best_lpn) {
2503                 /* Select the best NI on the same net as best_lpn chosen
2504                  * above
2505                  */
2506                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2507                                                         msg, md_cpt);
2508         }
2509
2510         return best_ni;
2511 }
2512
2513 static struct lnet_ni *
2514 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2515 {
2516         struct lnet_ni *best_ni = NULL;
2517         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2518         struct lnet_peer_ni *lpni_entry;
2519
2520         /*
2521          * We must use a consistent source address when sending to a
2522          * non-MR peer. However, a non-MR peer can have multiple NIDs
2523          * on multiple networks, and we may even need to talk to this
2524          * peer on multiple networks -- certain types of
2525          * load-balancing configuration do this.
2526          *
2527          * So we need to pick the NI the peer prefers for this
2528          * particular network.
2529          */
2530         LASSERT(peer_net);
2531         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2532                             lpni_peer_nis) {
2533                 if (lpni_entry->lpni_pref_nnids == 0)
2534                         continue;
2535                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2536                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2537                                                 cpt);
2538                 break;
2539         }
2540
2541         return best_ni;
2542 }
2543
2544 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2545 static int
2546 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2547 {
2548         struct lnet_ni *best_ni = NULL;
2549         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2550
2551         /*
2552          * We must use a consistent source address when sending to a
2553          * non-MR peer. However, a non-MR peer can have multiple NIDs
2554          * on multiple networks, and we may even need to talk to this
2555          * peer on multiple networks -- certain types of
2556          * load-balancing configuration do this.
2557          *
2558          * So we need to pick the NI the peer prefers for this
2559          * particular network.
2560          */
2561
2562         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2563                                                        sd->sd_cpt);
2564
2565         /* if best_ni is still not set just pick one */
2566         if (!best_ni) {
2567                 best_ni =
2568                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2569                                                 sd->sd_best_lpni->lpni_peer_net,
2570                                                 sd->sd_msg,
2571                                                 sd->sd_md_cpt);
2572                 /* If there is no best_ni we don't have a route */
2573                 if (!best_ni) {
2574                         CERROR("no path to %s from net %s\n",
2575                                 libcfs_nidstr(&best_lpni->lpni_nid),
2576                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2577                         return -EHOSTUNREACH;
2578                 }
2579         }
2580
2581         sd->sd_best_ni = best_ni;
2582
2583         /* Set preferred NI if necessary. */
2584         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2585
2586         return 0;
2587 }
2588
2589
2590 /*
2591  * Source not specified
2592  * Local destination
2593  * Non-MR Peer
2594  *
2595  * always use the same source NID for NMR peers
2596  * If we've talked to that peer before then we already have a preferred
2597  * source NI associated with it. Otherwise, we select a preferred local NI
2598  * and store it in the peer
2599  */
2600 static int
2601 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2602 {
2603         int rc = 0;
2604
2605         /* sd->sd_best_lpni is already set to the final destination */
2606
2607         /*
2608          * At this point we should've created the peer ni and peer. If we
2609          * can't find it, then something went wrong. Instead of asserting,
2610          * output a relevant message and fail the send
2611          */
2612         if (!sd->sd_best_lpni) {
2613                 CERROR("Internal fault. Unable to send msg %s to %s. NID not known\n",
2614                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2615                        libcfs_nidstr(&sd->sd_dst_nid));
2616                 return -EFAULT;
2617         }
2618
2619         if (sd->sd_msg->msg_routing) {
2620                 /* If I'm forwarding this message then I can choose any NI
2621                  * on the destination peer net
2622                  */
2623                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2624                                                                sd->sd_peer,
2625                                                                sd->sd_best_lpni->lpni_peer_net,
2626                                                                sd->sd_msg,
2627                                                                sd->sd_md_cpt);
2628                 if (!sd->sd_best_ni) {
2629                         CERROR("Unable to forward message to %s. No local NI available\n",
2630                                libcfs_nidstr(&sd->sd_dst_nid));
2631                         rc = -EHOSTUNREACH;
2632                 }
2633         } else
2634                 rc = lnet_select_preferred_best_ni(sd);
2635
2636         if (!rc)
2637                 rc = lnet_handle_send(sd);
2638
2639         return rc;
2640 }
2641
2642 static int
2643 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2644 {
2645         /*
2646          * NOTE we've already handled the remote peer case. So we only
2647          * need to worry about the local case here.
2648          *
2649          * if we're sending a response, ACK or reply, we need to send it
2650          * to the destination NID given to us. At this point we already
2651          * have the peer_ni we're supposed to send to, so just find the
2652          * best_ni on the peer net and use that. Since we're sending to an
2653          * MR peer then we can just run the selection algorithm on our
2654          * local NIs and pick the best one.
2655          */
2656         if (sd->sd_send_case & SND_RESP) {
2657                 sd->sd_best_ni =
2658                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2659                                                 sd->sd_best_lpni->lpni_peer_net,
2660                                                 sd->sd_msg,
2661                                                 sd->sd_md_cpt);
2662
2663                 if (!sd->sd_best_ni) {
2664                         /*
2665                          * We're not going to deal with not able to send
2666                          * a response to the provided final destination
2667                          */
2668                         CERROR("Can't send response to %s. No local NI available\n",
2669                                 libcfs_nidstr(&sd->sd_dst_nid));
2670                         return -EHOSTUNREACH;
2671                 }
2672
2673                 return lnet_handle_send(sd);
2674         }
2675
2676         /*
2677          * If we get here that means we're sending a fresh request, PUT or
2678          * GET, so we need to run our standard selection algorithm.
2679          * First find the best local interface that's on any of the peer's
2680          * networks.
2681          */
2682         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2683                                         sd->sd_md_cpt,
2684                                         sd->sd_msg,
2685                                         lnet_msg_discovery(sd->sd_msg));
2686         if (sd->sd_best_ni) {
2687                 sd->sd_best_lpni =
2688                   lnet_find_best_lpni(sd->sd_best_ni,
2689                                              lnet_nid_to_nid4(&sd->sd_dst_nid),
2690                                       sd->sd_peer,
2691                                       sd->sd_best_ni->ni_net->net_id);
2692
2693                 /*
2694                  * if we're successful in selecting a peer_ni on the local
2695                  * network, then send to it. Otherwise fall through and
2696                  * try and see if we can reach it over another routed
2697                  * network
2698                  */
2699                 if (sd->sd_best_lpni &&
2700                     nid_same(&sd->sd_best_lpni->lpni_nid,
2701                              &the_lnet.ln_loni->ni_nid)) {
2702                         /*
2703                          * in case we initially started with a routed
2704                          * destination, let's reset to local
2705                          */
2706                         sd->sd_send_case &= ~REMOTE_DST;
2707                         sd->sd_send_case |= LOCAL_DST;
2708                         return lnet_handle_lo_send(sd);
2709                 } else if (sd->sd_best_lpni) {
2710                         /*
2711                          * in case we initially started with a routed
2712                          * destination, let's reset to local
2713                          */
2714                         sd->sd_send_case &= ~REMOTE_DST;
2715                         sd->sd_send_case |= LOCAL_DST;
2716                         return lnet_handle_send(sd);
2717                 }
2718
2719                 CERROR("Internal Error. Expected to have a best_lpni: "
2720                        "%s -> %s\n",
2721                        libcfs_nidstr(&sd->sd_src_nid),
2722                        libcfs_nidstr(&sd->sd_dst_nid));
2723
2724                 return -EFAULT;
2725         }
2726
2727         /*
2728          * Peer doesn't have a local network. Let's see if there is
2729          * a remote network we can reach it on.
2730          */
2731         return PASS_THROUGH;
2732 }
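/*
 * Editor's note: PASS_THROUGH is not an errno; it tells
 * lnet_handle_any_mr_dst() that no local path to the peer exists and
 * that the routed fallback should be attempted.
 */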
2733
2734 /*
2735  * Case 1:
2736  *      Source NID not specified
2737  *      Local destination
2738  *      MR peer
2739  *
2740  * Case 2:
2741  *      Source NID not specified
2742  *      Remote destination
2743  *      MR peer
2744  *
2745  * In both of these cases if we're sending a response, ACK or REPLY, then
2746  * we need to send to the destination NID provided.
2747  *
2748  * In the remote case let's deal with MR routers.
2749  *
2750  */
2751
2752 static int
2753 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2754 {
2755         int rc = 0;
2756         struct lnet_peer *gw_peer = NULL;
2757         struct lnet_peer_ni *gw_lpni = NULL;
2758
2759         /*
2760          * handle sending a response to a remote peer here so we don't
2761          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2762          */
2763         if (sd->sd_send_case & REMOTE_DST &&
2764             sd->sd_send_case & SND_RESP) {
2765                 struct lnet_peer_ni *gw;
2766                 struct lnet_peer *gw_peer;
2767
2768                 rc = lnet_handle_find_routed_path(
2769                         sd, &sd->sd_dst_nid, &gw, &gw_peer);
2770                 if (rc < 0) {
2771                         CERROR("Can't send response to %s. No route available\n",
2772                                libcfs_nidstr(&sd->sd_dst_nid));
2773                         return -EHOSTUNREACH;
2774                 } else if (rc > 0) {
2775                         return rc;
2776                 }
2777
2778                 sd->sd_best_lpni = gw;
2779                 sd->sd_peer = gw_peer;
2780
2781                 return lnet_handle_send(sd);
2782         }
2783
2784         /*
2785          * Even though the NID for the peer might not be on a local network,
2786          * since the peer is MR there could be other interfaces on the
2787          * local network. In that case we'd still like to prefer the local
2788          * network over the routed network. If we're unable to do that
2789          * then we select the best router among the different routed networks,
2790          * and if the router is MR then we can deal with it as such.
2791          */
2792         rc = lnet_handle_any_mr_dsta(sd);
2793         if (rc != PASS_THROUGH)
2794                 return rc;
2795
2796         /*
2797          * Now that we must route to the destination, we must consider the
2798          * MR case, where the destination has multiple interfaces, some of
2799          * which we can route to and others we cannot. For this reason we
2800          * need to select a destination which we can route to and, if
2801          * there are multiple, round robin among them.
2802          */
2803         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid,
2804                                           &gw_lpni, &gw_peer);
2805         if (rc)
2806                 return rc;
2807
2808         sd->sd_send_case &= ~LOCAL_DST;
2809         sd->sd_send_case |= REMOTE_DST;
2810
2811         sd->sd_peer = gw_peer;
2812         sd->sd_best_lpni = gw_lpni;
2813
2814         return lnet_handle_send(sd);
2815 }
2816
2817 /*
2818  * Source not specified
2819  * Remote destination
2820  * Non-MR peer
2821  *
2822  * Must send to the specified peer NID using the same source NID that
2823  * we've used before. If this is the first time we talk to that peer,
2824  * then find the source NI and assign it as preferred for that peer.
2825  */
2826 static int
2827 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2828 {
2829         int rc;
2830         struct lnet_peer_ni *gw_lpni = NULL;
2831         struct lnet_peer *gw_peer = NULL;
2832
2833         /*
2834          * Let's see if we have a preferred NI to talk to this NMR peer
2835          */
2836         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2837                                                               sd->sd_cpt);
2838
2839         /*
2840          * find the router; that will also select the best NI if we
2841          * haven't found one already.
2842          */
2843         rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni,
2844                                           &gw_peer);
2845         if (rc)
2846                 return rc;
2847
2848         /*
2849          * set the best_ni we've chosen as the preferred one for
2850          * this peer
2851          */
2852         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2853
2854         /* we'll be sending to the gw */
2855         sd->sd_best_lpni = gw_lpni;
2856         sd->sd_peer = gw_peer;
2857
2858         return lnet_handle_send(sd);
2859 }
2860
2861 static int
2862 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2863 {
2864         /*
2865          * turn off the SND_RESP bit.
2866          * It will be checked in the case handling
2867          */
2868         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2869
2870         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2871                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2872                 (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "",
2873                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2874                 libcfs_nidstr(&sd->sd_dst_nid),
2875                 (send_case & LOCAL_DST) ? "local" : "routed");
2876
2877         switch (send_case) {
2878         /*
2879          * For all cases where the source is specified, we should always
2880          * use the destination NID, whether it's an MR destination or not,
2881          * since we're continuing a series of related messages for the
2882          * same RPC
2883          */
2884         case SRC_SPEC_LOCAL_NMR_DST:
2885                 return lnet_handle_spec_local_nmr_dst(sd);
2886         case SRC_SPEC_LOCAL_MR_DST:
2887                 return lnet_handle_spec_local_mr_dst(sd);
2888         case SRC_SPEC_ROUTER_NMR_DST:
2889         case SRC_SPEC_ROUTER_MR_DST:
2890                 return lnet_handle_spec_router_dst(sd);
2891         case SRC_ANY_LOCAL_NMR_DST:
2892                 return lnet_handle_any_local_nmr_dst(sd);
2893         case SRC_ANY_LOCAL_MR_DST:
2894         case SRC_ANY_ROUTER_MR_DST:
2895                 return lnet_handle_any_mr_dst(sd);
2896         case SRC_ANY_ROUTER_NMR_DST:
2897                 return lnet_handle_any_router_nmr_dst(sd);
2898         default:
2899                 CERROR("Unknown send case\n");
2900                 return -1;
2901         }
2902 }
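
/*
 * Illustrative sketch (not built): the case labels in the switch above
 * are OR-combinations of the individual bits that lnet_select_pathway()
 * sets one at a time. The bit names below assume the definitions in
 * lib-lnet.h; the function name is illustrative only.
 */
#if 0
static __u32 send_case_example(void)
{
        __u32 send_case = 0;

        send_case |= SRC_ANY;   /* no source NID was specified */
        send_case |= LOCAL_DST; /* destination net is directly reachable */
        send_case |= MR_DST;    /* peer is multi-rail capable */

        /* equals SRC_ANY_LOCAL_MR_DST: dispatched to lnet_handle_any_mr_dst() */
        return send_case;
}
#endif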
2903
2904 static int
2905 lnet_select_pathway(struct lnet_nid *src_nid,
2906                     struct lnet_nid *dst_nid,
2907                     struct lnet_msg *msg,
2908                     struct lnet_nid *rtr_nid)
2909 {
2910         struct lnet_peer_ni *lpni;
2911         struct lnet_peer *peer;
2912         struct lnet_send_data send_data;
2913         int cpt, rc;
2914         int md_cpt;
2915         __u32 send_case = 0;
2916         bool final_hop;
2917         bool mr_forwarding_allowed;
2918
2919         memset(&send_data, 0, sizeof(send_data));
2920
2921         /*
2922          * get an initial CPT to use for locking. The idea here is to avoid
2923          * serializing the calls to select_pathway, so that as many
2924          * operations as possible can run concurrently. To do that we use
2925          * the CPT where this call is being executed. Later on, when we
2926          * determine the CPT to use in lnet_message_commit, we switch the
2927          * lock and check if there was any configuration change. If there
2928          * was none we proceed; otherwise we restart the operation.
2929          */
2930         cpt = lnet_net_lock_current();
2931
2932         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2933         if (md_cpt == CFS_CPT_ANY)
2934                 md_cpt = cpt;
2935
2936 again:
2937
2938         /*
2939          * If we're being asked to send to the loopback interface, there
2940          * is no need to go through any selection. We can just shortcut
2941          * the entire process and send over lolnd
2942          */
2943         send_data.sd_msg = msg;
2944         send_data.sd_cpt = cpt;
2945         if (nid_is_lo0(dst_nid)) {
2946                 rc = lnet_handle_lo_send(&send_data);
2947                 lnet_net_unlock(cpt);
2948                 return rc;
2949         }
2950
2951         /*
2952          * find an existing peer_ni, or create one and mark it as having been
2953          * created due to network traffic. This call will create the
2954          * peer->peer_net->peer_ni tree.
2955          */
2956         lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
2957         if (IS_ERR(lpni)) {
2958                 lnet_net_unlock(cpt);
2959                 return PTR_ERR(lpni);
2960         }
2961
2962         /*
2963          * Cache the original src_nid and rtr_nid. If we need to resend the
2964          * message then we'll need to know whether the src_nid was originally
2965          * specified for this message. If it was originally specified,
2966          * then we need to keep using the same src_nid since it's
2967          * continuing the same sequence of messages. Similarly, rtr_nid will
2968          * affect our choice of next hop.
2969          */
2970         if (src_nid)
2971                 msg->msg_src_nid_param = *src_nid;
2972         else
2973                 msg->msg_src_nid_param = LNET_ANY_NID;
2974         if (rtr_nid)
2975                 msg->msg_rtr_nid_param = *rtr_nid;
2976         else
2977                 msg->msg_rtr_nid_param = LNET_ANY_NID;
2978
2979         /*
2980          * If necessary, perform discovery on the peer that owns this peer_ni.
2981          * Note, this can result in the ownership of this peer_ni changing
2982          * to another peer object.
2983          */
2984         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2985         if (rc) {
2986                 lnet_peer_ni_decref_locked(lpni);
2987                 lnet_net_unlock(cpt);
2988                 return rc;
2989         }
2990         lnet_peer_ni_decref_locked(lpni);
2991
2992         peer = lpni->lpni_peer_net->lpn_peer;
2993
2994         /*
2995          * Identify the different send cases
2996          */
2997         if (!src_nid || LNET_NID_IS_ANY(src_nid)) {
2998                 send_case |= SRC_ANY;
2999                 if (lnet_get_net_locked(LNET_NID_NET(dst_nid)))
3000                         send_case |= LOCAL_DST;
3001                 else
3002                         send_case |= REMOTE_DST;
3003         } else {
3004                 send_case |= SRC_SPEC;
3005                 if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid))
3006                         send_case |= LOCAL_DST;
3007                 else
3008                         send_case |= REMOTE_DST;
3009         }
3010
3011         final_hop = false;
3012         if (msg->msg_routing && (send_case & LOCAL_DST))
3013                 final_hop = true;
3014
3015         /* Determine whether to allow MR forwarding for this message.
3016          * NB: MR forwarding is allowed if the message originator and the
3017          * destination are both MR capable, and the destination lpni that was
3018          * originally chosen by the originator is unhealthy or down.
3019          * We check the MR capability of the destination further below
3020          */
3021         mr_forwarding_allowed = false;
3022         if (final_hop) {
3023                 struct lnet_peer *src_lp;
3024                 struct lnet_peer_ni *src_lpni;
3025
3026                 src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid,
3027                                                    NULL, cpt);
3028                 /* We don't fail the send if we hit any errors here. We'll just
3029                  * try to send it via non-multi-rail criteria
3030                  */
3031                 if (!IS_ERR(src_lpni)) {
3032                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
3033                         lnet_peer_ni_decref_locked(src_lpni);
3034                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3035                         if (lnet_peer_is_multi_rail(src_lp) &&
3036                             !lnet_is_peer_ni_alive(lpni))
3037                                 mr_forwarding_allowed = true;
3038
3039                 }
3040                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3041                        mr_forwarding_allowed ? "allowed" : "not allowed");
3042         }
3043
3044         /*
3045          * Deal with the peer as NMR in the following cases:
3046          * 1. the peer is NMR
3047          * 2. We're trying to recover a specific peer NI
3048          * 3. I'm a router sending to the final destination and MR forwarding is
3049          *    not allowed for this message (as determined above).
3050          *    In this case the source of the message would've
3051          *    already selected the final destination so my job
3052          *    is to honor the selection.
3053          */
3054         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3055             (final_hop && !mr_forwarding_allowed))
3056                 send_case |= NMR_DST;
3057         else
3058                 send_case |= MR_DST;
3059
3060         if (lnet_msg_is_response(msg))
3061                 send_case |= SND_RESP;
3062
3063         /* assign parameters to the send_data */
3064         if (rtr_nid)
3065                 send_data.sd_rtr_nid = *rtr_nid;
3066         else
3067                 send_data.sd_rtr_nid = LNET_ANY_NID;
3068         if (src_nid)
3069                 send_data.sd_src_nid = *src_nid;
3070         else
3071                 send_data.sd_src_nid = LNET_ANY_NID;
3072         send_data.sd_dst_nid = *dst_nid;
3073         send_data.sd_best_lpni = lpni;
3074         /*
3075          * keep a pointer to the final destination in case we're going to
3076          * route, so we'll need to access it later
3077          */
3078         send_data.sd_final_dst_lpni = lpni;
3079         send_data.sd_peer = peer;
3080         send_data.sd_md_cpt = md_cpt;
3081         send_data.sd_send_case = send_case;
3082
3083         rc = lnet_handle_send_case_locked(&send_data);
3084
3085         /*
3086          * Update the local cpt since send_data.sd_cpt might've been
3087          * updated as a result of calling lnet_handle_send_case_locked().
3088          */
3089         cpt = send_data.sd_cpt;
3090
3091         if (rc == REPEAT_SEND)
3092                 goto again;
3093
3094         lnet_net_unlock(cpt);
3095
3096         return rc;
3097 }
3098
3099 int
3100 lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg,
3101           struct lnet_nid *rtr_nid)
3102 {
3103         struct lnet_nid *dst_nid = &msg->msg_target.nid;
3104         int rc;
3105
3106         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3107         LASSERT(msg->msg_txpeer == NULL);
3108         LASSERT(msg->msg_txni == NULL);
3109         LASSERT(!msg->msg_sending);
3110         LASSERT(!msg->msg_target_is_router);
3111         LASSERT(!msg->msg_receiving);
3112
3113         msg->msg_sending = 1;
3114
3115         LASSERT(!msg->msg_tx_committed);
3116
3117         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3118         if (rc < 0) {
3119                 if (rc == -EHOSTUNREACH)
3120                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3121                 else
3122                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3123                 return rc;
3124         }
3125
3126         if (rc == LNET_CREDIT_OK)
3127                 lnet_ni_send(msg->msg_txni, msg);
3128
3129         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3130         return 0;
3131 }
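
/*
 * A minimal caller sketch (not built): how the signed return of
 * lnet_send() above is meant to be consumed, assuming msg has already
 * been set up as the LASSERTs require. The helper name is illustrative.
 */
#if 0
static int lnet_send_example(struct lnet_msg *msg)
{
        /* NULL src/rtr NIDs mean "any"; lnet_select_pathway() chooses */
        int rc = lnet_send(NULL, msg, NULL);

        if (rc < 0)             /* selection failed; msg_health_status set */
                return rc;

        return 0;               /* sent, or queued on credits/discovery */
}
#endif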
3132
3133 enum lnet_mt_event_type {
3134         MT_TYPE_LOCAL_NI = 0,
3135         MT_TYPE_PEER_NI
3136 };
3137
3138 struct lnet_mt_event_info {
3139         enum lnet_mt_event_type mt_type;
3140         lnet_nid_t mt_nid;
3141 };
3142
3143 /* called with res_lock held */
3144 void
3145 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3146 {
3147         struct lnet_rsp_tracker *rspt;
3148
3149         /*
3150          * msg has a refcount on the MD so the MD is not going away.
3151          * The rspt queue for the cpt is protected by
3152          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3153          */
3154         if (!md->md_rspt_ptr)
3155                 return;
3156
3157         rspt = md->md_rspt_ptr;
3158
3159         /* debug code */
3160         LASSERT(rspt->rspt_cpt == cpt);
3161
3162         md->md_rspt_ptr = NULL;
3163
3164         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3165                 /*
3166                  * The monitor thread has invalidated this handle because the
3167                  * response timed out, but it failed to lookup the MD. That
3168                  * means this response tracker is on the zombie list. We can
3169                  * safely remove it under the resource lock (held by caller) and
3170                  * free the response tracker block.
3171                  */
3172                 list_del(&rspt->rspt_on_list);
3173                 lnet_rspt_free(rspt, cpt);
3174         } else {
3175                 /*
3176                  * invalidate the handle to indicate that a response has been
3177                  * received, which will then lead the monitor thread to clean up
3178                  * the rspt block.
3179                  */
3180                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3181         }
3182 }
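
/*
 * A small sketch (not built) of the handoff protocol used above: an
 * invalidated MD handle is the signal between lnet_finalize() and the
 * monitor thread for who frees the response tracker. Assumes the
 * standard LNet MD-handle helpers; the function name is illustrative.
 */
#if 0
static bool rspt_handoff_example(struct lnet_handle_md *mdh)
{
        LNetInvalidateMDHandle(mdh);            /* "response handled" marker */
        return LNetMDHandleIsInvalid(*mdh);     /* true: safe to free rspt */
}
#endif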
3183
3184 void
3185 lnet_clean_zombie_rstqs(void)
3186 {
3187         struct lnet_rsp_tracker *rspt, *tmp;
3188         int i;
3189
3190         cfs_cpt_for_each(i, lnet_cpt_table()) {
3191                 list_for_each_entry_safe(rspt, tmp,
3192                                          the_lnet.ln_mt_zombie_rstqs[i],
3193                                          rspt_on_list) {
3194                         list_del(&rspt->rspt_on_list);
3195                         lnet_rspt_free(rspt, i);
3196                 }
3197         }
3198
3199         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3200 }
3201
3202 static void
3203 lnet_finalize_expired_responses(void)
3204 {
3205         struct lnet_libmd *md;
3206         struct lnet_rsp_tracker *rspt, *tmp;
3207         ktime_t now;
3208         int i;
3209
3210         if (the_lnet.ln_mt_rstq == NULL)
3211                 return;
3212
3213         cfs_cpt_for_each(i, lnet_cpt_table()) {
3214                 LIST_HEAD(local_queue);
3215
3216                 lnet_net_lock(i);
3217                 if (!the_lnet.ln_mt_rstq[i]) {
3218                         lnet_net_unlock(i);
3219                         continue;
3220                 }
3221                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3222                 lnet_net_unlock(i);
3223
3224                 now = ktime_get();
3225
3226                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3227                         /*
3228                          * The rspt mdh will be invalidated when a response
3229                          * is received or whenever we want to discard the
3230                          * block. The monitor thread will walk the queue
3231                          * and clean up any rspts with an invalid mdh.
3232                          * It only walks the queue up to the first
3233                          * unexpired rspt block, which means that some
3234                          * rspt blocks which already received their
3235                          * corresponding responses will linger in the
3236                          * queue until they are eventually cleaned up.
3237                          */
3238                         lnet_res_lock(i);
3239                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3240                                 lnet_res_unlock(i);
3241                                 list_del(&rspt->rspt_on_list);
3242                                 lnet_rspt_free(rspt, i);
3243                                 continue;
3244                         }
3245
3246                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3247                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3248                                 struct lnet_peer_ni *lpni;
3249                                 struct lnet_nid nid;
3250
3251                                 md = lnet_handle2md(&rspt->rspt_mdh);
3252                                 if (!md) {
3253                                         /* MD has been queued for unlink, but
3254                                          * rspt hasn't been detached (Note we've
3255                                          * checked above that the rspt_mdh is
3256                                          * valid). Since we cannot lookup the MD
3257                                          * we're unable to detach the rspt
3258                                          * ourselves. Thus, move the rspt to the
3259                                          * zombie list where we'll wait for
3260                                          * either:
3261                                          *   1. The remaining operations on the
3262                                          *   MD to complete. In this case the
3263                                          *   final operation will result in
3264                                          *   lnet_msg_detach_md()->
3265                                          *   lnet_detach_rsp_tracker() where
3266                                          *   we will clean up this response
3267                                          *   tracker.
3268                                          *   2. LNet to shutdown. In this case
3269                                          *   we'll wait until after all LND Nets
3270                                          *   have shutdown and then we can
3271                                          *   safely free any remaining response
3272                                          *   tracker blocks on the zombie list.
3273                                          * Note: We need to hold the resource
3274                                          * lock when adding to the zombie list
3275                                          * because we may have concurrent access
3276                                          * with lnet_detach_rsp_tracker().
3277                                          */
3278                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3279                                         list_move(&rspt->rspt_on_list,
3280                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3281                                         lnet_res_unlock(i);
3282                                         continue;
3283                                 }
3284                                 LASSERT(md->md_rspt_ptr == rspt);
3285                                 md->md_rspt_ptr = NULL;
3286                                 lnet_res_unlock(i);
3287
3288                                 LNetMDUnlink(rspt->rspt_mdh);
3289
3290                                 nid = rspt->rspt_next_hop_nid;
3291
3292                                 list_del(&rspt->rspt_on_list);
3293                                 lnet_rspt_free(rspt, i);
3294
3295                                 /* If we're shutting down we just want to clean
3296                                  * up the rspt blocks
3297                                  */
3298                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3299                                         continue;
3300
3301                                 lnet_net_lock(i);
3302                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3303                                 lnet_net_unlock(i);
3304
3305                                 CDEBUG(D_NET,
3306                                        "Response timeout: md = %p: nid = %s\n",
3307                                        md, libcfs_nidstr(&nid));
3308
3309                                 /*
3310                                  * If the response from the next hop timed
3311                                  * out, decrement its health value so that
3312                                  * we avoid using it
3313                                  */
3314                                 lnet_net_lock(0);
3315                                 lpni = lnet_peer_ni_find_locked(&nid);
3316                                 if (lpni) {
3317                                         lnet_handle_remote_failure_locked(lpni);
3318                                         lnet_peer_ni_decref_locked(lpni);
3319                                 }
3320                                 lnet_net_unlock(0);
3321                         } else {
3322                                 lnet_res_unlock(i);
3323                                 break;
3324                         }
3325                 }
3326
3327                 if (!list_empty(&local_queue)) {
3328                         lnet_net_lock(i);
3329                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3330                         lnet_net_unlock(i);
3331                 }
3332         }
3333 }
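
/*
 * A sketch (not built) of how the deadline tested above would be armed
 * when the tracker is attached, assuming the rspt_deadline field and
 * the lnet_transaction_timeout parameter used elsewhere in LNet; the
 * function name is illustrative.
 */
#if 0
static void rspt_arm_deadline_example(struct lnet_rsp_tracker *rspt)
{
        /* expired once ktime_compare(now, rspt_deadline) >= 0 above */
        rspt->rspt_deadline = ktime_add_ns(ktime_get(),
                                           lnet_transaction_timeout *
                                           NSEC_PER_SEC);
}
#endif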
3334
3335 static void
3336 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3337 {
3338         struct lnet_msg *msg;
3339
3340         while (!list_empty(resendq)) {
3341                 struct lnet_peer_ni *lpni;
3342
3343                 msg = list_entry(resendq->next, struct lnet_msg,
3344                                  msg_list);
3345
3346                 list_del_init(&msg->msg_list);
3347
3348                 lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid);
3349                 if (!lpni) {
3350                         lnet_net_unlock(cpt);
3351                         CERROR("Expected that a peer is already created for %s\n",
3352                                libcfs_nidstr(&msg->msg_hdr.dest_nid));
3353                         msg->msg_no_resend = true;
3354                         lnet_finalize(msg, -EFAULT);
3355                         lnet_net_lock(cpt);
3356                 } else {
3357                         int rc;
3358
3359                         lnet_peer_ni_decref_locked(lpni);
3360
3361                         lnet_net_unlock(cpt);
3362                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3363                                libcfs_nidstr(&msg->msg_src_nid_param),
3364                                libcfs_idstr(&msg->msg_target),
3365                                lnet_msgtyp2str(msg->msg_type),
3366                                msg->msg_recovery,
3367                                msg->msg_retry_count);
3368                         rc = lnet_send(&msg->msg_src_nid_param, msg,
3369                                        &msg->msg_rtr_nid_param);
3370                         if (rc) {
3371                                 CERROR("Error sending %s to %s: %d\n",
3372                                        lnet_msgtyp2str(msg->msg_type),
3373                                        libcfs_idstr(&msg->msg_target), rc);
3374                                 msg->msg_no_resend = true;
3375                                 lnet_finalize(msg, rc);
3376                         }
3377                         lnet_net_lock(cpt);
3378                         if (!rc)
3379                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3380                 }
3381         }
3382 }
3383
3384 static void
3385 lnet_resend_pending_msgs(void)
3386 {
3387         int i;
3388
3389         cfs_cpt_for_each(i, lnet_cpt_table()) {
3390                 lnet_net_lock(i);
3391                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3392                 lnet_net_unlock(i);
3393         }
3394 }
3395
3396 /* called with cpt and ni_lock held */
3397 static void
3398 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3399 {
3400         struct lnet_handle_md recovery_mdh;
3401
3402         LNetInvalidateMDHandle(&recovery_mdh);
3403
3404         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3405             force) {
3406                 recovery_mdh = ni->ni_ping_mdh;
3407                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3408         }
3409         lnet_ni_unlock(ni);
3410         lnet_net_unlock(cpt);
3411         if (!LNetMDHandleIsInvalid(recovery_mdh))
3412                 LNetMDUnlink(recovery_mdh);
3413         lnet_net_lock(cpt);
3414         lnet_ni_lock(ni);
3415 }
3416
3417 static void
3418 lnet_recover_local_nis(void)
3419 {
3420         struct lnet_mt_event_info *ev_info;
3421         LIST_HEAD(processed_list);
3422         LIST_HEAD(local_queue);
3423         struct lnet_handle_md mdh;
3424         struct lnet_ni *tmp;
3425         struct lnet_ni *ni;
3426         lnet_nid_t nid;
3427         int healthv;
3428         int rc;
3429         time64_t now;
3430
3431         /*
3432          * splice the recovery queue on a local queue. We will iterate
3433          * through the local queue and update it as needed. Once we're
3434          * done with the traversal, we'll splice the local queue back on
3435          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3436          * will be traversed in the next iteration.
3437          */
3438         lnet_net_lock(0);
3439         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3440                          &local_queue);
3441         lnet_net_unlock(0);
3442
3443         now = ktime_get_seconds();
3444
3445         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3446                 /*
3447                  * if an NI is being deleted or it is now healthy, there
3448                  * is no need to keep it around in the recovery queue.
3449                  * The monitor thread is the only thread responsible for
3450                  * removing the NI from the recovery queue.
3451                  * Multiple threads can be adding NIs to the recovery
3452                  * queue.
3453                  */
3454                 healthv = atomic_read(&ni->ni_healthv);
3455
3456                 lnet_net_lock(0);
3457                 lnet_ni_lock(ni);
3458                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3459                     healthv == LNET_MAX_HEALTH_VALUE) {
3460                         list_del_init(&ni->ni_recovery);
3461                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3462                         lnet_ni_unlock(ni);
3463                         lnet_ni_decref_locked(ni, 0);
3464                         lnet_net_unlock(0);
3465                         continue;
3466                 }
3467
3468                 /*
3469                  * if the local NI failed recovery we must unlink the md.
3470                  * But we want to keep the local_ni on the recovery queue
3471                  * so we can continue the attempts to recover it.
3472                  */
3473                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3474                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3475                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3476                 }
3477
3479                 lnet_ni_unlock(ni);
3480
3481                 if (now < ni->ni_next_ping) {
3482                         lnet_net_unlock(0);
3483                         continue;
3484                 }
3485
3486                 lnet_net_unlock(0);
3487
3488                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3489                        libcfs_nidstr(&ni->ni_nid));
3490
3491                 lnet_ni_lock(ni);
3492                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3493                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3494                         lnet_ni_unlock(ni);
3495
3496                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3497                         if (!ev_info) {
3498                                 CERROR("out of memory. Can't recover %s\n",
3499                                        libcfs_nidstr(&ni->ni_nid));
3500                                 lnet_ni_lock(ni);
3501                                 ni->ni_recovery_state &=
3502                                   ~LNET_NI_RECOVERY_PENDING;
3503                                 lnet_ni_unlock(ni);
3504                                 continue;
3505                         }
3506
3507                         mdh = ni->ni_ping_mdh;
3508                         /*
3509                          * Invalidate the ni mdh in case it's deleted.
3510                          * We'll unlink the mdh in this case below.
3511                          */
3512                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3513                         /* FIXME need to handle large-addr nid */
3514                         nid = lnet_nid_to_nid4(&ni->ni_nid);
3515
3516                         /*
3517                          * remove the NI from the local queue and drop the
3518                          * reference count to it while we're recovering
3519                          * it. The reason is that the NI could be deleted
3520                          * in the meantime, and the way the code is
3521                          * structured, if we don't drop the ref then the
3522                          * deletion code will enter a loop waiting for the
3523                          * reference count to be dropped while holding
3524                          * ln_mutex_lock(). When we look up the peer to
3525                          * send to in lnet_select_pathway() we will try to
3526                          * take ln_mutex_lock() as well, leading to
3527                          * a deadlock. By dropping the refcount and
3528                          * removing the NI from the list, we allow it to
3529                          * be deleted; we then use the cached NID to
3530                          * look it up again. If it's gone, we just
3531                          * continue examining the rest of the queue.
3532                          */
3533                         lnet_net_lock(0);
3534                         list_del_init(&ni->ni_recovery);
3535                         lnet_ni_decref_locked(ni, 0);
3536                         lnet_net_unlock(0);
3537
3538                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3539                         ev_info->mt_nid = nid;
3540                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3541                                             ev_info, the_lnet.ln_mt_handler,
3542                                             true);
3543                         /* lookup the nid again */
3544                         lnet_net_lock(0);
3545                         ni = lnet_nid2ni_locked(nid, 0);
3546                         if (!ni) {
3547                                 /*
3548                                  * the NI has been deleted when we dropped
3549                                  * the ref count
3550                                  */
3551                                 lnet_net_unlock(0);
3552                                 LNetMDUnlink(mdh);
3553                                 continue;
3554                         }
3555                         ni->ni_ping_count++;
3556
3557                         ni->ni_ping_mdh = mdh;
3558                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3559                                                         now);
3560
3561                         if (rc) {
3562                                 lnet_ni_lock(ni);
3563                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3564                                 lnet_ni_unlock(ni);
3565                         }
3566                         lnet_net_unlock(0);
3567                 } else
3568                         lnet_ni_unlock(ni);
3569         }
3570
3571         /*
3572          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3573          * reexamined in the next iteration.
3574          */
3575         list_splice_init(&processed_list, &local_queue);
3576         lnet_net_lock(0);
3577         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3578         lnet_net_unlock(0);
3579 }
3580
3581 static int
3582 lnet_resendqs_create(void)
3583 {
3584         struct list_head **resendqs;
3585         resendqs = lnet_create_array_of_queues();
3586
3587         if (!resendqs)
3588                 return -ENOMEM;
3589
3590         lnet_net_lock(LNET_LOCK_EX);
3591         the_lnet.ln_mt_resendqs = resendqs;
3592         lnet_net_unlock(LNET_LOCK_EX);
3593
3594         return 0;
3595 }
3596
3597 static void
3598 lnet_clean_local_ni_recoveryq(void)
3599 {
3600         struct lnet_ni *ni;
3601
3602         /* This is only called when the monitor thread has stopped */
3603         lnet_net_lock(0);
3604
3605         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3606                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3607                                 struct lnet_ni, ni_recovery);
3608                 list_del_init(&ni->ni_recovery);
3609                 lnet_ni_lock(ni);
3610                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3611                 lnet_ni_unlock(ni);
3612                 lnet_ni_decref_locked(ni, 0);
3613         }
3614
3615         lnet_net_unlock(0);
3616 }
3617
3618 static void
3619 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3620                                      bool force)
3621 {
3622         struct lnet_handle_md recovery_mdh;
3623
3624         LNetInvalidateMDHandle(&recovery_mdh);
3625
3626         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3627                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3628                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3629         }
3630         spin_unlock(&lpni->lpni_lock);
3631         lnet_net_unlock(cpt);
3632         if (!LNetMDHandleIsInvalid(recovery_mdh))
3633                 LNetMDUnlink(recovery_mdh);
3634         lnet_net_lock(cpt);
3635         spin_lock(&lpni->lpni_lock);
3636 }
3637
3638 static void
3639 lnet_clean_peer_ni_recoveryq(void)
3640 {
3641         struct lnet_peer_ni *lpni, *tmp;
3642
3643         lnet_net_lock(LNET_LOCK_EX);
3644
3645         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3646                                  lpni_recovery) {
3647                 list_del_init(&lpni->lpni_recovery);
3648                 spin_lock(&lpni->lpni_lock);
3649                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3650                 spin_unlock(&lpni->lpni_lock);
3651                 lnet_peer_ni_decref_locked(lpni);
3652         }
3653
3654         lnet_net_unlock(LNET_LOCK_EX);
3655 }
3656
3657 static void
3658 lnet_clean_resendqs(void)
3659 {
3660         struct lnet_msg *msg, *tmp;
3661         LIST_HEAD(msgs);
3662         int i;
3663
3664         cfs_cpt_for_each(i, lnet_cpt_table()) {
3665                 lnet_net_lock(i);
3666                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3667                 lnet_net_unlock(i);
3668                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3669                         list_del_init(&msg->msg_list);
3670                         msg->msg_no_resend = true;
3671                         lnet_finalize(msg, -ESHUTDOWN);
3672                 }
3673         }
3674
3675         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3676 }
3677
3678 static void
3679 lnet_recover_peer_nis(void)
3680 {
3681         struct lnet_mt_event_info *ev_info;
3682         LIST_HEAD(processed_list);
3683         LIST_HEAD(local_queue);
3684         struct lnet_handle_md mdh;
3685         struct lnet_peer_ni *lpni;
3686         struct lnet_peer_ni *tmp;
3687         lnet_nid_t nid;
3688         int healthv;
3689         int rc;
3690         time64_t now;
3691
3692         /*
3693          * Always use cpt 0 for locking across all interactions with
3694          * ln_mt_peerNIRecovq
3695          */
3696         lnet_net_lock(0);
3697         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3698                          &local_queue);
3699         lnet_net_unlock(0);
3700
3701         now = ktime_get_seconds();
3702
3703         list_for_each_entry_safe(lpni, tmp, &local_queue,
3704                                  lpni_recovery) {
3705                 /*
3706                  * The same protection strategy is used here as is in the
3707                  * local recovery case.
3708                  */
3709                 lnet_net_lock(0);
3710                 healthv = atomic_read(&lpni->lpni_healthv);
3711                 spin_lock(&lpni->lpni_lock);
3712                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3713                     healthv == LNET_MAX_HEALTH_VALUE) {
3714                         list_del_init(&lpni->lpni_recovery);
3715                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3716                         spin_unlock(&lpni->lpni_lock);
3717                         lnet_peer_ni_decref_locked(lpni);
3718                         lnet_net_unlock(0);
3719                         continue;
3720                 }
3721
3722                 /*
3723                  * If the peer NI has failed recovery we must unlink the
3724                  * md. But we want to keep the peer ni on the recovery
3725                  * queue so we can try to continue recovering it
3726                  */
3727                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3728                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3729                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3730                 }
3731
3732                 spin_unlock(&lpni->lpni_lock);
3733
3734                 if (now < lpni->lpni_next_ping) {
3735                         lnet_net_unlock(0);
3736                         continue;
3737                 }
3738
3739                 lnet_net_unlock(0);
3740
3741                 /*
3742                  * NOTE: we're racing with peer deletion from user space.
3743                  * It's possible that a peer is deleted after we check its
3744                  * state. In this case the recovery can create a new peer
3745                  */
3746                 spin_lock(&lpni->lpni_lock);
3747                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3748                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3749                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3750                         spin_unlock(&lpni->lpni_lock);
3751
3752                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3753                         if (!ev_info) {
3754                                 CERROR("out of memory. Can't recover %s\n",
3755                                        libcfs_nidstr(&lpni->lpni_nid));
3756                                 spin_lock(&lpni->lpni_lock);
3757                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3758                                 spin_unlock(&lpni->lpni_lock);
3759                                 continue;
3760                         }
3761
3762                         /* look at the comments in lnet_recover_local_nis() */
3763                         mdh = lpni->lpni_recovery_ping_mdh;
3764                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3765                         /* FIXME handle large-addr nid */
3766                         nid = lnet_nid_to_nid4(&lpni->lpni_nid);
3767                         lnet_net_lock(0);
3768                         list_del_init(&lpni->lpni_recovery);
3769                         lnet_peer_ni_decref_locked(lpni);
3770                         lnet_net_unlock(0);
3771
3772                         ev_info->mt_type = MT_TYPE_PEER_NI;
3773                         ev_info->mt_nid = nid;
3774                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3775                                             ev_info, the_lnet.ln_mt_handler,
3776                                             true);
3777                         lnet_net_lock(0);
3778                         /*
3779                          * lnet_find_peer_ni_locked() grabs a refcount for
3780                          * us. No need to take it explicitly.
3781                          */
3782                         lpni = lnet_find_peer_ni_locked(nid);
3783                         if (!lpni) {
3784                                 lnet_net_unlock(0);
3785                                 LNetMDUnlink(mdh);
3786                                 continue;
3787                         }
3788
3789                         lpni->lpni_ping_count++;
3790
3791                         lpni->lpni_recovery_ping_mdh = mdh;
3792
3793                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3794                                                              &processed_list,
3795                                                              now);
3796                         if (rc) {
3797                                 spin_lock(&lpni->lpni_lock);
3798                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3799                                 spin_unlock(&lpni->lpni_lock);
3800                         }
3801
3802                         /* Drop the ref taken by lnet_find_peer_ni_locked() */
3803                         lnet_peer_ni_decref_locked(lpni);
3804                         lnet_net_unlock(0);
3805                 } else
3806                         spin_unlock(&lpni->lpni_lock);
3807         }
3808
3809         list_splice_init(&processed_list, &local_queue);
3810         lnet_net_lock(0);
3811         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3812         lnet_net_unlock(0);
3813 }
3814
3815 static int
3816 lnet_monitor_thread(void *arg)
3817 {
3818         time64_t rsp_timeout = 0;
3819         time64_t now;
3820
3821         wait_for_completion(&the_lnet.ln_started);
3822         /*
3823          * The monitor thread takes care of the following:
3824          *  1. Checks the aliveness of routers
3825          *  2. Checks if there are messages on the resend queue and
3826          *     resends them.
3827          *  3. Checks if there are any NIs on the local recovery queue
3828          *     and pings them.
3829          *  4. Checks if there are any NIs on the remote recovery queue
3830          *     and pings them.
3831          */
3832         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3833                 now = ktime_get_real_seconds();
3834
3835                 if (lnet_router_checker_active())
3836                         lnet_check_routers();
3837
3838                 lnet_resend_pending_msgs();
3839
3840                 if (now >= rsp_timeout) {
3841                         lnet_finalize_expired_responses();
3842                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3843                 }
3844
3845                 lnet_recover_local_nis();
3846                 lnet_recover_peer_nis();
3847
3848                 /*
3849                  * TODO do we need to check if we should sleep without
3850                  * timeout?  Technically, an active system will always
3851                  * have messages in flight so this check will always
3852                  * evaluate to false. And on an idle system do we care
3853                  * if we wake up every 1 second? Although, we've seen
3854                  * cases where we get a complaint that an idle thread
3855                  * is waking up unnecessarily.
3856                  */
3857                 wait_for_completion_interruptible_timeout(
3858                         &the_lnet.ln_mt_wait_complete,
3859                         cfs_time_seconds(1));
3860                 /* Must re-init the completion before testing anything,
3861                  * including ln_mt_state.
3862                  */
3863                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3864         }
3865
3866         /* Shutting down */
3867         lnet_net_lock(LNET_LOCK_EX);
3868         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3869         lnet_net_unlock(LNET_LOCK_EX);
3870
3871         /* signal that the monitor thread is exiting */
3872         up(&the_lnet.ln_mt_signal);
3873
3874         return 0;
3875 }
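
/*
 * A minimal sketch (not built) of the sleep/wake handshake used above:
 * a waker (e.g. lnet_monitor_thr_stop() below) calls complete() on
 * ln_mt_wait_complete to cut the one-second snooze short, and the
 * sleeper re-arms the completion before re-reading shared state so a
 * wake-up posted after the timeout is not lost. Function name is
 * illustrative.
 */
#if 0
static void monitor_snooze_example(void)
{
        wait_for_completion_interruptible_timeout(
                &the_lnet.ln_mt_wait_complete, cfs_time_seconds(1));
        /* re-arm before testing ln_mt_state again */
        reinit_completion(&the_lnet.ln_mt_wait_complete);
}
#endif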
3876
3877 /*
3878  * lnet_send_ping
3879  * Sends a ping.
3880  * Returns == 0 if success
3881  * Returns > 0 if LNetMDBind or prior fails
3882  * Returns < 0 if LNetGet fails
3883  */
3884 int
3885 lnet_send_ping(lnet_nid_t dest_nid,
3886                struct lnet_handle_md *mdh, int nnis,
3887                void *user_data, lnet_handler_t handler, bool recovery)
3888 {
3889         struct lnet_md md = { NULL };
3890         struct lnet_process_id id;
3891         struct lnet_ping_buffer *pbuf;
3892         int rc;
3893
3894         if (dest_nid == LNET_NID_ANY) {
3895                 rc = -EHOSTUNREACH;
3896                 goto fail_error;
3897         }
3898
3899         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3900         if (!pbuf) {
3901                 rc = ENOMEM; /* positive on purpose: a "prior" failure, see contract above */
3902                 goto fail_error;
3903         }
3904
3905         /* initialize md content */
3906         md.start     = &pbuf->pb_info;
3907         md.length    = LNET_PING_INFO_SIZE(nnis);
3908         md.threshold = 2; /* GET/REPLY */
3909         md.max_size  = 0;
3910         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3911         md.user_ptr  = user_data;
3912         md.handler   = handler;
3913
3914         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3915         if (rc) {
3916                 lnet_ping_buffer_decref(pbuf);
3917                 CERROR("Can't bind MD: %d\n", rc);
3918                 rc = -rc; /* change the rc to positive */
3919                 goto fail_error;
3920         }
3921         id.pid = LNET_PID_LUSTRE;
3922         id.nid = dest_nid;
3923
3924         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3925                      LNET_RESERVED_PORTAL,
3926                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3927
3928         if (rc)
3929                 goto fail_unlink_md;
3930
3931         return 0;
3932
3933 fail_unlink_md:
3934         LNetMDUnlink(*mdh);
3935         LNetInvalidateMDHandle(mdh);
3936 fail_error:
3937         return rc;
3938 }
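
/*
 * A caller sketch (not built) for the three-way return convention
 * documented above lnet_send_ping(); it mirrors the recovery paths
 * earlier in this file. The function name is illustrative.
 */
#if 0
static void send_ping_example(lnet_nid_t nid, struct lnet_handle_md *mdh)
{
        int rc = lnet_send_ping(nid, mdh, LNET_INTERFACES_MIN,
                                NULL, the_lnet.ln_mt_handler, false);

        if (rc > 0)
                return;         /* bind or earlier failed; no MD to unlink */
        if (rc < 0)
                return;         /* LNetGet() failed; MD already unlinked */
        /* rc == 0: REPLY/SEND/UNLINK events arrive via the handler */
}
#endif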
3939
3940 static void
3941 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3942                            int status, bool send, bool unlink_event)
3943 {
3944         lnet_nid_t nid = ev_info->mt_nid;
3945
3946         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3947                 struct lnet_ni *ni;
3948
3949                 lnet_net_lock(0);
3950                 ni = lnet_nid2ni_locked(nid, 0);
3951                 if (!ni) {
3952                         lnet_net_unlock(0);
3953                         return;
3954                 }
3955                 lnet_ni_lock(ni);
3956                 if (!send || status != 0)
3957                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3958                 if (status)
3959                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3960                 lnet_ni_unlock(ni);
3961                 lnet_net_unlock(0);
3962
3963                 if (status != 0) {
3964                         CERROR("local NI (%s) recovery failed with %d\n",
3965                                libcfs_nid2str(nid), status);
3966                         return;
3967                 }
3968                 /*
3969                  * need to increment healthv for the ni here, because in
3970                  * the lnet_finalize() path we don't have access to this
3971                  * NI. And in order to get access to it, we'll need to
3972                  * carry forward too much information.
3973                  * In the peer case, it'll naturally be incremented
3974                  */
3975                 if (!unlink_event)
3976                         lnet_inc_healthv(&ni->ni_healthv,
3977                                          lnet_health_sensitivity);
3978         } else {
3979                 struct lnet_peer_ni *lpni;
3980                 int cpt;
3981
3982                 cpt = lnet_net_lock_current();
3983                 lpni = lnet_find_peer_ni_locked(nid);
3984                 if (!lpni) {
3985                         lnet_net_unlock(cpt);
3986                         return;
3987                 }
3988                 spin_lock(&lpni->lpni_lock);
3989                 if (!send || status != 0)
3990                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3991                 if (status)
3992                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3993                 spin_unlock(&lpni->lpni_lock);
3994                 lnet_peer_ni_decref_locked(lpni);
3995                 lnet_net_unlock(cpt);
3996
3997                 if (status != 0)
3998                         CERROR("peer NI (%s) recovery failed with %d\n",
3999                                libcfs_nid2str(nid), status);
4000         }
4001 }
4002
4003 void
4004 lnet_mt_event_handler(struct lnet_event *event)
4005 {
4006         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
4007         struct lnet_ping_buffer *pbuf;
4008
4009         /* TODO: remove assert */
4010         LASSERT(event->type == LNET_EVENT_REPLY ||
4011                 event->type == LNET_EVENT_SEND ||
4012                 event->type == LNET_EVENT_UNLINK);
4013
4014         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
4015                event->status);
4016
4017         switch (event->type) {
4018         case LNET_EVENT_UNLINK:
4019                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
4020                        libcfs_nid2str(ev_info->mt_nid));
4021                 /* fallthrough */
4022         case LNET_EVENT_REPLY:
4023                 lnet_handle_recovery_reply(ev_info, event->status, false,
4024                                            event->type == LNET_EVENT_UNLINK);
4025                 break;
4026         case LNET_EVENT_SEND:
4027                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4028                                libcfs_nid2str(ev_info->mt_nid),
4029                                (event->status) ? "unsuccessfully" :
4030                                "successfully", event->status);
4031                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4032                 break;
4033         default:
4034                 CERROR("Unexpected event: %d\n", event->type);
4035                 break;
4036         }
4037         if (event->unlinked) {
4038                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4039                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4040                 lnet_ping_buffer_decref(pbuf);
4041         }
4042 }
4043
4044 static int
4045 lnet_rsp_tracker_create(void)
4046 {
4047         struct list_head **rstqs;
4048         rstqs = lnet_create_array_of_queues();
4049
4050         if (!rstqs)
4051                 return -ENOMEM;
4052
4053         the_lnet.ln_mt_rstq = rstqs;
4054
4055         return 0;
4056 }
4057
4058 static void
4059 lnet_rsp_tracker_clean(void)
4060 {
4061         lnet_finalize_expired_responses();
4062
4063         cfs_percpt_free(the_lnet.ln_mt_rstq);
4064         the_lnet.ln_mt_rstq = NULL;
4065 }
4066
4067 int lnet_monitor_thr_start(void)
4068 {
4069         int rc = 0;
4070         struct task_struct *task;
4071
4072         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4073                 return -EALREADY;
4074
4075         rc = lnet_resendqs_create();
4076         if (rc)
4077                 return rc;
4078
4079         rc = lnet_rsp_tracker_create();
4080         if (rc)
4081                 goto clean_queues;
4082
4083         sema_init(&the_lnet.ln_mt_signal, 0);
4084
4085         lnet_net_lock(LNET_LOCK_EX);
4086         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4087         lnet_net_unlock(LNET_LOCK_EX);
4088         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4089         if (IS_ERR(task)) {
4090                 rc = PTR_ERR(task);
4091                 CERROR("Can't start monitor thread: %d\n", rc);
4092                 goto clean_thread;
4093         }
4094
4095         return 0;
4096
4097 clean_thread:
4098         lnet_net_lock(LNET_LOCK_EX);
4099         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4100         lnet_net_unlock(LNET_LOCK_EX);
4101         /* block until event callback signals exit */
4102         down(&the_lnet.ln_mt_signal);
4103         /* clean up */
4104         lnet_net_lock(LNET_LOCK_EX);
4105         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4106         lnet_net_unlock(LNET_LOCK_EX);
4107         lnet_rsp_tracker_clean();
4108         lnet_clean_local_ni_recoveryq();
4109         lnet_clean_peer_ni_recoveryq();
4110         lnet_clean_resendqs();
4111         the_lnet.ln_mt_handler = NULL;
4112         return rc;
4113 clean_queues:
4114         lnet_rsp_tracker_clean();
4115         lnet_clean_local_ni_recoveryq();
4116         lnet_clean_peer_ni_recoveryq();
4117         lnet_clean_resendqs();
4118         return rc;
4119 }
4120
4121 void lnet_monitor_thr_stop(void)
4122 {
4123         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4124                 return;
4125
4126         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4127         lnet_net_lock(LNET_LOCK_EX);
4128         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4129         lnet_net_unlock(LNET_LOCK_EX);
4130
4131         /* tell the monitor thread that we're shutting down */
4132         complete(&the_lnet.ln_mt_wait_complete);
4133
4134         /* block until monitor thread signals that it's done */
4135         down(&the_lnet.ln_mt_signal);
4136         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4137
4138         /* perform cleanup tasks */
4139         lnet_rsp_tracker_clean();
4140         lnet_clean_local_ni_recoveryq();
4141         lnet_clean_peer_ni_recoveryq();
4142         lnet_clean_resendqs();
4143 }
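
/*
 * A minimal sketch (illustrative only, not part of the original file) of
 * the monitor-thread lifecycle implied by the two functions above:
 * ln_mt_state moves SHUTDOWN -> RUNNING on a successful start, and
 * RUNNING -> STOPPING -> SHUTDOWN on stop, with ln_mt_signal as the
 * handshake that the thread has exited before the queues are cleaned.
 */
#if 0
static int example_restart_monitor(void)
{
        lnet_monitor_thr_stop();                /* no-op if already shut down */
        return lnet_monitor_thr_start();        /* -EALREADY if still running */
}
#endif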
4144
4145 void
4146 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4147                   __u32 msg_type)
4148 {
4149         lnet_net_lock(cpt);
4150         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4151         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4152         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4153         lnet_net_unlock(cpt);
4154
4155         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4156 }
4157
4158 static void
4159 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4160 {
4161         struct lnet_hdr *hdr = &msg->msg_hdr;
4162
4163         if (msg->msg_wanted != 0)
4164                 lnet_setpayloadbuffer(msg);
4165
4166         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4167
4168         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4169          * it back into the ACK during lnet_finalize() */
4170         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4171                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4172
4173         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4174                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4175 }
4176
4177 static int
4178 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4179 {
4180         struct lnet_hdr         *hdr = &msg->msg_hdr;
4181         struct lnet_match_info  info;
4182         int                     rc;
4183         bool                    ready_delay;
4184
4185         /* Convert put fields to host byte order */
4186         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4187         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4188         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4189
4190         /* Primary peer NID. */
4191         info.mi_id.nid = msg->msg_initiator;
4192         info.mi_id.pid  = hdr->src_pid;
4193         info.mi_opc     = LNET_MD_OP_PUT;
4194         info.mi_portal  = hdr->msg.put.ptl_index;
4195         info.mi_rlength = hdr->payload_length;
4196         info.mi_roffset = hdr->msg.put.offset;
4197         info.mi_mbits   = hdr->msg.put.match_bits;
4198         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4199
4200         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4201         ready_delay = msg->msg_rx_ready_delay;
4202
4203  again:
4204         rc = lnet_ptl_match_md(&info, msg);
4205         switch (rc) {
4206         default:
4207                 LBUG();
4208
4209         case LNET_MATCHMD_OK:
4210                 lnet_recv_put(ni, msg);
4211                 return 0;
4212
4213         case LNET_MATCHMD_NONE:
4214                 if (ready_delay)
4215                         /* no eager_recv, or it has already been called;
4216                          * the message should be on the delayed list */
4217                         return 0;
4218
4219                 rc = lnet_ni_eager_recv(ni, msg);
4220                 if (rc == 0) {
4221                         ready_delay = true;
4222                         goto again;
4223                 }
4224                 /* fall through */
4225
4226         case LNET_MATCHMD_DROP:
4227                 CNETERR("Dropping PUT from %s portal %d match %llu"
4228                         " offset %d length %d: %d\n",
4229                         libcfs_idstr(&info.mi_id), info.mi_portal,
4230                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4231
4232                 return -ENOENT; /* -ve: OK but no match */
4233         }
4234 }
4235
4236 static int
4237 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4238 {
4239         struct lnet_match_info info;
4240         struct lnet_hdr *hdr = &msg->msg_hdr;
4241         struct lnet_process_id source_id;
4242         struct lnet_handle_wire reply_wmd;
4243         int rc;
4244
4245         /* Convert get fields to host byte order */
4246         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4247         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4248         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4249         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4250
4251         source_id.nid = lnet_nid_to_nid4(&hdr->src_nid);
4252         source_id.pid = hdr->src_pid;
4253         /* Primary peer NID */
4254         info.mi_id.nid  = msg->msg_initiator;
4255         info.mi_id.pid  = hdr->src_pid;
4256         info.mi_opc     = LNET_MD_OP_GET;
4257         info.mi_portal  = hdr->msg.get.ptl_index;
4258         info.mi_rlength = hdr->msg.get.sink_length;
4259         info.mi_roffset = hdr->msg.get.src_offset;
4260         info.mi_mbits   = hdr->msg.get.match_bits;
4261         info.mi_cpt     = lnet_nid2cpt(&msg->msg_initiator, ni);
4262
4263         rc = lnet_ptl_match_md(&info, msg);
4264         if (rc == LNET_MATCHMD_DROP) {
4265                 CNETERR("Dropping GET from %s portal %d match %llu"
4266                         " offset %d length %d\n",
4267                         libcfs_idstr(&info.mi_id), info.mi_portal,
4268                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4269                 return -ENOENT; /* -ve: OK but no match */
4270         }
4271
4272         LASSERT(rc == LNET_MATCHMD_OK);
4273
4274         lnet_build_msg_event(msg, LNET_EVENT_GET);
4275
4276         reply_wmd = hdr->msg.get.return_wmd;
4277
4278         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4279                        msg->msg_offset, msg->msg_wanted);
4280
4281         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4282
4283         if (rdma_get) {
4284                 /* The LND completes the REPLY from its recv procedure */
4285                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4286                              msg->msg_offset, msg->msg_len, msg->msg_len);
4287                 return 0;
4288         }
4289
4290         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4291         msg->msg_receiving = 0;
4292
4293         rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from);
4294         if (rc < 0) {
4295                 /* didn't get as far as lnet_ni_send() */
4296                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4297                        libcfs_nidstr(&ni->ni_nid),
4298                        libcfs_idstr(&info.mi_id), rc);
4299
4300                 lnet_finalize(msg, rc);
4301         }
4302
4303         return 0;
4304 }
4305
4306 static int
4307 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4308 {
4309         void *private = msg->msg_private;
4310         struct lnet_hdr *hdr = &msg->msg_hdr;
4311         struct lnet_process_id src = {0};
4312         struct lnet_libmd *md;
4313         unsigned int rlength;
4314         unsigned int mlength;
4315         int cpt;
4316
4317         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4318         lnet_res_lock(cpt);
4319
4320         src.nid = lnet_nid_to_nid4(&hdr->src_nid);
4321         src.pid = hdr->src_pid;
4322
4323         /* NB handles only looked up by creator (no flips) */
4324         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4325         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4326                 CNETERR("%s: Dropping REPLY from %s for %s "
4327                         "MD %#llx.%#llx\n",
4328                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4329                         (md == NULL) ? "invalid" : "inactive",
4330                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4331                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4332                 if (md != NULL && md->md_me != NULL)
4333                         CERROR("REPLY MD also attached to portal %d\n",
4334                                md->md_me->me_portal);
4335
4336                 lnet_res_unlock(cpt);
4337                 return -ENOENT; /* -ve: OK but no match */
4338         }
4339
4340         LASSERT(md->md_offset == 0);
4341
4342         rlength = hdr->payload_length;
4343         mlength = min(rlength, md->md_length);
4344
4345         if (mlength < rlength &&
4346             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4347                 CNETERR("%s: Dropping REPLY from %s length %d "
4348                         "for MD %#llx would overflow (%d)\n",
4349                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4350                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4351                         mlength);
4352                 lnet_res_unlock(cpt);
4353                 return -ENOENT; /* -ve: OK but no match */
4354         }
4355
4356         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4357                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4358                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4359
4360         lnet_msg_attach_md(msg, md, 0, mlength);
4361
4362         if (mlength != 0)
4363                 lnet_setpayloadbuffer(msg);
4364
4365         lnet_res_unlock(cpt);
4366
4367         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4368
4369         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4370         return 0;
4371 }
4372
4373 static int
4374 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4375 {
4376         struct lnet_hdr *hdr = &msg->msg_hdr;
4377         struct lnet_process_id src = {0};
4378         struct lnet_libmd *md;
4379         int cpt;
4380
4381         src.nid = lnet_nid_to_nid4(&hdr->src_nid);
4382         src.pid = hdr->src_pid;
4383
4384         /* Convert ack fields to host byte order */
4385         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4386         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4387
4388         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4389         lnet_res_lock(cpt);
4390
4391         /* NB handles only looked up by creator (no flips) */
4392         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4393         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4394                 /* Don't moan; this is expected */
4395                 CDEBUG(D_NET,
4396                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4397                        libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4398                        (md == NULL) ? "invalid" : "inactive",
4399                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4400                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4401                 if (md != NULL && md->md_me != NULL)
4402                         CERROR("Source MD also attached to portal %d\n",
4403                                md->md_me->me_portal);
4404
4405                 lnet_res_unlock(cpt);
4406                 return -ENOENT;                  /* -ve! */
4407         }
4408
4409         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4410                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4411                hdr->msg.ack.dst_wmd.wh_object_cookie);
4412
4413         lnet_msg_attach_md(msg, md, 0, 0);
4414
4415         lnet_res_unlock(cpt);
4416
4417         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4418
4419         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4420         return 0;
4421 }
4422
4423 /**
4424  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4425  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4426  * \retval -ve                  error code
4427  */
4428 int
4429 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4430 {
4431         int     rc = 0;
4432
4433         if (!the_lnet.ln_routing)
4434                 return -ECANCELED;
4435
4436         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4437             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4438                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4439                         msg->msg_rx_ready_delay = 1;
4440                 } else {
4441                         lnet_net_unlock(msg->msg_rx_cpt);
4442                         rc = lnet_ni_eager_recv(ni, msg);
4443                         lnet_net_lock(msg->msg_rx_cpt);
4444                 }
4445         }
4446
4447         if (rc == 0)
4448                 rc = lnet_post_routed_recv_locked(msg, 0);
4449         return rc;
4450 }
4451
4452 int
4453 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4454 {
4455         int     rc;
4456
4457         switch (msg->msg_type) {
4458         case LNET_MSG_ACK:
4459                 rc = lnet_parse_ack(ni, msg);
4460                 break;
4461         case LNET_MSG_PUT:
4462                 rc = lnet_parse_put(ni, msg);
4463                 break;
4464         case LNET_MSG_GET:
4465                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4466                 break;
4467         case LNET_MSG_REPLY:
4468                 rc = lnet_parse_reply(ni, msg);
4469                 break;
4470         default: /* prevent an unused label if !kernel */
4471                 LASSERT(0);
4472                 return -EPROTO;
4473         }
4474
4475         LASSERT(rc == 0 || rc == -ENOENT);
4476         return rc;
4477 }
4478
4479 char *
4480 lnet_msgtyp2str(int type)
4481 {
4482         switch (type) {
4483         case LNET_MSG_ACK:
4484                 return "ACK";
4485         case LNET_MSG_PUT:
4486                 return "PUT";
4487         case LNET_MSG_GET:
4488                 return "GET";
4489         case LNET_MSG_REPLY:
4490                 return "REPLY";
4491         case LNET_MSG_HELLO:
4492                 return "HELLO";
4493         default:
4494                 return "<UNKNOWN>";
4495         }
4496 }
4497 EXPORT_SYMBOL(lnet_msgtyp2str);
4498
4499 int
4500 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid4,
4501            void *private, int rdma_req)
4502 {
4503         struct lnet_peer_ni *lpni;
4504         struct lnet_msg *msg;
4505         __u32 payload_length;
4506         lnet_pid_t dest_pid;
4507         lnet_nid_t dest_nid;
4508         lnet_nid_t src_nid;
4509         struct lnet_nid from_nid;
4510         bool push = false;
4511         int for_me;
4512         __u32 type;
4513         int rc = 0;
4514         int cpt;
4515
4516         LASSERT(!in_interrupt());
4517
4518         lnet_nid4_to_nid(from_nid4, &from_nid);
4519
4520         type = hdr->type;
4521         src_nid = lnet_nid_to_nid4(&hdr->src_nid);
4522         dest_nid = lnet_nid_to_nid4(&hdr->dest_nid);
4523         dest_pid = hdr->dest_pid;
4524         payload_length = hdr->payload_length;
4525
4526         /* FIXME handle large-addr nids */
4527         for_me = (lnet_nid_to_nid4(&ni->ni_nid) == dest_nid);
4528         cpt = lnet_cpt_of_nid(from_nid4, ni);
4529
4530         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4531                 libcfs_nid2str(dest_nid),
4532                 libcfs_nidstr(&ni->ni_nid),
4533                 libcfs_nid2str(src_nid),
4534                 lnet_msgtyp2str(type),
4535                 (for_me) ? "for me" : "routed");
4536
4537         switch (type) {
4538         case LNET_MSG_ACK:
4539         case LNET_MSG_GET:
4540                 if (payload_length > 0) {
4541                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4542                                libcfs_nid2str(from_nid4),
4543                                libcfs_nid2str(src_nid),
4544                                lnet_msgtyp2str(type), payload_length);
4545                         return -EPROTO;
4546                 }
4547                 break;
4548
4549         case LNET_MSG_PUT:
4550         case LNET_MSG_REPLY:
4551                 if (payload_length >
4552                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4553                         CERROR("%s, src %s: bad %s payload %d "
4554                                "(%d max expected)\n",
4555                                libcfs_nid2str(from_nid4),
4556                                libcfs_nid2str(src_nid),
4557                                lnet_msgtyp2str(type),
4558                                payload_length,
4559                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4560                         return -EPROTO;
4561                 }
4562                 break;
4563
4564         default:
4565                 CERROR("%s, src %s: Bad message type 0x%x\n",
4566                        libcfs_nid2str(from_nid4),
4567                        libcfs_nid2str(src_nid), type);
4568                 return -EPROTO;
4569         }
4570
4571         if (the_lnet.ln_routing &&
4572             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4573                 lnet_ni_lock(ni);
4574                 spin_lock(&ni->ni_net->net_lock);
4575                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4576                 spin_unlock(&ni->ni_net->net_lock);
4577                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4578                 lnet_ni_unlock(ni);
4579         }
4580
4581         if (push)
4582                 lnet_push_update_to_peers(1);
4583
4584         /* Regard a bad destination NID as a protocol error.  Senders should
4585          * know what they're doing; if they don't, they're misconfigured, buggy
4586          * or malicious, so we chop them off at the knees :) */
4587
4588         if (!for_me) {
4589                 if (LNET_NIDNET(dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4590                         /* should have gone direct */
4591                         CERROR("%s, src %s: Bad dest nid %s "
4592                                "(should have been sent direct)\n",
4593                                 libcfs_nid2str(from_nid4),
4594                                 libcfs_nid2str(src_nid),
4595                                 libcfs_nid2str(dest_nid));
4596                         return -EPROTO;
4597                 }
4598
4599                 if (lnet_islocalnid4(dest_nid)) {
4600                         /* dest is another local NI; sender should have used
4601                          * this node's NID on its own network */
4602                         CERROR("%s, src %s: Bad dest nid %s "
4603                                "(it's my nid but on a different network)\n",
4604                                 libcfs_nid2str(from_nid4),
4605                                 libcfs_nid2str(src_nid),
4606                                 libcfs_nid2str(dest_nid));
4607                         return -EPROTO;
4608                 }
4609
4610                 if (rdma_req && type == LNET_MSG_GET) {
4611                         CERROR("%s, src %s: Bad optimized GET for %s "
4612                                "(final destination must be me)\n",
4613                                 libcfs_nid2str(from_nid4),
4614                                 libcfs_nid2str(src_nid),
4615                                 libcfs_nid2str(dest_nid));
4616                         return -EPROTO;
4617                 }
4618
4619                 if (!the_lnet.ln_routing) {
4620                         CERROR("%s, src %s: Dropping message for %s "
4621                                "(routing not enabled)\n",
4622                                 libcfs_nid2str(from_nid4),
4623                                 libcfs_nid2str(src_nid),
4624                                 libcfs_nid2str(dest_nid));
4625                         goto drop;
4626                 }
4627         }
4628
4629         /* Message looks OK; we're not going to return an error, so we MUST
4630          * call back lnd_recv() come what may... */
4631
4632         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4633             fail_peer(src_nid, 0)) {                    /* shall we now? */
4634                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4635                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4636                        lnet_msgtyp2str(type));
4637                 goto drop;
4638         }
4639
4640         /* FIXME need to support large-addr nid */
4641         if (!list_empty(&the_lnet.ln_drop_rules) &&
4642             lnet_drop_rule_match(hdr, lnet_nid_to_nid4(&ni->ni_nid), NULL)) {
4643                 CDEBUG(D_NET,
4644                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4645                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4646                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4647                 goto drop;
4648         }
4649
4650         msg = lnet_msg_alloc();
4651         if (msg == NULL) {
4652                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4653                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4654                        lnet_msgtyp2str(type));
4655                 goto drop;
4656         }
4657
4658         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4659          * pointers NULL etc */
4660
4661         msg->msg_type = type;
4662         msg->msg_private = private;
4663         msg->msg_receiving = 1;
4664         msg->msg_rdma_get = rdma_req;
4665         msg->msg_len = msg->msg_wanted = payload_length;
4666         msg->msg_offset = 0;
4667         msg->msg_hdr = *hdr;
4668         /* for building message event */
4669         msg->msg_from = from_nid;
4670         if (!for_me) {
4671                 msg->msg_target.pid     = dest_pid;
4672                 lnet_nid4_to_nid(dest_nid, &msg->msg_target.nid);
4673                 msg->msg_routing        = 1;
4674         }
4675
4676         lnet_net_lock(cpt);
4677         lpni = lnet_peerni_by_nid_locked(&from_nid, &ni->ni_nid, cpt);
4678         if (IS_ERR(lpni)) {
4679                 lnet_net_unlock(cpt);
4680                 rc = PTR_ERR(lpni);
4681                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4682                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4683                        lnet_msgtyp2str(type), rc);
4684                 lnet_msg_free(msg);
4685                 if (rc == -ESHUTDOWN)
4686                         /* We are shutting down.  Don't do anything more */
4687                         return 0;
4688                 goto drop;
4689         }
4690
4691         /* If this message was forwarded to us from a router then we may need
4692          * to update router aliveness or check for an asymmetrical route
4693          * (or both)
4694          */
4695         if (((lnet_drop_asym_route && for_me) ||
4696              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4697             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid4)) {
4698                 __u32 src_net_id = LNET_NIDNET(src_nid);
4699                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4700                 struct lnet_route *route;
4701                 bool found = false;
4702
4703                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4704                         if (route->lr_net == src_net_id) {
4705                                 found = true;
4706                                 /* If we're transitioning the gateway from
4707                                  * dead -> alive, and discovery is disabled
4708                                  * locally or on the gateway, then we need to
4709                                  * update the cached route aliveness for each
4710                                  * route to the src_nid's net.
4711                                  *
4712                                  * Otherwise, we're only checking for
4713                                  * symmetrical route, and we can break the
4714                                  * loop
4715                                  */
4716                                 if (!gw->lp_alive &&
4717                                     lnet_is_discovery_disabled(gw))
4718                                         lnet_set_route_aliveness(route, true);
4719                                 else
4720                                         break;
4721                         }
4722                 }
4723                 if (lnet_drop_asym_route && for_me && !found) {
4724                         /* Drop ref taken by lnet_nid2peerni_locked() */
4725                         lnet_peer_ni_decref_locked(lpni);
4726                         lnet_net_unlock(cpt);
4727                         /* we would not use from_nid to route a message to
4728                          * src_nid
4729                          * => asymmetric routing detected but forbidden
4730                          */
4731                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4732                                libcfs_nid2str(from_nid4),
4733                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4734                         lnet_msg_free(msg);
4735                         goto drop;
4736                 }
4737                 if (!gw->lp_alive) {
4738                         struct lnet_peer_net *lpn;
4739                         struct lnet_peer_ni *lpni2;
4740
4741                         gw->lp_alive = true;
4742                         /* Mark all remote NIs on src_nid's net UP */
4743                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4744                         if (lpn)
4745                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4746                                                     lpni_peer_nis)
4747                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4748                 }
4749         }
4750
4751         lpni->lpni_last_alive = ktime_get_seconds();
4752
4753         msg->msg_rxpeer = lpni;
4754         msg->msg_rxni = ni;
4755         lnet_ni_addref_locked(ni, cpt);
4756         /* Multi-Rail: Primary NID of source. */
4757         lnet_peer_primary_nid_locked(src_nid, &msg->msg_initiator);
4758
4759         /*
4760          * Mark the status of this lpni as UP since we received a message
4761          * from it. A ping response reports back the ns_status as marked
4762          * up or down on the remote, and we cache it here.
4763          */
4764         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4765
4766         lnet_msg_commit(msg, cpt);
4767
4768         /* message delay simulation */
4769         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4770                      lnet_delay_rule_match_locked(hdr, msg))) {
4771                 lnet_net_unlock(cpt);
4772                 return 0;
4773         }
4774
4775         if (!for_me) {
4776                 rc = lnet_parse_forward_locked(ni, msg);
4777                 lnet_net_unlock(cpt);
4778
4779                 if (rc < 0)
4780                         goto free_drop;
4781
4782                 if (rc == LNET_CREDIT_OK) {
4783                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4784                                      0, payload_length, payload_length);
4785                 }
4786                 return 0;
4787         }
4788
4789         lnet_net_unlock(cpt);
4790
4791         rc = lnet_parse_local(ni, msg);
4792         if (rc != 0)
4793                 goto free_drop;
4794         return 0;
4795
4796  free_drop:
4797         LASSERT(msg->msg_md == NULL);
4798         lnet_finalize(msg, rc);
4799
4800  drop:
4801         lnet_drop_message(ni, cpt, private, payload_length, type);
4802         return 0;
4803 }
4804 EXPORT_SYMBOL(lnet_parse);
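
/*
 * A minimal sketch, under assumed names, of how an LND receive path hands
 * an incoming header to lnet_parse().  'kexamplelnd_handle_rx' and its rx
 * descriptor are hypothetical; only the lnet_parse() call and its contract
 * (return 0 or -ve, with lnd_recv() guaranteed to be called back on
 * success) come from the function above.
 */
#if 0
static void kexamplelnd_handle_rx(struct lnet_ni *ni, void *rx,
                                  struct lnet_hdr *hdr, lnet_nid_t sender)
{
        int rc;

        /* rdma_req == 0: this LND does not optimize GETs */
        rc = lnet_parse(ni, hdr, sender, rx, 0);
        if (rc < 0)
                CERROR("lnet_parse() failed: %d; dropping rx\n", rc);
        /* on success, LNet calls back lnd_recv() for this rx descriptor */
}
#endif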
4805
4806 void
4807 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4808 {
4809         while (!list_empty(head)) {
4810                 struct lnet_process_id id = {0};
4811                 struct lnet_msg *msg;
4812
4813                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4814                 list_del(&msg->msg_list);
4815
4816                 id.nid = lnet_nid_to_nid4(&msg->msg_hdr.src_nid);
4817                 id.pid = msg->msg_hdr.src_pid;
4818
4819                 LASSERT(msg->msg_md == NULL);
4820                 LASSERT(msg->msg_rx_delayed);
4821                 LASSERT(msg->msg_rxpeer != NULL);
4822                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4823
4824                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4825                       " offset %d length %d: %s\n",
4826                       libcfs_id2str(id),
4827                       msg->msg_hdr.msg.put.ptl_index,
4828                       msg->msg_hdr.msg.put.match_bits,
4829                       msg->msg_hdr.msg.put.offset,
4830                       msg->msg_hdr.payload_length, reason);
4831
4832                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4833                  * called lnet_drop_message(), so I just hang onto msg as well
4834                  * until that's done */
4835
4836                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4837                                   msg->msg_private, msg->msg_len,
4838                                   msg->msg_type);
4839
4840                 msg->msg_no_resend = true;
4841                 /*
4842                  * NB: the message will not generate an event because it has
4843                  * no attached MD, but we should still give an error code so
4844                  * lnet_msg_decommit() can skip counter operations and checks.
4845                  */
4846                 lnet_finalize(msg, -ENOENT);
4847         }
4848 }
4849
4850 void
4851 lnet_recv_delayed_msg_list(struct list_head *head)
4852 {
4853         while (!list_empty(head)) {
4854                 struct lnet_msg *msg;
4855                 struct lnet_process_id id;
4856
4857                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4858                 list_del(&msg->msg_list);
4859
4860                 /* md won't disappear under me, since each msg
4861                  * holds a ref on it */
4862
4863                 id.nid = lnet_nid_to_nid4(&msg->msg_hdr.src_nid);
4864                 id.pid = msg->msg_hdr.src_pid;
4865
4866                 LASSERT(msg->msg_rx_delayed);
4867                 LASSERT(msg->msg_md != NULL);
4868                 LASSERT(msg->msg_rxpeer != NULL);
4869                 LASSERT(msg->msg_rxni != NULL);
4870                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4871
4872                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4873                        "match %llu offset %d length %d.\n",
4874                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4875                         msg->msg_hdr.msg.put.match_bits,
4876                         msg->msg_hdr.msg.put.offset,
4877                         msg->msg_hdr.payload_length);
4878
4879                 lnet_recv_put(msg->msg_rxni, msg);
4880         }
4881 }
4882
4883 static void
4884 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4885                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4886 {
4887         s64 timeout_ns;
4888         struct lnet_rsp_tracker *local_rspt;
4889
4890         /*
4891          * The MD has a refcount taken by the message, so it's not going
4892          * away; it can still be looked up, however, so we must secure
4893          * access to md_rspt_ptr by taking the res_lock.
4894          * The rspt can be accessed without protection until it is added
4895          * to the list.
4896          */
4897
4898         lnet_res_lock(cpt);
4899         local_rspt = md->md_rspt_ptr;
4900         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4901         if (local_rspt != NULL) {
4902                 /*
4903                  * we already have an rspt attached to the md, so we'll
4904                  * update the deadline on that one.
4905                  */
4906                 lnet_rspt_free(rspt, cpt);
4907         } else {
4908                 /* new md */
4909                 rspt->rspt_mdh = mdh;
4910                 rspt->rspt_cpt = cpt;
4911                 /* store the rspt so we can access it when we get the REPLY */
4912                 md->md_rspt_ptr = rspt;
4913                 local_rspt = rspt;
4914         }
4915         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4916
4917         /*
4918          * Add to the list of tracked responses. It is added at the tail
4919          * so that older entries expire first.
4920          */
4921         lnet_net_lock(cpt);
4922         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4923         lnet_net_unlock(cpt);
4924         lnet_res_unlock(cpt);
4925 }
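
/*
 * Illustrative note (values assumed, not taken from this file): if
 * lnet_transaction_timeout were 50 seconds, a tracker attached now gets
 * rspt_deadline = ktime_get() + 50 * NSEC_PER_SEC.  The monitor thread
 * expires entries from the head of ln_mt_rstq[cpt], which is why new and
 * refreshed trackers are moved to the tail above.
 */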
4926
4927 /**
4928  * Initiate an asynchronous PUT operation.
4929  *
4930  * There are several events associated with a PUT: completion of the send on
4931  * the initiator node (LNET_EVENT_SEND), and when the send completes
4932  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4933  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4934  * used at the target node to indicate the completion of incoming data
4935  * delivery.
4936  *
4937  * The local events will be logged in the EQ associated with the MD pointed to
4938  * by the \a mdh handle. Using an MD without an associated EQ results in these
4939  * events being discarded. In this case, the caller must have another
4940  * mechanism (e.g., a higher level protocol) for determining when it is safe
4941  * to modify the memory region associated with the MD.
4942  *
4943  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4944  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4945  *
4946  * \param self4 Indicates the NID of a local interface through which to send
4947  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4948  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4949  * must be "free floating" (See LNetMDBind()).
4950  * \param ack Controls whether an acknowledgment is requested.
4951  * Acknowledgments are only sent when they are requested by the initiating
4952  * process and the target MD enables them.
4953  * \param target A process identifier for the target process.
4954  * \param portal The index in the \a target's portal table.
4955  * \param match_bits The match bits to use for MD selection at the target
4956  * process.
4957  * \param offset The offset into the target MD (only used when the target
4958  * MD has the LNET_MD_MANAGE_REMOTE option set).
4959  * \param hdr_data 64 bits of user data that can be included in the message
4960  * header. This data is written to an event queue entry at the target if an
4961  * EQ is present on the matching MD.
4962  *
4963  * \retval  0      Success, and only in this case events will be generated
4964  * and logged to EQ (if it exists).
4965  * \retval -EIO    Simulated failure.
4966  * \retval -ENOMEM Memory allocation failure.
4967  * \retval -ENOENT Invalid MD object.
4968  *
4969  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4970  */
4971 int
4972 LNetPut(lnet_nid_t self4, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4973         struct lnet_process_id target, unsigned int portal,
4974         __u64 match_bits, unsigned int offset,
4975         __u64 hdr_data)
4976 {
4977         struct lnet_msg *msg;
4978         struct lnet_libmd *md;
4979         int cpt;
4980         int rc;
4981         struct lnet_rsp_tracker *rspt = NULL;
4982         struct lnet_nid self;
4983
4984         LASSERT(the_lnet.ln_refcount > 0);
4985
4986         lnet_nid4_to_nid(self4, &self);
4987
4988         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4989             fail_peer(target.nid, 1)) {                 /* shall we now? */
4990                 CERROR("Dropping PUT to %s: simulated failure\n",
4991                        libcfs_id2str(target));
4992                 return -EIO;
4993         }
4994
4995         msg = lnet_msg_alloc();
4996         if (msg == NULL) {
4997                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4998                        libcfs_id2str(target));
4999                 return -ENOMEM;
5000         }
5001         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
5002
5003         cpt = lnet_cpt_of_cookie(mdh.cookie);
5004
5005         if (ack == LNET_ACK_REQ) {
5006                 rspt = lnet_rspt_alloc(cpt);
5007                 if (!rspt) {
5008                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
5009                                 libcfs_id2str(target));
                                lnet_msg_free(msg); /* don't leak the msg allocated above */
5010                         return -ENOMEM;
5011                 }
5012                 INIT_LIST_HEAD(&rspt->rspt_on_list);
5013         }
5014
5015         lnet_res_lock(cpt);
5016
5017         md = lnet_handle2md(&mdh);
5018         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5019                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5020                        match_bits, portal, libcfs_id2str(target),
5021                        md == NULL ? -1 : md->md_threshold);
5022                 if (md != NULL && md->md_me != NULL)
5023                         CERROR("Source MD also attached to portal %d\n",
5024                                md->md_me->me_portal);
5025                 lnet_res_unlock(cpt);
5026
5027                 if (rspt)
5028                         lnet_rspt_free(rspt, cpt);
5029
5030                 lnet_msg_free(msg);
5031                 return -ENOENT;
5032         }
5033
5034         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
5035
5036         lnet_msg_attach_md(msg, md, 0, 0);
5037
5038         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5039
5040         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5041         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5042         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5043         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5044
5045         /* NB handles only looked up by creator (no flips) */
5046         if (ack == LNET_ACK_REQ) {
5047                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5048                         the_lnet.ln_interface_cookie;
5049                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5050                         md->md_lh.lh_cookie;
5051         } else {
5052                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5053                         LNET_WIRE_HANDLE_COOKIE_NONE;
5054                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5055                         LNET_WIRE_HANDLE_COOKIE_NONE;
5056         }
5057
5058         lnet_res_unlock(cpt);
5059
5060         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5061
5062         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5063                                                    md->md_options))
5064                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5065         else if (rspt)
5066                 lnet_rspt_free(rspt, cpt);
5067
5068         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5069                                  CFS_FAIL_ONCE))
5070                 rc = -EIO;
5071         else
5072                 rc = lnet_send(&self, msg, NULL);
5073
5074         if (rc != 0) {
5075                 CNETERR("Error sending PUT to %s: %d\n",
5076                         libcfs_id2str(target), rc);
5077                 msg->msg_no_resend = true;
5078                 lnet_finalize(msg, rc);
5079         }
5080
5081         /* completion will be signalled by an event */
5082         return 0;
5083 }
5084 EXPORT_SYMBOL(LNetPut);
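
/*
 * A minimal usage sketch for LNetPut().  The MD handle is assumed to have
 * been created with LNetMDBind(); the portal and match bits below are
 * illustrative values only.
 */
#if 0
static int example_put(struct lnet_handle_md mdh,
                       struct lnet_process_id target)
{
        /* LNET_ACK_REQ arms response tracking above, if enabled */
        return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
                       10 /* portal */, 0x1234ULL /* match bits */,
                       0 /* offset */, 0 /* hdr_data */);
}
#endif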
5085
5086 /*
5087  * The LND can DMA directly to the GET md (i.e. no REPLY msg).  This
5088  * returns a msg for the LND to pass to lnet_finalize() when the sink
5089  * data has been received.
5090  *
5091  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5092  * lnet_finalize() is called on it, so the LND must call this first.
5093  */
5094 struct lnet_msg *
5095 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5096 {
5097         struct lnet_msg *msg = lnet_msg_alloc();
5098         struct lnet_libmd *getmd = getmsg->msg_md;
5099         struct lnet_processid *peer_id = &getmsg->msg_target;
5100         int cpt;
5101
5102         LASSERT(!getmsg->msg_target_is_router);
5103         LASSERT(!getmsg->msg_routing);
5104
5105         if (msg == NULL) {
5106                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5107                        libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id));
5108                 goto drop;
5109         }
5110
5111         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5112         lnet_res_lock(cpt);
5113
5114         LASSERT(getmd->md_refcount > 0);
5115
5116         if (getmd->md_threshold == 0) {
5117                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5118                         libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id),
5119                         getmd);
5120                 lnet_res_unlock(cpt);
5121                 goto drop;
5122         }
5123
5124         LASSERT(getmd->md_offset == 0);
5125
5126         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5127                libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd);
5128
5129         /* setup information for lnet_build_msg_event */
5130         msg->msg_initiator =
5131                 getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5132         msg->msg_from = peer_id->nid;
5133         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5134         msg->msg_hdr.src_nid = peer_id->nid;
5135         msg->msg_hdr.payload_length = getmd->md_length;
5136         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5137
5138         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5139         lnet_res_unlock(cpt);
5140
5141         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5142
5143         lnet_net_lock(cpt);
5144         lnet_msg_commit(msg, cpt);
5145         lnet_net_unlock(cpt);
5146
5147         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5148
5149         return msg;
5150
5151  drop:
5152         cpt = lnet_nid2cpt(&peer_id->nid, ni);
5153
5154         lnet_net_lock(cpt);
5155         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5156         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5157         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5158                 getmd->md_length;
5159         lnet_net_unlock(cpt);
5160
5161         if (msg != NULL)
5162                 lnet_msg_free(msg);
5163
5164         return NULL;
5165 }
5166 EXPORT_SYMBOL(lnet_create_reply_msg);
5167
5168 void
5169 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5170                        unsigned int len)
5171 {
5172         /* Set the REPLY length, now that the RDMA that elides the REPLY
5173          * message has completed and the length is known. */
5174         LASSERT(reply != NULL);
5175         LASSERT(reply->msg_type == LNET_MSG_GET);
5176         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5177
5178         /* NB I trusted my peer to RDMA.  If it claims to have written
5179          * beyond the end of my buffer, I might as well be dead. */
5180         LASSERT(len <= reply->msg_ev.mlength);
5181
5182         reply->msg_ev.mlength = len;
5183 }
5184 EXPORT_SYMBOL(lnet_set_reply_msg_len);
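
/*
 * A minimal sketch of the optimized-GET completion sequence an LND follows
 * with the two helpers above.  The function name and RDMA bookkeeping are
 * hypothetical; the ordering is the documented contract: create the reply
 * msg *before* finalizing the GET, set the REPLY length once the RDMA size
 * is known, then finalize the reply.
 */
#if 0
static void kexamplelnd_get_done(struct lnet_ni *ni, struct lnet_msg *getmsg,
                                 unsigned int bytes_rdmad, int status)
{
        struct lnet_msg *reply;

        reply = lnet_create_reply_msg(ni, getmsg);  /* before finalize! */
        lnet_finalize(getmsg, status);
        if (reply) {
                if (status == 0)
                        lnet_set_reply_msg_len(ni, reply, bytes_rdmad);
                lnet_finalize(reply, status);
        }
}
#endif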
5185
5186 /**
5187  * Initiate an asynchronous GET operation.
5188  *
5189  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5190  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5191  * the target node in the REPLY has been written to local MD.
5192  *
5193  * On the target node, an LNET_EVENT_GET is logged when the GET request
5194  * arrives and is accepted into a MD.
5195  *
5196  * \param self4,target,portal,match_bits,offset See the discussion in LNetPut().
5197  * \param mdh A handle for the MD that describes the memory into which the
5198  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5199  *
5200  * \retval  0      Success, and only in this case events will be generated
5201  * and logged to EQ (if it exists) of the MD.
5202  * \retval -EIO    Simulated failure.
5203  * \retval -ENOMEM Memory allocation failure.
5204  * \retval -ENOENT Invalid MD object.
5205  */
5206 int
5207 LNetGet(lnet_nid_t self4, struct lnet_handle_md mdh,
5208         struct lnet_process_id target, unsigned int portal,
5209         __u64 match_bits, unsigned int offset, bool recovery)
5210 {
5211         struct lnet_msg *msg;
5212         struct lnet_libmd *md;
5213         struct lnet_rsp_tracker *rspt;
5214         int cpt;
5215         int rc;
5216         struct lnet_nid self;
5217
5218         LASSERT(the_lnet.ln_refcount > 0);
5219
5220         lnet_nid4_to_nid(self4, &self);
5221
5222         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5223             fail_peer(target.nid, 1)) {                 /* shall we now? */
5225                 CERROR("Dropping GET to %s: simulated failure\n",
5226                        libcfs_id2str(target));
5227                 return -EIO;
5228         }
5229
5230         msg = lnet_msg_alloc();
5231         if (!msg) {
5232                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5233                        libcfs_id2str(target));
5234                 return -ENOMEM;
5235         }
5236
5237         cpt = lnet_cpt_of_cookie(mdh.cookie);
5238
5239         rspt = lnet_rspt_alloc(cpt);
5240         if (!rspt) {
5241                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5242                        libcfs_id2str(target));
                        lnet_msg_free(msg); /* don't leak the msg allocated above */
5243                 return -ENOMEM;
5244         }
5245         INIT_LIST_HEAD(&rspt->rspt_on_list);
5246
5247         msg->msg_recovery = recovery;
5248
5249         lnet_res_lock(cpt);
5250
5251         md = lnet_handle2md(&mdh);
5252         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5253                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5254                        match_bits, portal, libcfs_id2str(target),
5255                        md == NULL ? -1 : md->md_threshold);
5256                 if (md != NULL && md->md_me != NULL)
5257                         CERROR("REPLY MD also attached to portal %d\n",
5258                                md->md_me->me_portal);
5259
5260                 lnet_res_unlock(cpt);
5261
5262                 lnet_msg_free(msg);
5263                 lnet_rspt_free(rspt, cpt);
5264                 return -ENOENT;
5265         }
5266
5267         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5268
5269         lnet_msg_attach_md(msg, md, 0, 0);
5270
5271         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5272
5273         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5274         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5275         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5276         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5277
5278         /* NB handles only looked up by creator (no flips) */
5279         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5280                 the_lnet.ln_interface_cookie;
5281         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5282                 md->md_lh.lh_cookie;
5283
5284         lnet_res_unlock(cpt);
5285
5286         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5287
5288         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5289                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5290         else
5291                 lnet_rspt_free(rspt, cpt);
5292
5293         rc = lnet_send(&self, msg, NULL);
5294         if (rc < 0) {
5295                 CNETERR("Error sending GET to %s: %d\n",
5296                         libcfs_id2str(target), rc);
5297                 msg->msg_no_resend = true;
5298                 lnet_finalize(msg, rc);
5299         }
5300
5301         /* completion will be signalled by an event */
5302         return 0;
5303 }
5304 EXPORT_SYMBOL(LNetGet);
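
/*
 * A minimal usage sketch for LNetGet().  As with the LNetPut() example,
 * the MD handle is assumed to come from LNetMDBind(); portal and match
 * bits are illustrative only.
 */
#if 0
static int example_get(struct lnet_handle_md mdh,
                       struct lnet_process_id target)
{
        /* recovery == false: a normal GET, not a health-recovery probe */
        return LNetGet(LNET_NID_ANY, mdh, target,
                       10 /* portal */, 0x1234ULL /* match bits */,
                       0 /* offset */, false);
}
#endif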
5305
5306 /**
5307  * Calculate distance to node at \a dstnid.
5308  *
5309  * \param dstnid Target NID.
5310  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5311  * is saved here.
5312  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5313  * here.
5314  *
5315  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5316  * local_nid_dist_zero is set, which is the default.
5317  * \retval positive Distance to target NID, i.e. the number of hops plus one.
5318  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5319  */
5320 int
5321 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5322 {
5323         struct list_head *e;
5324         struct lnet_ni *ni = NULL;
5325         struct lnet_remotenet *rnet;
5326         __u32 dstnet = LNET_NIDNET(dstnid);
5327         int hops;
5328         int cpt;
5329         __u32 order = 2;
5330         struct list_head *rn_list;
5331         bool matched_dstnet = false;
5332
5333         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5334          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5335          * keep order 0 free for 0@lo and order 1 free for a local NID
5336          * match */
5337
5338         LASSERT(the_lnet.ln_refcount > 0);
5339
5340         cpt = lnet_net_lock_current();
5341
5342         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5343                 /* FIXME support large-addr nid */
5344                 if (lnet_nid_to_nid4(&ni->ni_nid) == dstnid) {
5345                         if (srcnidp != NULL)
5346                                 *srcnidp = dstnid;
5347                         if (orderp != NULL) {
5348                                 if (dstnid == LNET_NID_LO_0)
5349                                         *orderp = 0;
5350                                 else
5351                                         *orderp = 1;
5352                         }
5353                         lnet_net_unlock(cpt);
5354
5355                         return local_nid_dist_zero ? 0 : 1;
5356                 }
5357
5358                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5359                         matched_dstnet = true;
5360                         /* We matched the destination net, but we may have
5361                          * additional local NIs to inspect.
5362                          *
5363                          * We record the nid and order as appropriate, but
5364                          * they may be overwritten by an exact NID match above.
5365                          */
5366                         if (srcnidp)
5367                                 /* FIXME support large-addr nids */
5368                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5369
5370                         if (orderp) {
5371                                 /* Check if ni was originally created in
5372                                  * the current net namespace.
5373                                  * If not, assign an order above 0xffff0000
5374                                  * so that this ni is not preferred.
5375                                  */
5376                                 if (current->nsproxy &&
5377                                     !net_eq(ni->ni_net_ns,
5378                                             current->nsproxy->net_ns))
5379                                         *orderp = order + 0xffff0000;
5380                                 else
5381                                         *orderp = order;
5382                         }
5383                 }
5384
5385                 order++;
5386         }
5387
5388         if (matched_dstnet) {
5389                 lnet_net_unlock(cpt);
5390                 return 1;
5391         }
5392
5393         rn_list = lnet_net2rnethash(dstnet);
5394         list_for_each(e, rn_list) {
5395                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5396
5397                 if (rnet->lrn_net == dstnet) {
5398                         struct lnet_route *route;
5399                         struct lnet_route *shortest = NULL;
5400                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5401                         __u32 route_hops;
5402
5403                         LASSERT(!list_empty(&rnet->lrn_routes));
5404
5405                         list_for_each_entry(route, &rnet->lrn_routes,
5406                                             lr_list) {
5407                                 route_hops = route->lr_hops;
5408                                 if (route_hops == LNET_UNDEFINED_HOPS)
5409                                         route_hops = 1;
5410                                 if (shortest == NULL ||
5411                                     route_hops < shortest_hops) {
5412                                         shortest = route;
5413                                         shortest_hops = route_hops;
5414                                 }
5415                         }
5416
5417                         LASSERT(shortest != NULL);
5418                         hops = shortest_hops;
5419                         if (srcnidp != NULL) {
5420                                 struct lnet_net *net;
5421                                 net = lnet_get_net_locked(shortest->lr_lnet);
5422                                 LASSERT(net);
5423                                 ni = lnet_get_next_ni_locked(net, NULL);
5424                                 /* FIXME support large-addr nids */
5425                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5426                         }
5427                         if (orderp != NULL)
5428                                 *orderp = order;
5429                         lnet_net_unlock(cpt);
5430                         return hops + 1;
5431                 }
5432                 order++;
5433         }
5434
5435         lnet_net_unlock(cpt);
5436         return -EHOSTUNREACH;
5437 }
5438 EXPORT_SYMBOL(LNetDist);
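
/*
 * A minimal usage sketch for LNetDist(): classify a destination as local,
 * routed, or unreachable.  The peer NID is supplied by the caller.
 */
#if 0
static void example_dist(lnet_nid_t peer)
{
        lnet_nid_t src;
        __u32 order;
        int dist = LNetDist(peer, &src, &order);

        if (dist < 0)
                CDEBUG(D_NET, "%s unreachable\n", libcfs_nid2str(peer));
        else
                CDEBUG(D_NET, "%s via %s: distance %d, order %u\n",
                       libcfs_nid2str(peer), libcfs_nid2str(src),
                       dist, order);
}
#endif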