LU-14627 lnet: Allow delayed sends
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/pagemap.h>

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

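/* State carried through the send-path selection logic: the best local
 * NI and peer NI found so far, the final (possibly routed) destination,
 * the gateway peer/NI when routing, and the CPTs and case flags that
 * drive the selection. */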
struct lnet_send_data {
        struct lnet_ni *sd_best_ni;
        struct lnet_peer_ni *sd_best_lpni;
        struct lnet_peer_ni *sd_final_dst_lpni;
        struct lnet_peer *sd_peer;
        struct lnet_peer *sd_gw_peer;
        struct lnet_peer_ni *sd_gw_lpni;
        struct lnet_peer_net *sd_peer_net;
        struct lnet_msg *sd_msg;
        lnet_nid_t sd_dst_nid;
        lnet_nid_t sd_src_nid;
        lnet_nid_t sd_rtr_nid;
        int sd_cpt;
        int sd_md_cpt;
        __u32 sd_send_case;
};

static inline bool
lnet_msg_is_response(struct lnet_msg *msg)
{
        return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
}

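/* Decide whether the response (REPLY or ACK) to this message should be
 * tracked.  Explicit MD options take precedence; otherwise the
 * lnet_response_tracking setting applies: 1 tracks GET responses,
 * 2 tracks PUT responses, 3 tracks both. */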
static inline bool
lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
{
        if (md_options & LNET_MD_NO_TRACK_RESPONSE)
                /* Explicitly disabled in MD options */
                return false;

        if (md_options & LNET_MD_TRACK_RESPONSE)
                /* Explicitly enabled in MD options */
                return true;

        if (lnet_response_tracking == 3)
                /* Enabled for all message types */
                return true;

        if (msg_type == LNET_MSG_PUT)
                return lnet_response_tracking == 2;

        if (msg_type == LNET_MSG_GET)
                return lnet_response_tracking == 1;

        return false;
}

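/* Map a stats type to the matching counter set within the element
 * stats, or NULL if the type is unknown. */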
static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
                 enum lnet_stats_type stats_type)
{
        switch (stats_type) {
        case LNET_STATS_TYPE_SEND:
                return &stats->el_send_stats;
        case LNET_STATS_TYPE_RECV:
                return &stats->el_recv_stats;
        case LNET_STATS_TYPE_DROP:
                return &stats->el_drop_stats;
        default:
                CERROR("Unknown stats type\n");
        }

        return NULL;
}

void lnet_incr_stats(struct lnet_element_stats *stats,
                     enum lnet_msg_type msg_type,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return;

        switch (msg_type) {
        case LNET_MSG_ACK:
                atomic_inc(&counts->co_ack_count);
                break;
        case LNET_MSG_PUT:
                atomic_inc(&counts->co_put_count);
                break;
        case LNET_MSG_GET:
                atomic_inc(&counts->co_get_count);
                break;
        case LNET_MSG_REPLY:
                atomic_inc(&counts->co_reply_count);
                break;
        case LNET_MSG_HELLO:
                atomic_inc(&counts->co_hello_count);
                break;
        default:
                CERROR("There is a BUG in the code. Unknown message type\n");
                break;
        }
}

__u32 lnet_sum_stats(struct lnet_element_stats *stats,
                     enum lnet_stats_type stats_type)
{
        struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
        if (!counts)
                return 0;

        return (atomic_read(&counts->co_ack_count) +
                atomic_read(&counts->co_put_count) +
                atomic_read(&counts->co_get_count) +
                atomic_read(&counts->co_reply_count) +
                atomic_read(&counts->co_hello_count));
}

static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
                                struct lnet_comm_count *counts)
{
        msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
        msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
        msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
        msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
        msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
}

void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
                              struct lnet_element_stats *stats)
{
        struct lnet_comm_count *counts;

        LASSERT(msg_stats);
        LASSERT(stats);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_send_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_recv_stats, counts);

        counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
        if (!counts)
                return;
        assign_stats(&msg_stats->im_drop_stats, counts);
}

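/* Configure message failure simulation for a NID: a non-zero threshold
 * adds a new test peer entry, while a zero threshold removes entries
 * matching 'nid' (all entries for LNET_NID_ANY) along with any
 * already-exhausted ones. */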
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        list_move(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }
        return 0;
}

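/* Check whether a message to/from 'nid' should be failed, consuming one
 * unit of the matching test peer's threshold.  Returns 1 if the message
 * should be failed, 0 otherwise.  Zombie entries are culled only on
 * outgoing tests, since incoming messages may arrive at interrupt
 * priority. */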
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
        struct lnet_test_peer *tp;
        struct list_head *el;
        struct list_head *next;
        LIST_HEAD(cull);
        int fail = 0;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, struct lnet_test_peer, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_move(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_move(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

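/* Return the total number of bytes (nob) covered by an iovec array */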
unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_iov_nob);

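/* Copy 'nob' bytes from offset 'soffset' in the source iovec array to
 * offset 'doffset' in the destination array, advancing through the
 * fragments on either side as they are consumed. */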
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3((unsigned int)diov->iov_len - doffset,
                                (unsigned int)siov->iov_len - soffset,
                                nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

unsigned int
lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
        unsigned int  nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->bv_len;

        return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);

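/* As lnet_copy_iov2iov(), but source and destination are both page
 * (bio_vec) fragments, which must be kmap()ed first; therefore this
 * must not be called from interrupt context. */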
void
lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
                    unsigned int doffset,
                    unsigned int nsiov, struct bio_vec *siov,
                    unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int    this_nob;
        char           *daddr = NULL;
        char           *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->bv_len) {
                doffset -= diov->bv_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->bv_len) {
                soffset -= siov->bv_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = min3(diov->bv_len - doffset,
                                siov->bv_len - soffset,
                                nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->bv_page)) +
                                diov->bv_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->bv_page)) +
                                siov->bv_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However, in practice at least one of the kiovs will refer
                 * to already-mapped kernel pages, so the map/unmap will be
                 * no-ops. */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->bv_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        kunmap(diov->bv_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->bv_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        kunmap(siov->bv_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->bv_page);
        if (saddr != NULL)
                kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                    unsigned int nkiov, struct bio_vec *kiov,
                    unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = min3((unsigned int)iov->iov_len - iovoffset,
                                (unsigned int)kiov->bv_len - kiovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
                   unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->bv_len) {
                kiovoffset -= kiov->bv_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
                                (unsigned int)iov->iov_len - iovoffset,
                                nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->bv_page)) +
                                kiov->bv_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->bv_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->bv_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
                  int src_niov, struct bio_vec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->bv_len) {      /* skip initial frags */
                offset -= src->bv_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->bv_len - offset;
                dst->bv_page = src->bv_page;
                dst->bv_offset = src->bv_offset + offset;

                if (len <= frag_len) {
                        dst->bv_len = len;
                        LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
                        return niov;
                }

                dst->bv_len = frag_len;
                LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);

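/* Hand an incoming message to the LND for receive: 'mlen' bytes are
 * delivered into the message buffers at 'offset', out of 'rlen' bytes
 * on the wire.  If the LND fails, the message is finalized here. */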
void
lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
             int delayed, unsigned int offset, unsigned int mlen,
             unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        struct bio_vec  *kiov = NULL;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(msg, rc);
}

static void
lnet_setpayloadbuffer(struct lnet_msg *msg)
{
        struct lnet_libmd *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        msg->msg_kiov = md->md_kiov;
}

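/* Initialize the message and its wire header for sending 'len' bytes
 * at 'offset' to 'target'; dest_nid and src_nid in the header are
 * finalized later by the pathway selection code. */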
void
lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}

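/* Pass a message that already holds its credits (or targets the
 * loopback NI) to the LND for transmission; on LND failure the message
 * is finalized without resend. */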
void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT(!in_interrupt());
        LASSERT(ni->ni_nid == LNET_NID_LO_0 ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0) {
                msg->msg_no_resend = true;
                lnet_finalize(msg, rc);
        }
}

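/* Ask the LND to commit receive resources for a message that must be
 * delayed (e.g. waiting for credits) before it can be received. */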
static int
lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
        int     rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                  &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

static bool
lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
{
        time64_t deadline;

        deadline = lpni->lpni_last_alive +
                   lpni->lpni_net->net_tunables.lct_peer_timeout;

        /*
         * assume peer_ni is alive as long as we're within the configured
         * peer timeout
         */
        if (deadline > now)
                return false;

        return true;
}

/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
{
        time64_t now = ktime_get_seconds();

        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;

        /*
         * If we're resending a message, let's attempt to send it even if
         * the peer is down to fulfill our resend quota on the message
         */
        if (msg->msg_retry_count > 0)
                return 1;

        /* try to send recovery messages regardless */
        if (msg->msg_recovery)
                return 1;

        /* always send any responses */
        if (lnet_msg_is_response(msg))
                return 1;

        if (!lnet_is_peer_deadline_passed(lpni, now))
                return true;

        return lnet_is_peer_ni_alive(lpni);
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
        struct lnet_peer_ni     *lp = msg->msg_txpeer;
        struct lnet_ni          *ni = msg->msg_txni;
        int                     cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* can't get here if we're sending to the loopback interface */
        if (the_lnet.ln_loni)
                LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp, msg) == 0) {
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
                the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
                        msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);
                if (msg->msg_txni)
                        lnet_incr_stats(&msg->msg_txni->ni_stats,
                                        msg->msg_type,
                                        LNET_STATS_TYPE_DROP);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
                if (do_send)
                        lnet_finalize(msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send) {
                        msg->msg_no_resend = true;
                        CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
                               msg, libcfs_id2str(msg->msg_target));
                        lnet_finalize(msg, -ECANCELED);
                }

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
            lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
                msg->msg_tx_delayed = 1;
                return LNET_CREDIT_WAIT;
        }

        /* unset the tx_delay flag as we're going to send it now */
        msg->msg_tx_delayed = 0;

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}


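/* Select the router buffer pool with the smallest buffers that are
 * still large enough to hold this message's payload. */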
static struct lnet_rtrbufpool *
lnet_msg2bufpool(struct lnet_msg *msg)
{
        struct lnet_rtrbufpool  *rbp;
        int                     cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lpni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_rtrbufpool *rbp;
        struct lnet_rtrbuf *rb;

        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
        LASSERT(lpni->lpni_peer_net);
        LASSERT(lpni->lpni_peer_net->lpn_peer);

        lp = lpni->lpni_peer_net->lpn_peer;

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                /* lpni_lock protects the credit manipulation */
                spin_lock(&lpni->lpni_lock);

                msg->msg_peerrtrcredit = 1;
                lpni->lpni_rtrcredits--;
                if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;

                if (lpni->lpni_rtrcredits < 0) {
                        spin_unlock(&lpni->lpni_lock);
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        /* lp_lock protects the lp_rtrq */
                        spin_lock(&lp->lp_lock);
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        spin_unlock(&lp->lp_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lpni->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        /* unset the msg_rx_delayed flag since we're receiving the message */
        msg->msg_rx_delayed = 0;

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

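/* Give back the NI and peer tx credits held by a completed message and
 * launch the next delayed message, if any, that the returned credits
 * unblock. */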
void
lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
        struct lnet_ni          *txni = msg->msg_txni;
        struct lnet_msg         *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        int msg2_cpt;

                        msg2 = list_entry(txpeer->lpni_txq.next,
                                              struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        msg2_cpt = msg2->msg_tx_cpt;

                        /*
                         * The msg_cpt can be different from the msg2_cpt
                         * so we need to make sure we lock the correct cpt
                         * for msg2.
                         * Once we call lnet_post_send_locked() it is no
                         * longer safe to access msg2, since it could've
                         * been freed by lnet_finalize(), but we still
                         * need to relock the correct cpt, so we cache the
                         * msg2_cpt for the purpose of the check that
                         * follows the call to lnet_post_send_locked().
                         */
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

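/* Launch the next routed message blocked waiting for a buffer from
 * this pool. */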
void
lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
        struct lnet_msg *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_entry(rbp->rbp_msgs.next,
                         struct lnet_msg, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

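/* Abort a list of queued routed messages (e.g. when routing is turned
 * off): tell the LND to discard each payload, then finalize the message
 * with -ECANCELED. */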
void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        struct lnet_msg *msg;
        struct lnet_msg *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                msg->msg_no_resend = true;
                msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

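/* Give back the router buffer and peer router credits held by a routed
 * message, scheduling the next blocked message where possible, or
 * dropping queued messages if routing has been disabled. */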
void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
        struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
        struct lnet_peer *lp;
        struct lnet_ni *rxni = msg->msg_rxni;
        struct lnet_msg *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                struct lnet_rtrbuf *rb;
                struct lnet_rtrbufpool *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits.  */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                LASSERT(rxpeerni);
                LASSERT(rxpeerni->lpni_peer_net);
                LASSERT(rxpeerni->lpni_peer_net->lpn_peer);

                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeerni->lpni_lock);
                rxpeerni->lpni_rtrcredits++;
                spin_unlock(&rxpeerni->lpni_lock);

                lp = rxpeerni->lpni_peer_net->lpn_peer;
                spin_lock(&lp->lp_lock);

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        LIST_HEAD(drop);
                        list_splice_init(&lp->lp_rtrq, &drop);
                        spin_unlock(&lp->lp_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (!list_empty(&lp->lp_rtrq)) {
                        int msg2_cpt;

                        msg2 = list_entry(lp->lp_rtrq.next,
                                          struct lnet_msg, msg_list);
                        list_del(&msg2->msg_list);
                        msg2_cpt = msg2->msg_rx_cpt;
                        spin_unlock(&lp->lp_lock);
                        /*
                         * messages on the lp_rtrq can be from any NID in
                         * the peer, which means they might have different
                         * cpts. We need to make sure we lock the right
                         * one.
                         */
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg->msg_rx_cpt);
                                lnet_net_lock(msg2_cpt);
                        }
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                        if (msg2_cpt != msg->msg_rx_cpt) {
                                lnet_net_unlock(msg2_cpt);
                                lnet_net_lock(msg->msg_rx_cpt);
                        }
                } else {
                        spin_unlock(&lp->lp_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeerni != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeerni);
        }
}

static struct lnet_peer_ni *
lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer,
                    struct lnet_peer_ni *best_lpni,
                    struct lnet_peer_net *peer_net)
{
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        struct lnet_peer_ni *lpni = NULL;
        int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
                INT_MIN;
        int best_lpni_healthv = (best_lpni) ?
                atomic_read(&best_lpni->lpni_healthv) : 0;
        bool best_lpni_is_preferred = false;
        bool lpni_is_preferred;
        int lpni_healthv;
        __u32 lpni_sel_prio;
        __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if the best_ni we've chosen already has this lpni
                 * preferred, then let's use it
                 */
                if (best_ni) {
                        lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni,
                                                                best_ni->ni_nid);
                        CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
                               libcfs_nid2str(best_ni->ni_nid),
                               lpni_is_preferred);
                } else {
                        lpni_is_preferred = false;
                }

                lpni_healthv = atomic_read(&lpni->lpni_healthv);
                lpni_sel_prio = lpni->lpni_sel_priority;

                if (best_lpni)
                        CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
                                libcfs_nid2str(lpni->lpni_nid),
                                libcfs_nid2str(best_lpni->lpni_nid),
                                lpni_healthv, best_lpni_healthv,
                                lpni_sel_prio, best_sel_prio,
                                lpni->lpni_txcredits, best_lpni_credits,
                                lpni->lpni_seq, best_lpni->lpni_seq);
                else
                        goto select_lpni;

                /* pick the healthiest peer ni */
                if (lpni_healthv < best_lpni_healthv)
                        continue;
                else if (lpni_healthv > best_lpni_healthv) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                if (lpni_sel_prio > best_sel_prio)
                        continue;
                else if (lpni_sel_prio < best_sel_prio) {
                        if (best_lpni_is_preferred)
                                best_lpni_is_preferred = false;
                        goto select_lpni;
                }

                /* if this is a preferred peer use it */
                if (!best_lpni_is_preferred && lpni_is_preferred) {
                        best_lpni_is_preferred = true;
                        goto select_lpni;
                } else if (best_lpni_is_preferred && !lpni_is_preferred) {
                        /* this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                }

                if (lpni->lpni_txcredits < best_lpni_credits)
                        /* We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                else if (lpni->lpni_txcredits > best_lpni_credits)
                        goto select_lpni;

                /* The best peer found so far and the current peer
                 * have the same number of available credits let's
                 * make sure to select between them using Round Robin
                 */
                if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
                        continue;
select_lpni:
                best_lpni_is_preferred = lpni_is_preferred;
                best_lpni_healthv = lpni_healthv;
                best_sel_prio = lpni_sel_prio;
                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);
                CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
                                libcfs_net2str(net_id));
                return NULL;
        }

        CDEBUG(D_NET, "sd_best_lpni = %s\n",
               libcfs_nid2str(best_lpni->lpni_nid));

        return best_lpni;
}

/*
 * Prerequisite: the best_ni should already be set in the sd.
 * Find the best lpni.
 * If the net id is provided then restrict lpni selection to
 * that particular net.
 * Otherwise find any reachable lpni. When dealing with an MR
 * gateway that has multiple lpnis we could use, we want to
 * select the best one from the list of reachable ones.
 */
static inline struct lnet_peer_ni *
lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
                    struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        /* find the best_lpni on any local network */
        if (net_id == LNET_NET_ANY) {
                struct lnet_peer_ni *best_lpni = NULL;
                struct lnet_peer_net *lpn;
                list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
                        /* no net specified find any reachable peer ni */
                        if (!lnet_islocalnet_locked(lpn->lpn_net_id))
                                continue;
                        best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
                                                        best_lpni, lpn);
                }

                return best_lpni;
        }
        /* restrict on the specified net */
        peer_net = lnet_peer_get_net_locked(peer, net_id);
        if (peer_net)
                return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);

        return NULL;
}

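/* Compare two gateway peer NIs: fewer queued bytes wins, then more
 * available tx credits.  Returns 1 if lpni1 is better, -1 if lpni2 is,
 * and 0 on a tie. */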
1451 static int
1452 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1453 {
1454         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1455                 return 1;
1456
1457         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1458                 return -1;
1459
1460         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1461                 return 1;
1462
1463         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1464                 return -1;
1465
1466         return 0;
1467 }
1468
1469 /* Compare route priorities and hop counts */
1470 static int
1471 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1472 {
1473         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1474         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1475
1476         if (r1->lr_priority < r2->lr_priority)
1477                 return 1;
1478
1479         if (r1->lr_priority > r2->lr_priority)
1480                 return -1;
1481
1482         if (r1_hops < r2_hops)
1483                 return 1;
1484
1485         if (r1_hops > r2_hops)
1486                 return -1;
1487
1488         return 0;
1489 }
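
/*
 * Worked example (hypothetical routes): priority dominates hop count,
 * and LNET_UNDEFINED_HOPS counts as a single hop. A route with
 * lr_priority 0 and undefined hops therefore beats a route with
 * lr_priority 1 and lr_hops 1; between two priority-0 routes the
 * smaller hop count wins, and a 0 return leaves the decision to the
 * later tie-breakers (gateway NIs, lr_seq).
 */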
1490
1491 static struct lnet_route *
1492 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1493                        struct lnet_peer_ni *remote_lpni,
1494                        struct lnet_route **prev_route,
1495                        struct lnet_peer_ni **gwni)
1496 {
1497         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1498         struct lnet_route *best_route;
1499         struct lnet_route *last_route;
1500         struct lnet_route *route;
1501         int rc;
1502         bool best_rte_is_preferred = false;
1503         lnet_nid_t gw_pnid;
1504
1505         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1506                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1507
1508         best_route = last_route = NULL;
1509         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1510                 if (!lnet_is_route_alive(route))
1511                         continue;
1512                 gw_pnid = route->lr_gateway->lp_primary_nid;
1513
1514                 /* no protection on below fields, but it's harmless */
1515                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1516                         last_route = route;
1517
1518                 /* if the best route found is in the preferred list then
1519                  * tag it as preferred and use it later on. But if we
1520                  * didn't find any routes which are on the preferred list
1521                  * then just use the best route possible.
1522                  */
1523                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1524
1525                 if (!best_route || (rc && !best_rte_is_preferred)) {
1526                         /* Restrict the selection of the router NI on the
1527                          * src_net provided. If the src_net is LNET_NET_ANY,
1528                          * then select the best interface available.
1529                          */
1530                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1531                                                    route->lr_gateway,
1532                                                    src_net);
1533                         if (!lpni) {
1534                                 CDEBUG(D_NET,
1535                                        "Gateway %s does not have a peer NI on net %s\n",
1536                                        libcfs_nid2str(gw_pnid),
1537                                        libcfs_net2str(src_net));
1538                                 continue;
1539                         }
1540                 }
1541
1542                 if (rc && !best_rte_is_preferred) {
1543                         /* This is the first preferred route we found,
1544                          * so it beats any route found previously
1545                          */
1546                         best_route = route;
1547                         if (!last_route)
1548                                 last_route = route;
1549                         best_gw_ni = lpni;
1550                         best_rte_is_preferred = true;
1551                         CDEBUG(D_NET, "preferred gw = %s\n",
1552                                libcfs_nid2str(gw_pnid));
1553                         continue;
1554                 } else if ((!rc) && best_rte_is_preferred)
1555                         /* The best route we found so far is in the preferred
1556                          * list, so it beats any non-preferred route
1557                          */
1558                         continue;
1559
1560                 if (!best_route) {
1561                         best_route = last_route = route;
1562                         best_gw_ni = lpni;
1563                         continue;
1564                 }
1565
1566                 rc = lnet_compare_routes(route, best_route);
1567                 if (rc == -1)
1568                         continue;
1569
1570                 /* Restrict the selection of the router NI on the
1571                  * src_net provided. If the src_net is LNET_NET_ANY,
1572                  * then select the best interface available.
1573                  */
1574                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1575                                            route->lr_gateway,
1576                                            src_net);
1577                 if (!lpni) {
1578                         CDEBUG(D_NET,
1579                                "Gateway %s does not have a peer NI on net %s\n",
1580                                libcfs_nid2str(gw_pnid),
1581                                libcfs_net2str(src_net));
1582                         continue;
1583                 }
1584
1585                 if (rc == 1) {
1586                         best_route = route;
1587                         best_gw_ni = lpni;
1588                         continue;
1589                 }
1590
1591                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1592                 if (rc == -1)
1593                         continue;
1594
1595                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1596                         best_route = route;
1597                         best_gw_ni = lpni;
1598                         continue;
1599                 }
1600         }
1601
1602         *prev_route = last_route;
1603         *gwni = best_gw_ni;
1604
1605         return best_route;
1606 }
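
/*
 * Summary of the loop above: dead routes are skipped; a route whose
 * gateway is on the peer's preferred-router list beats any
 * non-preferred route; the remaining candidates are ordered by
 * lnet_compare_routes() (priority, then hops), then by
 * lnet_compare_gw_lpnis() on the gateway peer NIs, and finally by
 * lr_seq for round-robin. *prev_route returns the route with the
 * highest lr_seq so the caller can advance the round-robin state.
 */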
1607
1608 static struct lnet_ni *
1609 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1610                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1611                  int md_cpt)
1612 {
1613         struct lnet_ni *ni = NULL;
1614         unsigned int shortest_distance;
1615         int best_credits;
1616         int best_healthv;
1617         __u32 best_sel_prio;
1618
1619         /*
1620          * If there is no peer_ni that we can send to on this network,
1621          * then there is no point in looking for a new best_ni here.
1622          */
1623         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1624                 return best_ni;
1625
1626         if (best_ni == NULL) {
1627                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1628                 shortest_distance = UINT_MAX;
1629                 best_credits = INT_MIN;
1630                 best_healthv = 0;
1631         } else {
1632                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1633                                                      best_ni->ni_dev_cpt);
1634                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1635                 best_healthv = atomic_read(&best_ni->ni_healthv);
1636                 best_sel_prio = best_ni->ni_sel_priority;
1637         }
1638
1639         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1640                 unsigned int distance;
1641                 int ni_credits;
1642                 int ni_healthv;
1643                 int ni_fatal;
1644                 __u32 ni_sel_prio;
1645
1646                 ni_credits = atomic_read(&ni->ni_tx_credits);
1647                 ni_healthv = atomic_read(&ni->ni_healthv);
1648                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1649                 ni_sel_prio = ni->ni_sel_priority;
1650
1651                 /*
1652                  * calculate the distance from the CPT on which
1653                  * the message memory is allocated to the CPT of
1654                  * the NI's physical device
1655                  */
1656                 distance = cfs_cpt_distance(lnet_cpt_table(),
1657                                             md_cpt,
1658                                             ni->ni_dev_cpt);
1659
1660                 /*
1661                  * All distances smaller than the NUMA range
1662                  * are treated equally.
1663                  */
1664                 if (distance < lnet_numa_range)
1665                         distance = lnet_numa_range;
1666
1667                 /*
1668                  * Select on health, shorter distance, available
1669                  * credits, then round-robin.
1670                  */
1671                 if (ni_fatal)
1672                         continue;
1673
1674                 if (best_ni)
1675                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u]\n",
1676                                libcfs_nid2str(ni->ni_nid), ni_credits, distance,
1677                                ni->ni_seq, ni_sel_prio,
1678                                (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
1679                                : "not selected", best_credits, shortest_distance,
1680                                (best_ni) ? best_ni->ni_seq : 0,
1681                                best_sel_prio);
1682                 else
1683                         goto select_ni;
1684
1685                 if (ni_healthv < best_healthv)
1686                         continue;
1687                 else if (ni_healthv > best_healthv)
1688                         goto select_ni;
1689
1690                 if (ni_sel_prio > best_sel_prio)
1691                         continue;
1692                 else if (ni_sel_prio < best_sel_prio)
1693                         goto select_ni;
1694
1695                 if (distance > shortest_distance)
1696                         continue;
1697                 else if (distance < shortest_distance)
1698                         goto select_ni;
1699
1700                 if (ni_credits < best_credits)
1701                         continue;
1702                 else if (ni_credits > best_credits)
1703                         goto select_ni;
1704
1705                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1706                         continue;
1707
1708 select_ni:
1709                 best_sel_prio = ni_sel_prio;
1710                 shortest_distance = distance;
1711                 best_healthv = ni_healthv;
1712                 best_ni = ni;
1713                 best_credits = ni_credits;
1714         }
1715
1716         CDEBUG(D_NET, "selected best_ni %s\n",
1717                (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
1718
1719         return best_ni;
1720 }
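
/*
 * Worked example (hypothetical values, lnet_numa_range = 0): given
 * two non-fatal NIs with equal health and selection priority at the
 * same CPT distance, the one with 8 tx credits is chosen over the one
 * with 5; only when health, priority, distance and credits all tie
 * does the ni_seq round-robin comparison break the tie.
 */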
1721
1722 /*
1723  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1724  * because such traffic is required to perform discovery. We therefore
1725  * exclude all GET and PUT on that portal. We also exclude all ACK and
1726  * REPLY traffic, but that is because the portal is not tracked in the
1727  * message structure for these message types. We could restrict this
1728  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1729  */
1730 static bool
1731 lnet_msg_discovery(struct lnet_msg *msg)
1732 {
1733         if (msg->msg_type == LNET_MSG_PUT) {
1734                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1735                         return true;
1736         } else if (msg->msg_type == LNET_MSG_GET) {
1737                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1738                         return true;
1739         }
1740         return false;
1741 }
1742
1743 #define SRC_SPEC        0x0001
1744 #define SRC_ANY         0x0002
1745 #define LOCAL_DST       0x0004
1746 #define REMOTE_DST      0x0008
1747 #define MR_DST          0x0010
1748 #define NMR_DST         0x0020
1749 #define SND_RESP        0x0040
1750
1751 /* The following two defines are used for return codes */
1752 #define REPEAT_SEND     0x1000
1753 #define PASS_THROUGH    0x2000
1754
1755 /* The different cases lnet_select pathway needs to handle */
1756 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1757 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1758 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1759 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1760 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1761 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1762 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1763 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
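
/*
 * Example: a fresh PUT with no source NID specified, sent to an MR
 * peer on a directly connected network, is classified as
 *
 *      (SRC_ANY | LOCAL_DST | MR_DST) == SRC_ANY_LOCAL_MR_DST
 *
 * which lnet_handle_send_case_locked() below dispatches to
 * lnet_handle_any_mr_dst().
 */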
1764
1765 static int
1766 lnet_handle_lo_send(struct lnet_send_data *sd)
1767 {
1768         struct lnet_msg *msg = sd->sd_msg;
1769         int cpt = sd->sd_cpt;
1770
1771         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1772                 return -ESHUTDOWN;
1773
1774         /* No send credit hassles with LOLND */
1775         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1776         msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
1777         if (!msg->msg_routing)
1778                 msg->msg_hdr.src_nid =
1779                         cpu_to_le64(the_lnet.ln_loni->ni_nid);
1780         msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
1781         lnet_msg_commit(msg, cpt);
1782         msg->msg_txni = the_lnet.ln_loni;
1783
1784         return LNET_CREDIT_OK;
1785 }
1786
1787 static int
1788 lnet_handle_send(struct lnet_send_data *sd)
1789 {
1790         struct lnet_ni *best_ni = sd->sd_best_ni;
1791         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1792         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1793         struct lnet_msg *msg = sd->sd_msg;
1794         int cpt2;
1795         __u32 send_case = sd->sd_send_case;
1796         int rc;
1797         __u32 routing = send_case & REMOTE_DST;
1798         struct lnet_rsp_tracker *rspt;
1799
1800         /* Increment sequence number of the selected peer, peer net,
1801          * local ni and local net so that we pick the next ones
1802          * in Round Robin.
1803          */
1804         best_lpni->lpni_seq++;
1805         best_lpni->lpni_peer_net->lpn_seq++;
1806         best_ni->ni_seq++;
1807         best_ni->ni_net->net_seq++;
1808
1809         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1810                libcfs_nid2str(best_ni->ni_nid),
1811                best_ni->ni_seq, best_ni->ni_net->net_seq,
1812                atomic_read(&best_ni->ni_tx_credits),
1813                best_ni->ni_sel_priority,
1814                libcfs_nid2str(best_lpni->lpni_nid),
1815                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1816                best_lpni->lpni_txcredits,
1817                best_lpni->lpni_sel_priority);
1818
1819         /*
1820          * grab a reference on the peer_ni so it sticks around even if
1821          * we need to drop and relock the lnet_net_lock below.
1822          */
1823         lnet_peer_ni_addref_locked(best_lpni);
1824
1825         /*
1826          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1827          * message. This ensures that we get a CPT that is correct for
1828          * the NI when the NI has been restricted to a subset of all CPTs.
1829          * If the selected CPT differs from the one currently locked, we
1830          * must unlock and relock the lnet_net_lock(), and then check whether
1831          * the configuration has changed. We don't have a hold on the best_ni
1832          * yet, and it may have vanished.
1833          */
1834         cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
1835         if (sd->sd_cpt != cpt2) {
1836                 __u32 seq = lnet_get_dlc_seq_locked();
1837                 lnet_net_unlock(sd->sd_cpt);
1838                 sd->sd_cpt = cpt2;
1839                 lnet_net_lock(sd->sd_cpt);
1840                 if (seq != lnet_get_dlc_seq_locked()) {
1841                         lnet_peer_ni_decref_locked(best_lpni);
1842                         return REPEAT_SEND;
1843                 }
1844         }
1845
1846         /*
1847          * store the best_lpni in the message right away to avoid having
1848          * to do the same operation under different conditions
1849          */
1850         msg->msg_txpeer = best_lpni;
1851         msg->msg_txni = best_ni;
1852
1853         /*
1854          * grab a reference for the best_ni since now it's in use in this
1855          * send. The reference will be dropped in lnet_finalize()
1856          */
1857         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1858
1859         /*
1860          * Always set the target.nid to the best peer picked. Either the
1861          * NID will be one of the peer NIDs selected, or the same NID as
1862          * what was originally set in the target or it will be the NID of
1863          * a router if this message should be routed
1864          */
1865         msg->msg_target.nid = msg->msg_txpeer->lpni_nid;
1866
1867         /*
1868          * lnet_msg_commit assigns the correct cpt to the message, which
1869          * is used to decrement the correct refcount on the ni when it's
1870          * time to return the credits
1871          */
1872         lnet_msg_commit(msg, sd->sd_cpt);
1873
1874         /*
1875          * If we are routing the message then we keep the src_nid that was
1876          * set by the originator. If we are not routing then we are the
1877          * originator and set it here.
1878          */
1879         if (!msg->msg_routing)
1880                 msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
1881
1882         if (routing) {
1883                 msg->msg_target_is_router = 1;
1884                 msg->msg_target.pid = LNET_PID_LUSTRE;
1885                 /*
1886                  * since we're routing we want to ensure that the
1887                  * msg_hdr.dest_nid is set to the final destination. When
1888                  * the router receives this message it knows how to route
1889                  * it.
1890                  *
1891                  * final_dst_lpni is set at the beginning of the
1892                  * lnet_select_pathway() function and is never changed.
1893                  * It's safe to use it here.
1894                  */
1895                 msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid);
1896         } else {
1897                 /*
1898                  * if we're not routing set the dest_nid to the best peer
1899                  * ni NID that we picked earlier in the algorithm.
1900                  */
1901                 msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
1902         }
1903
1904         /*
1905          * if we have response tracker block update it with the next hop
1906          * nid
1907          */
1908         if (msg->msg_md) {
1909                 rspt = msg->msg_md->md_rspt_ptr;
1910                 if (rspt) {
1911                         rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid;
1912                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1913                                libcfs_nid2str(rspt->rspt_next_hop_nid));
1914                 }
1915         }
1916
1917         rc = lnet_post_send_locked(msg, 0);
1918
1919         if (!rc)
1920                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1921                        libcfs_nid2str(msg->msg_hdr.src_nid),
1922                        libcfs_nid2str(msg->msg_txni->ni_nid),
1923                        libcfs_nid2str(sd->sd_src_nid),
1924                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1925                        libcfs_nid2str(sd->sd_dst_nid),
1926                        libcfs_nid2str(msg->msg_txpeer->lpni_nid),
1927                        libcfs_nid2str(sd->sd_rtr_nid),
1928                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1929
1930         return rc;
1931 }
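
/*
 * Note: REPEAT_SEND above signals that the DLC sequence number
 * changed while lnet_net_lock() was dropped to switch CPTs, i.e. the
 * configuration may have changed under us. A hypothetical caller
 * pattern (cf. lnet_select_pathway() below):
 */
#if 0
        rc = lnet_handle_send_case_locked(&send_data);
        if (rc == REPEAT_SEND)
                goto again; /* config changed; redo the selection */
#endif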
1932
1933 static inline void
1934 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1935                          struct lnet_msg *msg)
1936 {
1937         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1938             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1939                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1940                        libcfs_nid2str(lni->ni_nid),
1941                        libcfs_nid2str(lpni->lpni_nid));
1942                 lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
1943         }
1944 }
1945
1946 /*
1947  * Source Specified
1948  * Local Destination
1949  * non-mr peer
1950  *
1951  * use the source and destination NIDs as the pathway
1952  */
1953 static int
1954 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
1955 {
1956         /* the destination lpni is set before we get here. */
1957
1958         /* find local NI */
1959         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1960         if (!sd->sd_best_ni) {
1961                 CERROR("Can't send to %s: src %s is not a local nid\n",
1962                        libcfs_nid2str(sd->sd_dst_nid),
1963                        libcfs_nid2str(sd->sd_src_nid));
1964                 return -EINVAL;
1965         }
1966
1967         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
1968
1969         return lnet_handle_send(sd);
1970 }
1971
1972 /*
1973  * Source Specified
1974  * Local Destination
1975  * MR Peer
1976  *
1977  * Don't run the selection algorithm on the peer NIs. By specifying the
1978  * local NID, we're also saying that we should always use the destination NID
1979  * provided. This handles the case where we should be using the same
1980  * destination NID for the all the messages which belong to the same RPC
1981  * request.
1982  */
1983 static int
1984 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
1985 {
1986         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
1987         if (!sd->sd_best_ni) {
1988                 CERROR("Can't send to %s: src %s is not a local nid\n",
1989                        libcfs_nid2str(sd->sd_dst_nid),
1990                        libcfs_nid2str(sd->sd_src_nid));
1991                 return -EINVAL;
1992         }
1993
1994         if (sd->sd_best_lpni &&
1995             sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
1996                 return lnet_handle_lo_send(sd);
1997         else if (sd->sd_best_lpni)
1998                 return lnet_handle_send(sd);
1999
2000         CERROR("can't send to %s. no NI on %s\n",
2001                libcfs_nid2str(sd->sd_dst_nid),
2002                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2003
2004         return -EHOSTUNREACH;
2005 }
2006
2007 struct lnet_ni *
2008 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2009                               struct lnet_peer *peer,
2010                               struct lnet_peer_net *peer_net,
2011                               int cpt)
2012 {
2013         struct lnet_net *local_net;
2014         struct lnet_ni *best_ni;
2015
2016         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2017         if (!local_net)
2018                 return NULL;
2019
2020         /*
2021          * Iterate through the NIs in this local Net and select
2022          * these criteria in the following priority:
2023          *      1. NI health and selection priority
2024          *      2. NUMA distance and available credits
2025          *      3. Round Robin
2026          *      3. Round Robin
2027          */
2028         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2029                                    peer, peer_net, cpt);
2030
2031         return best_ni;
2032 }
2033
2034 static int
2035 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2036                              int cpt)
2037 {
2038         struct lnet_peer *peer;
2039         struct lnet_peer_ni *new_lpni;
2040         int rc;
2041
2042         lnet_peer_ni_addref_locked(lpni);
2043
2044         peer = lpni->lpni_peer_net->lpn_peer;
2045
2046         if (lnet_peer_gw_discovery(peer)) {
2047                 lnet_peer_ni_decref_locked(lpni);
2048                 return 0;
2049         }
2050
2051         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2052                 lnet_peer_ni_decref_locked(lpni);
2053                 return 0;
2054         }
2055
2056         rc = lnet_discover_peer_locked(lpni, cpt, false);
2057         if (rc) {
2058                 lnet_peer_ni_decref_locked(lpni);
2059                 return rc;
2060         }
2061
2062         new_lpni = lnet_find_peer_ni_locked(lpni->lpni_nid);
2063         if (!new_lpni) {
2064                 lnet_peer_ni_decref_locked(lpni);
2065                 return -ENOENT;
2066         }
2067
2068         peer = new_lpni->lpni_peer_net->lpn_peer;
2069         spin_lock(&peer->lp_lock);
2070         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2071                 /* The peer NI did not change and the peer is up to date.
2072                  * Nothing more to do.
2073                  */
2074                 spin_unlock(&peer->lp_lock);
2075                 lnet_peer_ni_decref_locked(lpni);
2076                 lnet_peer_ni_decref_locked(new_lpni);
2077                 return 0;
2078         }
2079         spin_unlock(&peer->lp_lock);
2080
2081         /* Either the peer NI changed during discovery, or the peer isn't up
2082          * to date. In both cases we want to queue the message on the
2083          * (possibly new) peer's pending queue and queue the peer for discovery
2084          */
2085         msg->msg_sending = 0;
2086         msg->msg_txpeer = NULL;
2087         lnet_net_unlock(cpt);
2088         lnet_peer_queue_message(peer, msg);
2089         lnet_net_lock(cpt);
2090
2091         lnet_peer_ni_decref_locked(lpni);
2092         lnet_peer_ni_decref_locked(new_lpni);
2093
2094         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2095                msg, libcfs_nid2str(peer->lp_primary_nid));
2096
2097         return LNET_DC_WAIT;
2098 }
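
/*
 * Note: LNET_DC_WAIT tells the caller that the message has been
 * queued on the (possibly new) peer's pending queue and will be sent
 * once discovery completes, so no send happens on this pass.
 */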
2099
2100 static int
2101 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2102                              lnet_nid_t dst_nid,
2103                              struct lnet_peer_ni **gw_lpni,
2104                              struct lnet_peer **gw_peer)
2105 {
2106         int rc;
2107         __u32 local_lnet;
2108         struct lnet_peer *gw;
2109         struct lnet_peer *lp;
2110         struct lnet_peer_net *lpn;
2111         struct lnet_peer_net *best_lpn = NULL;
2112         struct lnet_remotenet *rnet, *best_rnet = NULL;
2113         struct lnet_route *best_route = NULL;
2114         struct lnet_route *last_route = NULL;
2115         struct lnet_peer_ni *lpni = NULL;
2116         struct lnet_peer_ni *gwni = NULL;
2117         bool route_found = false;
2118         lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
2119                 (sd->sd_best_ni != NULL) ? sd->sd_best_ni->ni_nid :
2120                 LNET_NID_ANY;
2121         int best_lpn_healthv = 0;
2122         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2123
2124         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2125                libcfs_nid2str(src_nid));
2126
2127         /* If a router nid was specified then we are replying to a GET or
2128          * sending an ACK. In this case we use the gateway associated with the
2129          * specified router nid.
2130          */
2131         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2132                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2133                 if (gwni) {
2134                         gw = gwni->lpni_peer_net->lpn_peer;
2135                         lnet_peer_ni_decref_locked(gwni);
2136                         if (gw->lp_rtr_refcount) {
2137                                 local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
2138                                 route_found = true;
2139                         }
2140                 } else {
2141                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2142                                libcfs_nid2str(sd->sd_rtr_nid));
2143                 }
2144         }
2145
2146         if (!route_found) {
2147                 if (sd->sd_msg->msg_routing) {
2148                         /* If I'm routing this message then I need to find the
2149                          * next hop based on the destination NID
2150                          */
2151                         best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid));
2152                         if (!best_rnet) {
2153                                 CERROR("Unable to route message to %s - Route table may be misconfigured\n",
2154                                        libcfs_nid2str(sd->sd_dst_nid));
2155                                 return -EHOSTUNREACH;
2156                         }
2157                 } else {
2158                         /* we've already looked up the initial lpni using
2159                          * dst_nid
2160                          */
2161                         lpni = sd->sd_best_lpni;
2162                         /* the peer tree must be in existence */
2163                         LASSERT(lpni && lpni->lpni_peer_net &&
2164                                 lpni->lpni_peer_net->lpn_peer);
2165                         lp = lpni->lpni_peer_net->lpn_peer;
2166
2167                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2168                                 /* is this remote network reachable?  */
2169                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2170                                 if (!rnet)
2171                                         continue;
2172
2173                                 if (!best_lpn) {
2174                                         best_lpn = lpn;
2175                                         best_rnet = rnet;
2176                                 }
2177
2178                                 /* select the preferred peer net */
2179                                 if (best_lpn_healthv > lpn->lpn_healthv)
2180                                         continue;
2181                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2182                                         goto use_lpn;
2183
2184                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2185                                         continue;
2186                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2187                                         goto use_lpn;
2188
2189                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2190                                         continue;
2191 use_lpn:
2192                                 best_lpn_healthv = lpn->lpn_healthv;
2193                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2194                                 best_lpn = lpn;
2195                                 best_rnet = rnet;
2196                         }
2197
2198                         if (!best_lpn) {
2199                                 CERROR("peer %s has no available nets\n",
2200                                        libcfs_nid2str(sd->sd_dst_nid));
2201                                 return -EHOSTUNREACH;
2202                         }
2203
2204                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2205                                                                sd->sd_dst_nid,
2206                                                                lp,
2207                                                                best_lpn->lpn_net_id);
2208                         if (!sd->sd_best_lpni) {
2209                                 CERROR("peer %s is unreachable\n",
2210                                        libcfs_nid2str(sd->sd_dst_nid));
2211                                 return -EHOSTUNREACH;
2212                         }
2213
2214                         /* We're attempting to round robin over the remote peer
2215                          * NIs so update the final destination we selected
2216                          */
2217                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2218
2219                         /* Increment the sequence number of the remote lpni so
2220                          * we can round robin over the different interfaces of
2221                          * the remote lpni
2222                          */
2223                         sd->sd_best_lpni->lpni_seq++;
2224                 }
2225
2226                 /*
2227                  * find the best route. Restrict the selection on the net of the
2228                  * local NI if we've already picked the local NI to send from.
2229                  * Otherwise, let's pick any route we can find and then find
2230                  * a local NI we can reach the route's gateway on. Any route we
2231                  * select will be reachable by virtue of the restriction we have
2232                  * when adding a route.
2233                  */
2234                 best_route = lnet_find_route_locked(best_rnet,
2235                                                     LNET_NIDNET(src_nid),
2236                                                     sd->sd_best_lpni,
2237                                                     &last_route, &gwni);
2238
2239                 if (!best_route) {
2240                         CERROR("no route to %s from %s\n",
2241                                libcfs_nid2str(dst_nid),
2242                                libcfs_nid2str(src_nid));
2243                         return -EHOSTUNREACH;
2244                 }
2245
2246                 if (!gwni) {
2247                         CERROR("Internal Error. Expected a route to %s from %s\n",
2248                                libcfs_nid2str(dst_nid),
2249                                libcfs_nid2str(src_nid));
2250                         return -EFAULT;
2251                 }
2252
2253                 gw = best_route->lr_gateway;
2254                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2255                 local_lnet = best_route->lr_lnet;
2256         }
2257
2258         /*
2259          * Discover this gateway if it hasn't already been discovered.
2260          * This means we might delay the message until discovery has
2261          * completed
2262          */
2263         sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
2264         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2265         if (rc)
2266                 return rc;
2267
2268         if (!sd->sd_best_ni)
2269                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
2270                                         lnet_peer_get_net_locked(gw,
2271                                                                  local_lnet),
2272                                         sd->sd_md_cpt);
2273
2274         if (!sd->sd_best_ni) {
2275                 CERROR("Internal Error. Expected local ni on %s but none found: %s\n",
2276                        libcfs_net2str(local_lnet),
2277                        libcfs_nid2str(sd->sd_src_nid));
2278                 return -EFAULT;
2279         }
2280
2281         *gw_lpni = gwni;
2282         *gw_peer = gw;
2283
2284         /*
2285          * increment the sequence numbers since now we're sure we're
2286          * going to use this path
2287          */
2288         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2289                 LASSERT(best_route && last_route);
2290                 best_route->lr_seq = last_route->lr_seq + 1;
2291                 if (best_lpn)
2292                         best_lpn->lpn_seq++;
2293         }
2294
2295         return 0;
2296 }
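
/*
 * Note: the function above has two entry modes. If sd_rtr_nid was
 * specified (the reply/ACK path) the gateway associated with it is
 * reused directly; otherwise the best remote peer net, the best peer
 * NI on it, and the best route/gateway towards it are selected,
 * possibly queueing the message until gateway discovery completes.
 */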
2297
2298 /*
2299  * Handle two cases:
2300  *
2301  * Case 1:
2302  *  Source specified
2303  *  Remote destination
2304  *  Non-MR destination
2305  *
2306  * Case 2:
2307  *  Source specified
2308  *  Remote destination
2309  *  MR destination
2310  *
2311  * The handling of these two cases is similar. Even though the destination
2312  * can be MR or non-MR, we'll deal directly with the router.
2313  */
2314 static int
2315 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2316 {
2317         int rc;
2318         struct lnet_peer_ni *gw_lpni = NULL;
2319         struct lnet_peer *gw_peer = NULL;
2320
2321         /* find local NI */
2322         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2323         if (!sd->sd_best_ni) {
2324                 CERROR("Can't send to %s: src %s is not a local nid\n",
2325                        libcfs_nid2str(sd->sd_dst_nid),
2326                        libcfs_nid2str(sd->sd_src_nid));
2327                 return -EINVAL;
2328         }
2329
2330         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2331                                      &gw_peer);
2332         if (rc)
2333                 return rc;
2334
2335         if (sd->sd_send_case & NMR_DST)
2336                 /*
2337                  * since the final destination is non-MR let's set its preferred
2338                  * NID before we send
2339                  */
2340                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2341                                          sd->sd_msg);
2342
2343         /*
2344          * We're going to send to the gw found so let's set its
2345          * info
2346          */
2347         sd->sd_peer = gw_peer;
2348         sd->sd_best_lpni = gw_lpni;
2349
2350         return lnet_handle_send(sd);
2351 }
2352
2353 struct lnet_ni *
2354 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2355                                bool discovery)
2356 {
2357         struct lnet_peer_net *lpn = NULL;
2358         struct lnet_peer_net *best_lpn = NULL;
2359         struct lnet_net *net = NULL;
2360         struct lnet_net *best_net = NULL;
2361         struct lnet_ni *best_ni = NULL;
2362         int best_lpn_healthv = 0;
2363         int best_net_healthv = 0;
2364         int net_healthv;
2365         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2366         __u32 lpn_sel_prio;
2367         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2368         __u32 net_sel_prio;
2369         bool exit = false;
2370
2371         /*
2372          * The peer can have multiple interfaces, some of them can be on
2373          * the local network and others on a routed network. We should
2374          * prefer the local network. However if the local network is not
2375          * available then we need to try the routed network
2376          */
2377
2378         /* go through all the peer nets and find the best_ni */
2379         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2380                 /*
2381                  * The peer's list of nets can contain non-local nets. We
2382                  * want to only examine the local ones.
2383                  */
2384                 net = lnet_get_net_locked(lpn->lpn_net_id);
2385                 if (!net)
2386                         continue;
2387
2388                 lpn_sel_prio = lpn->lpn_sel_priority;
2389                 net_healthv = lnet_get_net_healthv_locked(net);
2390                 net_sel_prio = net->net_sel_priority;
2391
2392                 /*
2393                  * if this is a discovery message and lp_disc_net_id is
2394                  * specified then use that net to send the discovery on.
2395                  */
2396                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2397                     discovery) {
2398                         exit = true;
2399                         goto select_lpn;
2400                 }
2401
2402                 if (!best_lpn)
2403                         goto select_lpn;
2404
2405                 /* always select the lpn with the best health */
2406                 if (best_lpn_healthv > lpn->lpn_healthv)
2407                         continue;
2408                 else if (best_lpn_healthv < lpn->lpn_healthv)
2409                         goto select_lpn;
2410
2411                 /* select the preferred peer and local nets */
2412                 if (best_lpn_sel_prio < lpn_sel_prio)
2413                         continue;
2414                 else if (best_lpn_sel_prio > lpn_sel_prio)
2415                         goto select_lpn;
2416
2417                 if (best_net_healthv > net_healthv)
2418                         continue;
2419                 else if (best_net_healthv < net_healthv)
2420                         goto select_lpn;
2421
2422                 if (best_net_sel_prio < net_sel_prio)
2423                         continue;
2424                 else if (best_net_sel_prio > net_sel_prio)
2425                         goto select_lpn;
2426
2427                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2428                         continue;
2429                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2430                         goto select_lpn;
2431
2432                 /* round robin over the local networks */
2433                 if (best_net->net_seq <= net->net_seq)
2434                         continue;
2435
2436 select_lpn:
2437                 best_net_healthv = net_healthv;
2438                 best_net_sel_prio = net_sel_prio;
2439                 best_lpn_healthv = lpn->lpn_healthv;
2440                 best_lpn_sel_prio = lpn_sel_prio;
2441                 best_lpn = lpn;
2442                 best_net = net;
2443
2444                 if (exit)
2445                         break;
2446         }
2447
2448         if (best_lpn) {
2449                 /* Select the best NI on the same net as best_lpn chosen
2450                  * above
2451                  */
2452                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer,
2453                                                         best_lpn, md_cpt);
2454         }
2455
2456         return best_ni;
2457 }
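
/*
 * Summary of the ranking above: peer nets are compared by peer-net
 * health, then peer-net selection priority, then local-net health,
 * then local-net selection priority, then lpn_seq/net_seq
 * round-robin; a discovery message short-circuits to lp_disc_net_id
 * when the peer has one set.
 */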
2458
2459 static struct lnet_ni *
2460 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2461 {
2462         struct lnet_ni *best_ni = NULL;
2463         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2464         struct lnet_peer_ni *lpni_entry;
2465
2466         /*
2467          * We must use a consistent source address when sending to a
2468          * non-MR peer. However, a non-MR peer can have multiple NIDs
2469          * on multiple networks, and we may even need to talk to this
2470          * peer on multiple networks -- certain types of
2471          * load-balancing configuration do this.
2472          *
2473          * So we need to pick the NI the peer prefers for this
2474          * particular network.
2475          */
2476         LASSERT(peer_net);
2477         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2478                             lpni_peer_nis) {
2479                 if (lpni_entry->lpni_pref_nnids == 0)
2480                         continue;
2481                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2482                 best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt);
2483                 break;
2484         }
2485
2486         return best_ni;
2487 }
2488
2489 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2490 static int
2491 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2492 {
2493         struct lnet_ni *best_ni = NULL;
2494         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2495
2496         /*
2497          * We must use a consistent source address when sending to a
2498          * non-MR peer. However, a non-MR peer can have multiple NIDs
2499          * on multiple networks, and we may even need to talk to this
2500          * peer on multiple networks -- certain types of
2501          * load-balancing configuration do this.
2502          *
2503          * So we need to pick the NI the peer prefers for this
2504          * particular network.
2505          */
2506
2507         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2508                                                        sd->sd_cpt);
2509
2510         /* if best_ni is still not set just pick one */
2511         if (!best_ni) {
2512                 best_ni =
2513                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2514                                                 sd->sd_best_lpni->lpni_peer_net,
2515                                                 sd->sd_md_cpt);
2516                 /* If there is no best_ni we don't have a route */
2517                 if (!best_ni) {
2518                         CERROR("no path to %s from net %s\n",
2519                                 libcfs_nid2str(best_lpni->lpni_nid),
2520                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2521                         return -EHOSTUNREACH;
2522                 }
2523         }
2524
2525         sd->sd_best_ni = best_ni;
2526
2527         /* Set preferred NI if necessary. */
2528         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2529
2530         return 0;
2531 }
2532
2533
2534 /*
2535  * Source not specified
2536  * Local destination
2537  * Non-MR Peer
2538  *
2539  * always use the same source NID for NMR peers
2540  * If we've talked to that peer before then we already have a preferred
2541  * source NI associated with it. Otherwise, we select a preferred local NI
2542  * and store it in the peer
2543  */
2544 static int
2545 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2546 {
2547         int rc = 0;
2548
2549         /* sd->sd_best_lpni is already set to the final destination */
2550
2551         /*
2552          * At this point we should've created the peer ni and peer. If we
2553          * can't find it, then something went wrong. Instead of asserting,
2554          * output a relevant message and fail the send.
2555          */
2556         if (!sd->sd_best_lpni) {
2557                 CERROR("Internal fault. Unable to send msg %s to %s. "
2558                        "NID not known\n",
2559                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2560                        libcfs_nid2str(sd->sd_dst_nid));
2561                 return -EFAULT;
2562         }
2563
2564         if (sd->sd_msg->msg_routing) {
2565                 /* If I'm forwarding this message then I can choose any NI
2566                  * on the destination peer net
2567                  */
2568                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2569                                                                sd->sd_peer,
2570                                                                sd->sd_best_lpni->lpni_peer_net,
2571                                                                sd->sd_md_cpt);
2572                 if (!sd->sd_best_ni) {
2573                         CERROR("Unable to forward message to %s. No local NI available\n",
2574                                libcfs_nid2str(sd->sd_dst_nid));
2575                         rc = -EHOSTUNREACH;
2576                 }
2577         } else
2578                 rc = lnet_select_preferred_best_ni(sd);
2579
2580         if (!rc)
2581                 rc = lnet_handle_send(sd);
2582
2583         return rc;
2584 }
2585
2586 static int
2587 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2588 {
2589         /*
2590          * NOTE we've already handled the remote peer case. So we only
2591          * need to worry about the local case here.
2592          *
2593          * if we're sending a response, ACK or reply, we need to send it
2594          * to the destination NID given to us. At this point we already
2595          * have the peer_ni we're supposed to send to, so just find the
2596          * best_ni on the peer net and use that. Since we're sending to an
2597          * MR peer then we can just run the selection algorithm on our
2598          * local NIs and pick the best one.
2599          */
2600         if (sd->sd_send_case & SND_RESP) {
2601                 sd->sd_best_ni =
2602                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2603                                                 sd->sd_best_lpni->lpni_peer_net,
2604                                                 sd->sd_md_cpt);
2605
2606                 if (!sd->sd_best_ni) {
2607                         /*
2608                          * We're not going to deal with being unable to send
2609                          * a response to the provided final destination
2610                          */
2611                         CERROR("Can't send response to %s. "
2612                                "No local NI available\n",
2613                                libcfs_nid2str(sd->sd_dst_nid));
2614                         return -EHOSTUNREACH;
2615                 }
2616
2617                 return lnet_handle_send(sd);
2618         }
2619
2620         /*
2621          * If we get here that means we're sending a fresh request, PUT or
2622          * GET, so we need to run our standard selection algorithm.
2623          * First find the best local interface that's on any of the peer's
2624          * networks.
2625          */
2626         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2627                                         sd->sd_md_cpt,
2628                                         lnet_msg_discovery(sd->sd_msg));
2629         if (sd->sd_best_ni) {
2630                 sd->sd_best_lpni =
2631                   lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid,
2632                                       sd->sd_peer,
2633                                       sd->sd_best_ni->ni_net->net_id);
2634
2635                 /*
2636                  * if we're successful in selecting a peer_ni on the local
2637                  * network, then send to it. Otherwise fall through and
2638                  * try and see if we can reach it over another routed
2639                  * network
2640                  */
2641                 if (sd->sd_best_lpni &&
2642                     sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
2643                         /*
2644                          * in case we initially started with a routed
2645                          * destination, let's reset to local
2646                          */
2647                         sd->sd_send_case &= ~REMOTE_DST;
2648                         sd->sd_send_case |= LOCAL_DST;
2649                         return lnet_handle_lo_send(sd);
2650                 } else if (sd->sd_best_lpni) {
2651                         /*
2652                          * in case we initially started with a routed
2653                          * destination, let's reset to local
2654                          */
2655                         sd->sd_send_case &= ~REMOTE_DST;
2656                         sd->sd_send_case |= LOCAL_DST;
2657                         return lnet_handle_send(sd);
2658                 }
2659
2660                 CERROR("Internal Error. Expected to have a best_lpni: "
2661                        "%s -> %s\n",
2662                        libcfs_nid2str(sd->sd_src_nid),
2663                        libcfs_nid2str(sd->sd_dst_nid));
2664
2665                 return -EFAULT;
2666         }
2667
2668         /*
2669          * Peer doesn't have a local network. Let's see if there is
2670          * a remote network we can reach it on.
2671          */
2672         return PASS_THROUGH;
2673 }
2674
2675 /*
2676  * Case 1:
2677  *      Source NID not specified
2678  *      Local destination
2679  *      MR peer
2680  *
2681  * Case 2:
2682  *      Source NID not specified
2683  *      Remote destination
2684  *      MR peer
2685  *
2686  * In both of these cases if we're sending a response, ACK or REPLY, then
2687  * we need to send to the destination NID provided.
2688  *
2689  * In the remote case let's deal with MR routers.
2690  *
2691  */
2692
2693 static int
2694 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2695 {
2696         int rc = 0;
2697         struct lnet_peer *gw_peer = NULL;
2698         struct lnet_peer_ni *gw_lpni = NULL;
2699
2700         /*
2701          * handle sending a response to a remote peer here so we don't
2702          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2703          */
2704         if (sd->sd_send_case & REMOTE_DST &&
2705             sd->sd_send_case & SND_RESP) {
2706                 struct lnet_peer_ni *gw;
2707                 struct lnet_peer *gw_peer;
2708
2709                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2710                                                   &gw_peer);
2711                 if (rc < 0) {
2712                         CERROR("Can't send response to %s. "
2713                                "No route available\n",
2714                                libcfs_nid2str(sd->sd_dst_nid));
2715                         return -EHOSTUNREACH;
2716                 } else if (rc > 0) {
2717                         return rc;
2718                 }
2719
2720                 sd->sd_best_lpni = gw;
2721                 sd->sd_peer = gw_peer;
2722
2723                 return lnet_handle_send(sd);
2724         }
2725
2726         /*
2727          * Even though the NID for the peer might not be on a local network,
2728          * since the peer is MR there could be other interfaces on the
2729          * local network. In that case we'd still like to prefer the local
2730          * network over the routed network. If we're unable to do that
2731          * then we select the best router among the different routed networks,
2732          * and if the router is MR then we can deal with it as such.
2733          */
2734         rc = lnet_handle_any_mr_dsta(sd);
2735         if (rc != PASS_THROUGH)
2736                 return rc;
2737
2738         /*
2739          * Now that we must route to the destination, we must consider the
2740          * MR case, where the destination has multiple interfaces, some of
2741          * which we can route to and others we do not. For this reason we
2742          * need to select the destination which we can route to and if
2743          * there are multiple, we need to round robin.
2744          */
2745         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2746                                           &gw_peer);
2747         if (rc)
2748                 return rc;
2749
2750         sd->sd_send_case &= ~LOCAL_DST;
2751         sd->sd_send_case |= REMOTE_DST;
2752
2753         sd->sd_peer = gw_peer;
2754         sd->sd_best_lpni = gw_lpni;
2755
2756         return lnet_handle_send(sd);
2757 }
2758
2759 /*
2760  * Source not specified
2761  * Remote destination
2762  * Non-MR peer
2763  *
2764  * Must send to the specified peer NID using the same source NID that
2765  * we've used before. If it's the first time to talk to that peer then
2766  * find the source NI and assign it as preferred to that peer
2767  */
2768 static int
2769 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2770 {
2771         int rc;
2772         struct lnet_peer_ni *gw_lpni = NULL;
2773         struct lnet_peer *gw_peer = NULL;
2774
2775         /*
2776          * Let's see if we have a preferred NI to talk to this NMR peer
2777          */
2778         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2779                                                               sd->sd_cpt);
2780
2781         /*
2782          * find the router and that'll find the best NI if we didn't find
2783          * it already.
2784          */
2785         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2786                                           &gw_peer);
2787         if (rc)
2788                 return rc;
2789
2790         /*
2791          * set the best_ni we've chosen as the preferred one for
2792          * this peer
2793          */
2794         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2795
2796         /* we'll be sending to the gw */
2797         sd->sd_best_lpni = gw_lpni;
2798         sd->sd_peer = gw_peer;
2799
2800         return lnet_handle_send(sd);
2801 }
2802
2803 static int
2804 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2805 {
2806         /*
2807          * turn off the SND_RESP bit.
2808          * It will be checked in the case handling
2809          */
2810         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2811
2812         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2813                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2814                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2815                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2816                 libcfs_nid2str(sd->sd_dst_nid),
2817                 (send_case & LOCAL_DST) ? "local" : "routed");
2818
2819         switch (send_case) {
2820         /*
2821          * For all cases where the source is specified, we should always
2822          * use the destination NID, whether it's an MR destination or not,
2823          * since we're continuing a series of related messages for the
2824          * same RPC
2825          */
2826         case SRC_SPEC_LOCAL_NMR_DST:
2827                 return lnet_handle_spec_local_nmr_dst(sd);
2828         case SRC_SPEC_LOCAL_MR_DST:
2829                 return lnet_handle_spec_local_mr_dst(sd);
2830         case SRC_SPEC_ROUTER_NMR_DST:
2831         case SRC_SPEC_ROUTER_MR_DST:
2832                 return lnet_handle_spec_router_dst(sd);
2833         case SRC_ANY_LOCAL_NMR_DST:
2834                 return lnet_handle_any_local_nmr_dst(sd);
2835         case SRC_ANY_LOCAL_MR_DST:
2836         case SRC_ANY_ROUTER_MR_DST:
2837                 return lnet_handle_any_mr_dst(sd);
2838         case SRC_ANY_ROUTER_NMR_DST:
2839                 return lnet_handle_any_router_nmr_dst(sd);
2840         default:
2841                 CERROR("Unknown send case\n");
2842                 return -1;
2843         }
2844 }
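
/*
 * Editorial sketch (not in the original source): each case handled above
 * combines one flag from each axis -- source (SRC_SPEC/SRC_ANY),
 * destination locality (LOCAL_DST/REMOTE_DST) and peer capability
 * (MR_DST/NMR_DST) -- assuming the composite SRC_*_DST values are defined
 * as such ORs alongside the flags, e.g.:
 *
 *	send_case = SRC_ANY | LOCAL_DST | MR_DST;
 *	// would match the SRC_ANY_LOCAL_MR_DST arm above and be
 *	// dispatched to lnet_handle_any_mr_dst()
 */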
2845
2846 static int
2847 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2848                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2849 {
2850         struct lnet_peer_ni *lpni;
2851         struct lnet_peer *peer;
2852         struct lnet_send_data send_data;
2853         int cpt, rc;
2854         int md_cpt;
2855         __u32 send_case = 0;
2856         bool final_hop;
2857         bool mr_forwarding_allowed;
2858
2859         memset(&send_data, 0, sizeof(send_data));
2860
2861         /*
2862          * get an initial CPT to use for locking. The idea here is not to
2863          * serialize the calls to select_pathway, so that as many
2864          * operations can run concurrently as possible. To do that we use
2865          * the CPT where this call is being executed. Later on when we
2866          * determine the CPT to use in lnet_message_commit, we switch the
2867          * lock and check if there was any configuration change. If there
2868          * was none, we proceed; if there was, we restart the operation.
2869          */
2870         cpt = lnet_net_lock_current();
2871
2872         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2873         if (md_cpt == CFS_CPT_ANY)
2874                 md_cpt = cpt;
2875
2876 again:
2877
2878         /*
2879          * If we're being asked to send to the loopback interface, there
2880          * is no need to go through any selection. We can just shortcut
2881          * the entire process and send over lolnd
2882          */
2883         send_data.sd_msg = msg;
2884         send_data.sd_cpt = cpt;
2885         if (dst_nid == LNET_NID_LO_0) {
2886                 rc = lnet_handle_lo_send(&send_data);
2887                 lnet_net_unlock(cpt);
2888                 return rc;
2889         }
2890
2891         /*
2892          * find an existing peer_ni, or create one and mark it as having been
2893          * created due to network traffic. This call will create the
2894          * peer->peer_net->peer_ni tree.
2895          */
2896         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2897         if (IS_ERR(lpni)) {
2898                 lnet_net_unlock(cpt);
2899                 return PTR_ERR(lpni);
2900         }
2901
2902         /*
2903          * Cache the original src_nid and rtr_nid. If we need to resend the
2904          * message then we'll need to know whether the src_nid was originally
2905          * specified for this message. If it was originally specified,
2906          * then we need to keep using the same src_nid since it's
2907          * continuing the same sequence of messages. Similarly, rtr_nid will
2908          * affect our choice of next hop.
2909          */
2910         msg->msg_src_nid_param = src_nid;
2911         msg->msg_rtr_nid_param = rtr_nid;
2912
2913         /*
2914          * If necessary, perform discovery on the peer that owns this peer_ni.
2915          * Note, this can result in the ownership of this peer_ni changing
2916          * to another peer object.
2917          */
2918         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2919         if (rc) {
2920                 lnet_peer_ni_decref_locked(lpni);
2921                 lnet_net_unlock(cpt);
2922                 return rc;
2923         }
2924         lnet_peer_ni_decref_locked(lpni);
2925
2926         peer = lpni->lpni_peer_net->lpn_peer;
2927
2928         /*
2929          * Identify the different send cases
2930          */
2931         if (src_nid == LNET_NID_ANY)
2932                 send_case |= SRC_ANY;
2933         else
2934                 send_case |= SRC_SPEC;
2935
2936         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2937                 send_case |= LOCAL_DST;
2938         else
2939                 send_case |= REMOTE_DST;
2940
2941         final_hop = false;
2942         if (msg->msg_routing && (send_case & LOCAL_DST))
2943                 final_hop = true;
2944
2945         /* Determine whether to allow MR forwarding for this message.
2946          * NB: MR forwarding is allowed if the message originator and the
2947          * destination are both MR capable, and the destination lpni that was
2948          * originally chosen by the originator is unhealthy or down.
2949          * We check the MR capability of the destination further below
2950          */
2951         mr_forwarding_allowed = false;
2952         if (final_hop) {
2953                 struct lnet_peer *src_lp;
2954                 struct lnet_peer_ni *src_lpni;
2955
2956                 src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid,
2957                                                   LNET_NID_ANY, cpt);
2958                 /* We don't fail the send if we hit any errors here. We'll just
2959                  * try to send it via non-multi-rail criteria
2960                  */
2961                 if (!IS_ERR(src_lpni)) {
2962                         /* Drop ref taken by lnet_nid2peerni_locked() */
2963                         lnet_peer_ni_decref_locked(src_lpni);
2964                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
2965                         if (lnet_peer_is_multi_rail(src_lp) &&
2966                             !lnet_is_peer_ni_alive(lpni))
2967                                 mr_forwarding_allowed = true;
2968
2969                 }
2970                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
2971                        mr_forwarding_allowed ? "allowed" : "not allowed");
2972         }
2973
2974         /*
2975          * Deal with the peer as NMR in the following cases:
2976          * 1. the peer is NMR
2977          * 2. We're trying to recover a specific peer NI
2978          * 3. I'm a router sending to the final destination and MR forwarding is
2979          *    not allowed for this message (as determined above).
2980          *    In this case the source of the message would've
2981          *    already selected the final destination so my job
2982          *    is to honor the selection.
2983          */
2984         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
2985             (final_hop && !mr_forwarding_allowed))
2986                 send_case |= NMR_DST;
2987         else
2988                 send_case |= MR_DST;
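
        /*
         * Editorial example (not in the original source): if I'm a router
         * delivering to an MR-capable final destination whose
         * originally-chosen lpni is still alive, mr_forwarding_allowed
         * stays false and the peer is handled as NMR_DST, honoring the
         * originator's interface selection; only when that lpni is
         * unhealthy or down may a different destination interface be used.
         */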
2989
2990         if (lnet_msg_is_response(msg))
2991                 send_case |= SND_RESP;
2992
2993         /* assign parameters to the send_data */
2994         send_data.sd_rtr_nid = rtr_nid;
2995         send_data.sd_src_nid = src_nid;
2996         send_data.sd_dst_nid = dst_nid;
2997         send_data.sd_best_lpni = lpni;
2998         /*
2999          * keep a pointer to the final destination in case we're going to
3000          * route, so we'll need to access it later
3001          */
3002         send_data.sd_final_dst_lpni = lpni;
3003         send_data.sd_peer = peer;
3004         send_data.sd_md_cpt = md_cpt;
3005         send_data.sd_send_case = send_case;
3006
3007         rc = lnet_handle_send_case_locked(&send_data);
3008
3009         /*
3010          * Update the local cpt since send_data.sd_cpt might've been
3011          * updated as a result of calling lnet_handle_send_case_locked().
3012          */
3013         cpt = send_data.sd_cpt;
3014
3015         if (rc == REPEAT_SEND)
3016                 goto again;
3017
3018         lnet_net_unlock(cpt);
3019
3020         return rc;
3021 }
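
/*
 * Editorial note (not in the original source): if
 * lnet_handle_send_case_locked() returns REPEAT_SEND, the "again:" label
 * re-runs the peer lookup and case selection, now under whatever CPT the
 * handler left in sd_cpt; the src/rtr NIDs cached in msg_src_nid_param
 * and msg_rtr_nid_param keep repeated passes consistent.
 */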
3022
3023 int
3024 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
3025 {
3026         lnet_nid_t              dst_nid = msg->msg_target.nid;
3027         int                     rc;
3028
3029         /*
3030          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
3031          * but we might want to use pre-determined router for ACK/REPLY
3032          * in the future
3033          */
3034         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3035         LASSERT(msg->msg_txpeer == NULL);
3036         LASSERT(msg->msg_txni == NULL);
3037         LASSERT(!msg->msg_sending);
3038         LASSERT(!msg->msg_target_is_router);
3039         LASSERT(!msg->msg_receiving);
3040
3041         msg->msg_sending = 1;
3042
3043         LASSERT(!msg->msg_tx_committed);
3044
3045         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3046         if (rc < 0) {
3047                 if (rc == -EHOSTUNREACH)
3048                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3049                 else
3050                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3051                 return rc;
3052         }
3053
3054         if (rc == LNET_CREDIT_OK)
3055                 lnet_ni_send(msg->msg_txni, msg);
3056
3057         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3058         return 0;
3059 }
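
/*
 * Minimal usage sketch (editorial, mirroring lnet_parse_get() below):
 *
 *	rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
 *	if (rc < 0)
 *		lnet_finalize(msg, rc);
 *
 * A zero return only means the message was accepted (LNET_CREDIT_OK, and
 * already handed to the NI) or queued (LNET_CREDIT_WAIT/LNET_DC_WAIT);
 * completion is still reported asynchronously via lnet_finalize().
 */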
3060
3061 enum lnet_mt_event_type {
3062         MT_TYPE_LOCAL_NI = 0,
3063         MT_TYPE_PEER_NI
3064 };
3065
3066 struct lnet_mt_event_info {
3067         enum lnet_mt_event_type mt_type;
3068         lnet_nid_t mt_nid;
3069 };
3070
3071 /* called with res_lock held */
3072 void
3073 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3074 {
3075         struct lnet_rsp_tracker *rspt;
3076
3077         /*
3078          * msg has a refcount on the MD so the MD is not going away.
3079          * The rspt queue for the cpt is protected by
3080          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3081          */
3082         if (!md->md_rspt_ptr)
3083                 return;
3084
3085         rspt = md->md_rspt_ptr;
3086
3087         /* debug code */
3088         LASSERT(rspt->rspt_cpt == cpt);
3089
3090         md->md_rspt_ptr = NULL;
3091
3092         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3093                 /*
3094                  * The monitor thread has invalidated this handle because the
3095                  * response timed out, but it failed to lookup the MD. That
3096                  * means this response tracker is on the zombie list. We can
3097                  * safely remove it under the resource lock (held by caller) and
3098                  * free the response tracker block.
3099                  */
3100                 list_del(&rspt->rspt_on_list);
3101                 lnet_rspt_free(rspt, cpt);
3102         } else {
3103                 /*
3104                  * invalidate the handle to indicate that a response has been
3105                  * received, which will then lead the monitor thread to clean up
3106                  * the rspt block.
3107                  */
3108                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3109         }
3110 }
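
/*
 * Editorial sketch of the rspt hand-off implemented above:
 *
 *	response arrives first:	this function invalidates rspt_mdh and
 *				leaves the rspt queued; the monitor
 *				thread frees it on its next sweep.
 *	timeout fires first:	the monitor thread invalidates rspt_mdh
 *				and, if the MD lookup fails, parks the
 *				rspt on the zombie list; this function
 *				then frees it under the resource lock.
 */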
3111
3112 void
3113 lnet_clean_zombie_rstqs(void)
3114 {
3115         struct lnet_rsp_tracker *rspt, *tmp;
3116         int i;
3117
3118         cfs_cpt_for_each(i, lnet_cpt_table()) {
3119                 list_for_each_entry_safe(rspt, tmp,
3120                                          the_lnet.ln_mt_zombie_rstqs[i],
3121                                          rspt_on_list) {
3122                         list_del(&rspt->rspt_on_list);
3123                         lnet_rspt_free(rspt, i);
3124                 }
3125         }
3126
3127         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3128 }
3129
3130 static void
3131 lnet_finalize_expired_responses(void)
3132 {
3133         struct lnet_libmd *md;
3134         struct lnet_rsp_tracker *rspt, *tmp;
3135         ktime_t now;
3136         int i;
3137
3138         if (the_lnet.ln_mt_rstq == NULL)
3139                 return;
3140
3141         cfs_cpt_for_each(i, lnet_cpt_table()) {
3142                 LIST_HEAD(local_queue);
3143
3144                 lnet_net_lock(i);
3145                 if (!the_lnet.ln_mt_rstq[i]) {
3146                         lnet_net_unlock(i);
3147                         continue;
3148                 }
3149                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3150                 lnet_net_unlock(i);
3151
3152                 now = ktime_get();
3153
3154                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3155                         /*
3156                          * The rspt mdh will be invalidated when a response
3157                          * is received or whenever we want to discard the
3158                          * block. The monitor thread will walk the queue
3159                          * and clean up any rspts with an invalid mdh.
3160                          * The monitor thread will walk the queue until
3161                          * the first unexpired rspt block. This means that
3162                          * some rspt blocks which received their
3163                          * corresponding responses will linger in the
3164                          * queue until they are cleaned up eventually.
3165                          */
3166                         lnet_res_lock(i);
3167                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3168                                 lnet_res_unlock(i);
3169                                 list_del(&rspt->rspt_on_list);
3170                                 lnet_rspt_free(rspt, i);
3171                                 continue;
3172                         }
3173
3174                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3175                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3176                                 struct lnet_peer_ni *lpni;
3177                                 lnet_nid_t nid;
3178
3179                                 md = lnet_handle2md(&rspt->rspt_mdh);
3180                                 if (!md) {
3181                                         /* MD has been queued for unlink, but
3182                                          * rspt hasn't been detached (Note we've
3183                                          * checked above that the rspt_mdh is
3184                                          * valid). Since we cannot lookup the MD
3185                                          * we're unable to detach the rspt
3186                                          * ourselves. Thus, move the rspt to the
3187                                          * zombie list where we'll wait for
3188                                          * either:
3189                                          *   1. The remaining operations on the
3190                                          *   MD to complete. In this case the
3191                                          *   final operation will result in
3192                                          *   lnet_msg_detach_md()->
3193                                          *   lnet_detach_rsp_tracker() where
3194                                          *   we will clean up this response
3195                                          *   tracker.
3196                                          *   2. LNet to shutdown. In this case
3197                                          *   we'll wait until after all LND Nets
3198                                          *   have shutdown and then we can
3199                                          *   safely free any remaining response
3200                                          *   tracker blocks on the zombie list.
3201                                          * Note: We need to hold the resource
3202                                          * lock when adding to the zombie list
3203                                          * because we may have concurrent access
3204                                          * with lnet_detach_rsp_tracker().
3205                                          */
3206                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3207                                         list_move(&rspt->rspt_on_list,
3208                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3209                                         lnet_res_unlock(i);
3210                                         continue;
3211                                 }
3212                                 LASSERT(md->md_rspt_ptr == rspt);
3213                                 md->md_rspt_ptr = NULL;
3214                                 lnet_res_unlock(i);
3215
3216                                 LNetMDUnlink(rspt->rspt_mdh);
3217
3218                                 nid = rspt->rspt_next_hop_nid;
3219
3220                                 list_del(&rspt->rspt_on_list);
3221                                 lnet_rspt_free(rspt, i);
3222
3223                                 /* If we're shutting down we just want to clean
3224                                  * up the rspt blocks
3225                                  */
3226                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3227                                         continue;
3228
3229                                 lnet_net_lock(i);
3230                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3231                                 lnet_net_unlock(i);
3232
3233                                 CDEBUG(D_NET,
3234                                        "Response timeout: md = %p: nid = %s\n",
3235                                        md, libcfs_nid2str(nid));
3236
3237                                 /*
3238                                  * If there is a timeout on the response
3239                                  * from the next hop decrement its health
3240                                  * value so that we don't use it
3241                                  */
3242                                 lnet_net_lock(0);
3243                                 lpni = lnet_find_peer_ni_locked(nid);
3244                                 if (lpni) {
3245                                         lnet_handle_remote_failure_locked(lpni);
3246                                         lnet_peer_ni_decref_locked(lpni);
3247                                 }
3248                                 lnet_net_unlock(0);
3249                         } else {
3250                                 lnet_res_unlock(i);
3251                                 break;
3252                         }
3253                 }
3254
3255                 if (!list_empty(&local_queue)) {
3256                         lnet_net_lock(i);
3257                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3258                         lnet_net_unlock(i);
3259                 }
3260         }
3261 }
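
/*
 * Editorial note (not in the original source): the early "break" above
 * relies on rspt blocks being appended to ln_mt_rstq in deadline order,
 * so the first unexpired entry bounds the scan; already-answered rspts
 * behind it simply wait for a later sweep, as the comment above explains.
 */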
3262
3263 static void
3264 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3265 {
3266         struct lnet_msg *msg;
3267
3268         while (!list_empty(resendq)) {
3269                 struct lnet_peer_ni *lpni;
3270
3271                 msg = list_entry(resendq->next, struct lnet_msg,
3272                                  msg_list);
3273
3274                 list_del_init(&msg->msg_list);
3275
3276                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3277                 if (!lpni) {
3278                         lnet_net_unlock(cpt);
3279                         CERROR("Expected that a peer is already created for %s\n",
3280                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3281                         msg->msg_no_resend = true;
3282                         lnet_finalize(msg, -EFAULT);
3283                         lnet_net_lock(cpt);
3284                 } else {
3285                         int rc;
3286
3287                         lnet_peer_ni_decref_locked(lpni);
3288
3289                         lnet_net_unlock(cpt);
3290                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3291                                libcfs_nid2str(msg->msg_src_nid_param),
3292                                libcfs_id2str(msg->msg_target),
3293                                lnet_msgtyp2str(msg->msg_type),
3294                                msg->msg_recovery,
3295                                msg->msg_retry_count);
3296                         rc = lnet_send(msg->msg_src_nid_param, msg,
3297                                        msg->msg_rtr_nid_param);
3298                         if (rc) {
3299                                 CERROR("Error sending %s to %s: %d\n",
3300                                        lnet_msgtyp2str(msg->msg_type),
3301                                        libcfs_id2str(msg->msg_target), rc);
3302                                 msg->msg_no_resend = true;
3303                                 lnet_finalize(msg, rc);
3304                         }
3305                         lnet_net_lock(cpt);
3306                         if (!rc)
3307                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3308                 }
3309         }
3310 }
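
/*
 * Editorial note (not in the original source): each message is removed
 * from the resend queue with list_del_init() before the net lock is
 * dropped, so even though lnet_send() and lnet_finalize() must run
 * unlocked, no other thread can pick the same message off resendq.
 */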
3311
3312 static void
3313 lnet_resend_pending_msgs(void)
3314 {
3315         int i;
3316
3317         cfs_cpt_for_each(i, lnet_cpt_table()) {
3318                 lnet_net_lock(i);
3319                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3320                 lnet_net_unlock(i);
3321         }
3322 }
3323
3324 /* called with cpt and ni_lock held */
3325 static void
3326 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3327 {
3328         struct lnet_handle_md recovery_mdh;
3329
3330         LNetInvalidateMDHandle(&recovery_mdh);
3331
3332         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3333             force) {
3334                 recovery_mdh = ni->ni_ping_mdh;
3335                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3336         }
3337         lnet_ni_unlock(ni);
3338         lnet_net_unlock(cpt);
3339         if (!LNetMDHandleIsInvalid(recovery_mdh))
3340                 LNetMDUnlink(recovery_mdh);
3341         lnet_net_lock(cpt);
3342         lnet_ni_lock(ni);
3343 }
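
/*
 * Editorial note (not in the original source): the unlock/relock dance
 * above drops both the NI lock and the net lock around LNetMDUnlink(),
 * presumably because unlinking can fire the MD's handler, which itself
 * takes these locks; the handle is copied and invalidated first so the
 * MD cannot be unlinked twice.
 */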
3344
3345 static void
3346 lnet_recover_local_nis(void)
3347 {
3348         struct lnet_mt_event_info *ev_info;
3349         LIST_HEAD(processed_list);
3350         LIST_HEAD(local_queue);
3351         struct lnet_handle_md mdh;
3352         struct lnet_ni *tmp;
3353         struct lnet_ni *ni;
3354         lnet_nid_t nid;
3355         int healthv;
3356         int rc;
3357         time64_t now;
3358
3359         /*
3360          * splice the recovery queue on a local queue. We will iterate
3361          * through the local queue and update it as needed. Once we're
3362          * done with the traversal, we'll splice the local queue back on
3363          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3364          * will be traversed in the next iteration.
3365          */
3366         lnet_net_lock(0);
3367         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3368                          &local_queue);
3369         lnet_net_unlock(0);
3370
3371         now = ktime_get_seconds();
3372
3373         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3374                 /*
3375                  * if an NI is being deleted or it is now healthy, there
3376                  * is no need to keep it around in the recovery queue.
3377                  * The monitor thread is the only thread responsible for
3378                  * removing the NI from the recovery queue.
3379                  * Multiple threads can be adding NIs to the recovery
3380                  * queue.
3381                  */
3382                 healthv = atomic_read(&ni->ni_healthv);
3383
3384                 lnet_net_lock(0);
3385                 lnet_ni_lock(ni);
3386                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3387                     healthv == LNET_MAX_HEALTH_VALUE) {
3388                         list_del_init(&ni->ni_recovery);
3389                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3390                         lnet_ni_unlock(ni);
3391                         lnet_ni_decref_locked(ni, 0);
3392                         lnet_net_unlock(0);
3393                         continue;
3394                 }
3395
3396                 /*
3397                  * if the local NI failed recovery we must unlink the md.
3398                  * But we want to keep the local_ni on the recovery queue
3399                  * so we can continue the attempts to recover it.
3400                  */
3401                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3402                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3403                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3404                 }
3405
3406
3407                 lnet_ni_unlock(ni);
3408
3409                 if (now < ni->ni_next_ping) {
3410                         lnet_net_unlock(0);
3411                         continue;
3412                 }
3413
3414                 lnet_net_unlock(0);
3415
3416                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3417                        libcfs_nid2str(ni->ni_nid));
3418
3419                 lnet_ni_lock(ni);
3420                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3421                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3422                         lnet_ni_unlock(ni);
3423
3424                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3425                         if (!ev_info) {
3426                                 CERROR("out of memory. Can't recover %s\n",
3427                                        libcfs_nid2str(ni->ni_nid));
3428                                 lnet_ni_lock(ni);
3429                                 ni->ni_recovery_state &=
3430                                   ~LNET_NI_RECOVERY_PENDING;
3431                                 lnet_ni_unlock(ni);
3432                                 continue;
3433                         }
3434
3435                         mdh = ni->ni_ping_mdh;
3436                         /*
3437                          * Invalidate the ni mdh in case it's deleted.
3438                          * We'll unlink the mdh in this case below.
3439                          */
3440                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3441                         nid = ni->ni_nid;
3442
3443                         /*
3444                          * remove the NI from the local queue and drop the
3445                          * reference count to it while we're recovering
3446                          * it. The reason is that the NI could be
3447                          * deleted, and the way the code is structured,
3448                          * if we don't drop the NI the deletion
3449                          * code will enter a loop waiting for the
3450                          * reference count to drop while holding the
3451                          * ln_mutex_lock(). When we look up the peer to
3452                          * send to in lnet_select_pathway() we will try to
3453                          * lock the ln_mutex_lock() as well, leading to
3454                          * a deadlock. By dropping the refcount and
3455                          * removing it from the list, we allow for the NI
3456                          * to be removed, then we use the cached NID to
3457                          * look it up again. If it's gone, then we just
3458                          * continue examining the rest of the queue.
3459                          */
3460                         lnet_net_lock(0);
3461                         list_del_init(&ni->ni_recovery);
3462                         lnet_ni_decref_locked(ni, 0);
3463                         lnet_net_unlock(0);
3464
3465                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3466                         ev_info->mt_nid = nid;
3467                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3468                                             ev_info, the_lnet.ln_mt_handler,
3469                                             true);
3470                         /* lookup the nid again */
3471                         lnet_net_lock(0);
3472                         ni = lnet_nid2ni_locked(nid, 0);
3473                         if (!ni) {
3474                                 /*
3475                                  * the NI has been deleted when we dropped
3476                                  * the ref count
3477                                  */
3478                                 lnet_net_unlock(0);
3479                                 LNetMDUnlink(mdh);
3480                                 continue;
3481                         }
3482                         ni->ni_ping_count++;
3483
3484                         ni->ni_ping_mdh = mdh;
3485                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3486                                                         now);
3487
3488                         if (rc) {
3489                                 lnet_ni_lock(ni);
3490                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3491                                 lnet_ni_unlock(ni);
3492                         }
3493                         lnet_net_unlock(0);
3494                 } else
3495                         lnet_ni_unlock(ni);
3496         }
3497
3498         /*
3499          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3500          * reexamined in the next iteration.
3501          */
3502         list_splice_init(&processed_list, &local_queue);
3503         lnet_net_lock(0);
3504         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3505         lnet_net_unlock(0);
3506 }
3507
3508 static int
3509 lnet_resendqs_create(void)
3510 {
3511         struct list_head **resendqs;
3512
3513         resendqs = lnet_create_array_of_queues();
3514         if (!resendqs)
3515                 return -ENOMEM;
3516
3517         lnet_net_lock(LNET_LOCK_EX);
3518         the_lnet.ln_mt_resendqs = resendqs;
3519         lnet_net_unlock(LNET_LOCK_EX);
3520
3521         return 0;
3522 }
3523
3524 static void
3525 lnet_clean_local_ni_recoveryq(void)
3526 {
3527         struct lnet_ni *ni;
3528
3529         /* This is only called when the monitor thread has stopped */
3530         lnet_net_lock(0);
3531
3532         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3533                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3534                                 struct lnet_ni, ni_recovery);
3535                 list_del_init(&ni->ni_recovery);
3536                 lnet_ni_lock(ni);
3537                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3538                 lnet_ni_unlock(ni);
3539                 lnet_ni_decref_locked(ni, 0);
3540         }
3541
3542         lnet_net_unlock(0);
3543 }
3544
3545 static void
3546 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3547                                      bool force)
3548 {
3549         struct lnet_handle_md recovery_mdh;
3550
3551         LNetInvalidateMDHandle(&recovery_mdh);
3552
3553         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3554                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3555                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3556         }
3557         spin_unlock(&lpni->lpni_lock);
3558         lnet_net_unlock(cpt);
3559         if (!LNetMDHandleIsInvalid(recovery_mdh))
3560                 LNetMDUnlink(recovery_mdh);
3561         lnet_net_lock(cpt);
3562         spin_lock(&lpni->lpni_lock);
3563 }
3564
3565 static void
3566 lnet_clean_peer_ni_recoveryq(void)
3567 {
3568         struct lnet_peer_ni *lpni, *tmp;
3569
3570         lnet_net_lock(LNET_LOCK_EX);
3571
3572         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3573                                  lpni_recovery) {
3574                 list_del_init(&lpni->lpni_recovery);
3575                 spin_lock(&lpni->lpni_lock);
3576                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3577                 spin_unlock(&lpni->lpni_lock);
3578                 lnet_peer_ni_decref_locked(lpni);
3579         }
3580
3581         lnet_net_unlock(LNET_LOCK_EX);
3582 }
3583
3584 static void
3585 lnet_clean_resendqs(void)
3586 {
3587         struct lnet_msg *msg, *tmp;
3588         LIST_HEAD(msgs);
3589         int i;
3590
3591         cfs_cpt_for_each(i, lnet_cpt_table()) {
3592                 lnet_net_lock(i);
3593                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3594                 lnet_net_unlock(i);
3595                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3596                         list_del_init(&msg->msg_list);
3597                         msg->msg_no_resend = true;
3598                         lnet_finalize(msg, -ESHUTDOWN);
3599                 }
3600         }
3601
3602         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3603 }
3604
3605 static void
3606 lnet_recover_peer_nis(void)
3607 {
3608         struct lnet_mt_event_info *ev_info;
3609         LIST_HEAD(processed_list);
3610         LIST_HEAD(local_queue);
3611         struct lnet_handle_md mdh;
3612         struct lnet_peer_ni *lpni;
3613         struct lnet_peer_ni *tmp;
3614         lnet_nid_t nid;
3615         int healthv;
3616         int rc;
3617         time64_t now;
3618
3619         /*
3620          * Always use cpt 0 for locking across all interactions with
3621          * ln_mt_peerNIRecovq
3622          */
3623         lnet_net_lock(0);
3624         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3625                          &local_queue);
3626         lnet_net_unlock(0);
3627
3628         now = ktime_get_seconds();
3629
3630         list_for_each_entry_safe(lpni, tmp, &local_queue,
3631                                  lpni_recovery) {
3632                 /*
3633                  * The same protection strategy is used here as is in the
3634                  * local recovery case.
3635                  */
3636                 lnet_net_lock(0);
3637                 healthv = atomic_read(&lpni->lpni_healthv);
3638                 spin_lock(&lpni->lpni_lock);
3639                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3640                     healthv == LNET_MAX_HEALTH_VALUE) {
3641                         list_del_init(&lpni->lpni_recovery);
3642                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3643                         spin_unlock(&lpni->lpni_lock);
3644                         lnet_peer_ni_decref_locked(lpni);
3645                         lnet_net_unlock(0);
3646                         continue;
3647                 }
3648
3649                 /*
3650                  * If the peer NI has failed recovery we must unlink the
3651                  * md. But we want to keep the peer ni on the recovery
3652                  * queue so we can try to continue recovering it
3653                  */
3654                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3655                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3656                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3657                 }
3658
3659                 spin_unlock(&lpni->lpni_lock);
3660
3661                 if (now < lpni->lpni_next_ping) {
3662                         lnet_net_unlock(0);
3663                         continue;
3664                 }
3665
3666                 lnet_net_unlock(0);
3667
3668                 /*
3669                  * NOTE: we're racing with peer deletion from user space.
3670                  * It's possible that a peer is deleted after we check its
3671                  * state. In this case the recovery can create a new peer.
3672                  */
3673                 spin_lock(&lpni->lpni_lock);
3674                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3675                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3676                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3677                         spin_unlock(&lpni->lpni_lock);
3678
3679                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3680                         if (!ev_info) {
3681                                 CERROR("out of memory. Can't recover %s\n",
3682                                        libcfs_nid2str(lpni->lpni_nid));
3683                                 spin_lock(&lpni->lpni_lock);
3684                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3685                                 spin_unlock(&lpni->lpni_lock);
3686                                 continue;
3687                         }
3688
3689                         /* look at the comments in lnet_recover_local_nis() */
3690                         mdh = lpni->lpni_recovery_ping_mdh;
3691                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3692                         nid = lpni->lpni_nid;
3693                         lnet_net_lock(0);
3694                         list_del_init(&lpni->lpni_recovery);
3695                         lnet_peer_ni_decref_locked(lpni);
3696                         lnet_net_unlock(0);
3697
3698                         ev_info->mt_type = MT_TYPE_PEER_NI;
3699                         ev_info->mt_nid = nid;
3700                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3701                                             ev_info, the_lnet.ln_mt_handler,
3702                                             true);
3703                         lnet_net_lock(0);
3704                         /*
3705                          * lnet_find_peer_ni_locked() grabs a refcount for
3706                          * us. No need to take it explicitly.
3707                          */
3708                         lpni = lnet_find_peer_ni_locked(nid);
3709                         if (!lpni) {
3710                                 lnet_net_unlock(0);
3711                                 LNetMDUnlink(mdh);
3712                                 continue;
3713                         }
3714
3715                         lpni->lpni_ping_count++;
3716
3717                         lpni->lpni_recovery_ping_mdh = mdh;
3718
3719                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3720                                                              &processed_list,
3721                                                              now);
3722                         if (rc) {
3723                                 spin_lock(&lpni->lpni_lock);
3724                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3725                                 spin_unlock(&lpni->lpni_lock);
3726                         }
3727
3728                         /* Drop the ref taken by lnet_find_peer_ni_locked() */
3729                         lnet_peer_ni_decref_locked(lpni);
3730                         lnet_net_unlock(0);
3731                 } else
3732                         spin_unlock(&lpni->lpni_lock);
3733         }
3734
3735         list_splice_init(&processed_list, &local_queue);
3736         lnet_net_lock(0);
3737         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3738         lnet_net_unlock(0);
3739 }
3740
3741 static int
3742 lnet_monitor_thread(void *arg)
3743 {
3744         time64_t rsp_timeout = 0;
3745         time64_t now;
3746
3747         wait_for_completion(&the_lnet.ln_started);
3748         /*
3749          * The monitor thread takes care of the following:
3750          *  1. Checks the aliveness of routers
3751          *  2. Checks if there are messages on the resend queue to resend
3752          *     them.
3753          *  3. Check if there are any NIs on the local recovery queue and
3754          *     pings them
3755          *  4. Checks if there are any NIs on the remote recovery queue
3756          *     and pings them.
3757          */
3758         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3759                 now = ktime_get_real_seconds();
3760
3761                 if (lnet_router_checker_active())
3762                         lnet_check_routers();
3763
3764                 lnet_resend_pending_msgs();
3765
3766                 if (now >= rsp_timeout) {
3767                         lnet_finalize_expired_responses();
3768                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3769                 }
3770
3771                 lnet_recover_local_nis();
3772                 lnet_recover_peer_nis();
3773
3774                 /*
3775                  * TODO do we need to check if we should sleep without
3776                  * timeout?  Technically, an active system will always
3777                  * have messages in flight so this check will always
3778                  * evaluate to false. And on an idle system do we care
3779                  * if we wake up every 1 second? Although, we've seen
3780                  * cases where we get a complaint that an idle thread
3781                  * is waking up unnecessarily.
3782                  */
3783                 wait_for_completion_interruptible_timeout(
3784                         &the_lnet.ln_mt_wait_complete,
3785                         cfs_time_seconds(1));
3786                 /* Must re-init the completion before testing anything,
3787                  * including ln_mt_state.
3788                  */
3789                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3790         }
3791
3792         /* Shutting down */
3793         lnet_net_lock(LNET_LOCK_EX);
3794         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3795         lnet_net_unlock(LNET_LOCK_EX);
3796
3797         /* signal that the monitor thread is exiting */
3798         up(&the_lnet.ln_mt_signal);
3799
3800         return 0;
3801 }
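
/*
 * Editorial example (not in the original source): the loop above wakes
 * roughly once per second but throttles response expiry to half the
 * transaction timeout; e.g. with lnet_transaction_timeout = 50,
 * lnet_finalize_expired_responses() runs about every 25 seconds, while
 * resends and local/peer NI recovery are attempted on every wakeup.
 */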
3802
3803 /*
3804  * lnet_send_ping
3805  * Sends a ping.
3806  * Returns == 0 on success
3807  * Returns > 0 if the ping buffer allocation or LNetMDBind fails
3808  * Returns < 0 if the destination NID is unspecified or LNetGet fails
3809  */
3810 int
3811 lnet_send_ping(lnet_nid_t dest_nid,
3812                struct lnet_handle_md *mdh, int nnis,
3813                void *user_data, lnet_handler_t handler, bool recovery)
3814 {
3815         struct lnet_md md = { NULL };
3816         struct lnet_process_id id;
3817         struct lnet_ping_buffer *pbuf;
3818         int rc;
3819
3820         if (dest_nid == LNET_NID_ANY) {
3821                 rc = -EHOSTUNREACH;
3822                 goto fail_error;
3823         }
3824
3825         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3826         if (!pbuf) {
3827                 rc = ENOMEM;
3828                 goto fail_error;
3829         }
3830
3831         /* initialize md content */
3832         md.start     = &pbuf->pb_info;
3833         md.length    = LNET_PING_INFO_SIZE(nnis);
3834         md.threshold = 2; /* GET/REPLY */
3835         md.max_size  = 0;
3836         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3837         md.user_ptr  = user_data;
3838         md.handler   = handler;
3839
3840         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3841         if (rc) {
3842                 lnet_ping_buffer_decref(pbuf);
3843                 CERROR("Can't bind MD: %d\n", rc);
3844                 rc = -rc; /* change the rc to positive */
3845                 goto fail_error;
3846         }
3847         id.pid = LNET_PID_LUSTRE;
3848         id.nid = dest_nid;
3849
3850         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3851                      LNET_RESERVED_PORTAL,
3852                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3853
3854         if (rc)
3855                 goto fail_unlink_md;
3856
3857         return 0;
3858
3859 fail_unlink_md:
3860         LNetMDUnlink(*mdh);
3861         LNetInvalidateMDHandle(mdh);
3862 fail_error:
3863         return rc;
3864 }
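
/*
 * Minimal usage sketch (editorial; nid and ev_info as in the recovery
 * paths above, starting from an invalid handle):
 *
 *	struct lnet_handle_md mdh;
 *
 *	LNetInvalidateMDHandle(&mdh);
 *	rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN, ev_info,
 *			    the_lnet.ln_mt_handler, true);
 *
 * Note the asymmetric return convention documented above: rc must be
 * checked for both positive and negative failures, and on an LNetGet
 * failure the handle has already been unlinked and invalidated.
 */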
3865
3866 static void
3867 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3868                            int status, bool send, bool unlink_event)
3869 {
3870         lnet_nid_t nid = ev_info->mt_nid;
3871
3872         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3873                 struct lnet_ni *ni;
3874
3875                 lnet_net_lock(0);
3876                 ni = lnet_nid2ni_locked(nid, 0);
3877                 if (!ni) {
3878                         lnet_net_unlock(0);
3879                         return;
3880                 }
3881                 lnet_ni_lock(ni);
3882                 if (!send || status != 0)
3883                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3884                 if (status)
3885                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3886                 lnet_ni_unlock(ni);
3887                 lnet_net_unlock(0);
3888
3889                 if (status != 0) {
3890                         CERROR("local NI (%s) recovery failed with %d\n",
3891                                libcfs_nid2str(nid), status);
3892                         return;
3893                 }
3894                 /*
3895                  * need to increment healthv for the ni here, because in
3896                  * the lnet_finalize() path we don't have access to this
3897                  * NI. And in order to get access to it, we'd need to
3898                  * carry forward too much information.
3899                  * In the peer case, it'll naturally be incremented
3900                  */
3901                 if (!unlink_event)
3902                         lnet_inc_healthv(&ni->ni_healthv,
3903                                          lnet_health_sensitivity);
3904         } else {
3905                 struct lnet_peer_ni *lpni;
3906                 int cpt;
3907
3908                 cpt = lnet_net_lock_current();
3909                 lpni = lnet_find_peer_ni_locked(nid);
3910                 if (!lpni) {
3911                         lnet_net_unlock(cpt);
3912                         return;
3913                 }
3914                 spin_lock(&lpni->lpni_lock);
3915                 if (!send || status != 0)
3916                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3917                 if (status)
3918                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3919                 spin_unlock(&lpni->lpni_lock);
3920                 lnet_peer_ni_decref_locked(lpni);
3921                 lnet_net_unlock(cpt);
3922
3923                 if (status != 0)
3924                         CERROR("peer NI (%s) recovery failed with %d\n",
3925                                libcfs_nid2str(nid), status);
3926         }
3927 }
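
/*
 * Editorial note (not in the original source): the "!send || status"
 * tests above clear the RECOVERY_PENDING flag when the ping completed
 * (reply or unlink) or when the send itself failed; a successful send
 * keeps the flag set until the corresponding reply event arrives.
 */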
3928
3929 void
3930 lnet_mt_event_handler(struct lnet_event *event)
3931 {
3932         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
3933         struct lnet_ping_buffer *pbuf;
3934
3935         /* TODO: remove assert */
3936         LASSERT(event->type == LNET_EVENT_REPLY ||
3937                 event->type == LNET_EVENT_SEND ||
3938                 event->type == LNET_EVENT_UNLINK);
3939
3940         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3941                event->status);
3942
3943         switch (event->type) {
3944         case LNET_EVENT_UNLINK:
3945                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3946                        libcfs_nid2str(ev_info->mt_nid));
3947                 /* fallthrough */
3948         case LNET_EVENT_REPLY:
3949                 lnet_handle_recovery_reply(ev_info, event->status, false,
3950                                            event->type == LNET_EVENT_UNLINK);
3951                 break;
3952         case LNET_EVENT_SEND:
3953                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
3954                                libcfs_nid2str(ev_info->mt_nid),
3955                                (event->status) ? "unsuccessfully" :
3956                                "successfully", event->status);
3957                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
3958                 break;
3959         default:
3960                 CERROR("Unexpected event: %d\n", event->type);
3961                 break;
3962         }
3963         if (event->unlinked) {
3964                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
3965                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3966                 lnet_ping_buffer_decref(pbuf);
3967         }
3968 }
3969
3970 static int
3971 lnet_rsp_tracker_create(void)
3972 {
3973         struct list_head **rstqs;
3974
3975         rstqs = lnet_create_array_of_queues();
3976         if (!rstqs)
3977                 return -ENOMEM;
3978
3979         the_lnet.ln_mt_rstq = rstqs;
3980
3981         return 0;
3982 }
3983
3984 static void
3985 lnet_rsp_tracker_clean(void)
3986 {
3987         lnet_finalize_expired_responses();
3988
3989         cfs_percpt_free(the_lnet.ln_mt_rstq);
3990         the_lnet.ln_mt_rstq = NULL;
3991 }
3992
3993 int lnet_monitor_thr_start(void)
3994 {
3995         int rc = 0;
3996         struct task_struct *task;
3997
3998         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
3999                 return -EALREADY;
4000
4001         rc = lnet_resendqs_create();
4002         if (rc)
4003                 return rc;
4004
4005         rc = lnet_rsp_tracker_create();
4006         if (rc)
4007                 goto clean_queues;
4008
4009         sema_init(&the_lnet.ln_mt_signal, 0);
4010
4011         lnet_net_lock(LNET_LOCK_EX);
4012         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4013         lnet_net_unlock(LNET_LOCK_EX);
4014         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4015         if (IS_ERR(task)) {
4016                 rc = PTR_ERR(task);
4017                 CERROR("Can't start monitor thread: %d\n", rc);
4018                 goto clean_thread;
4019         }
4020
4021         return 0;
4022
4023 clean_thread:
4024         lnet_net_lock(LNET_LOCK_EX);
4025         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4026         lnet_net_unlock(LNET_LOCK_EX);
4027         /* block until event callback signals exit */
4028         down(&the_lnet.ln_mt_signal);
4029         /* clean up */
4030         lnet_net_lock(LNET_LOCK_EX);
4031         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4032         lnet_net_unlock(LNET_LOCK_EX);
4033         lnet_rsp_tracker_clean();
4034         lnet_clean_local_ni_recoveryq();
4035         lnet_clean_peer_ni_recoveryq();
4036         lnet_clean_resendqs();
4037         the_lnet.ln_mt_handler = NULL;
4038         return rc;
4039 clean_queues:
4040         lnet_rsp_tracker_clean();
4041         lnet_clean_local_ni_recoveryq();
4042         lnet_clean_peer_ni_recoveryq();
4043         lnet_clean_resendqs();
4044         return rc;
4045 }
4046
4047 void lnet_monitor_thr_stop(void)
4048 {
4049         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4050                 return;
4051
4052         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4053         lnet_net_lock(LNET_LOCK_EX);
4054         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4055         lnet_net_unlock(LNET_LOCK_EX);
4056
4057         /* tell the monitor thread that we're shutting down */
4058         complete(&the_lnet.ln_mt_wait_complete);
4059
4060         /* block until monitor thread signals that it's done */
4061         down(&the_lnet.ln_mt_signal);
4062         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4063
4064         /* perform cleanup tasks */
4065         lnet_rsp_tracker_clean();
4066         lnet_clean_local_ni_recoveryq();
4067         lnet_clean_peer_ni_recoveryq();
4068         lnet_clean_resendqs();
4069 }
4070
4071 void
4072 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4073                   __u32 msg_type)
4074 {
4075         lnet_net_lock(cpt);
4076         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4077         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4078         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4079         lnet_net_unlock(cpt);
4080
4081         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4082 }
4083
4084 static void
4085 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4086 {
4087         struct lnet_hdr *hdr = &msg->msg_hdr;
4088
4089         if (msg->msg_wanted != 0)
4090                 lnet_setpayloadbuffer(msg);
4091
4092         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4093
4094         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4095          * it back into the ACK during lnet_finalize() */
4096         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4097                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4098
4099         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4100                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4101 }
4102
4103 static int
4104 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4105 {
4106         struct lnet_hdr         *hdr = &msg->msg_hdr;
4107         struct lnet_match_info  info;
4108         int                     rc;
4109         bool                    ready_delay;
4110
4111         /* Convert put fields to host byte order */
4112         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4113         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4114         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4115
4116         /* Primary peer NID. */
4117         info.mi_id.nid  = msg->msg_initiator;
4118         info.mi_id.pid  = hdr->src_pid;
4119         info.mi_opc     = LNET_MD_OP_PUT;
4120         info.mi_portal  = hdr->msg.put.ptl_index;
4121         info.mi_rlength = hdr->payload_length;
4122         info.mi_roffset = hdr->msg.put.offset;
4123         info.mi_mbits   = hdr->msg.put.match_bits;
4124         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4125
4126         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4127         ready_delay = msg->msg_rx_ready_delay;
4128
4129  again:
4130         rc = lnet_ptl_match_md(&info, msg);
4131         switch (rc) {
4132         default:
4133                 LBUG();
4134
4135         case LNET_MATCHMD_OK:
4136                 lnet_recv_put(ni, msg);
4137                 return 0;
4138
4139         case LNET_MATCHMD_NONE:
4140                 if (ready_delay)
4141                         /* no eager_recv or has already called it, should
4142                          * have been attached on delayed list */
4143                         return 0;
4144
4145                 rc = lnet_ni_eager_recv(ni, msg);
4146                 if (rc == 0) {
4147                         ready_delay = true;
4148                         goto again;
4149                 }
4150                 /* fall through */
4151
4152         case LNET_MATCHMD_DROP:
4153                 CNETERR("Dropping PUT from %s portal %d match %llu"
4154                         " offset %d length %d: %d\n",
4155                         libcfs_id2str(info.mi_id), info.mi_portal,
4156                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4157
4158                 return -ENOENT; /* -ve: OK but no match */
4159         }
4160 }
4161
4162 static int
4163 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4164 {
4165         struct lnet_match_info info;
4166         struct lnet_hdr *hdr = &msg->msg_hdr;
4167         struct lnet_process_id source_id;
4168         struct lnet_handle_wire reply_wmd;
4169         int rc;
4170
4171         /* Convert get fields to host byte order */
4172         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4173         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4174         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4175         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4176
4177         source_id.nid = hdr->src_nid;
4178         source_id.pid = hdr->src_pid;
4179         /* Primary peer NID */
4180         info.mi_id.nid  = msg->msg_initiator;
4181         info.mi_id.pid  = hdr->src_pid;
4182         info.mi_opc     = LNET_MD_OP_GET;
4183         info.mi_portal  = hdr->msg.get.ptl_index;
4184         info.mi_rlength = hdr->msg.get.sink_length;
4185         info.mi_roffset = hdr->msg.get.src_offset;
4186         info.mi_mbits   = hdr->msg.get.match_bits;
4187         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4188
4189         rc = lnet_ptl_match_md(&info, msg);
4190         if (rc == LNET_MATCHMD_DROP) {
4191                 CNETERR("Dropping GET from %s portal %d match %llu"
4192                         " offset %d length %d\n",
4193                         libcfs_id2str(info.mi_id), info.mi_portal,
4194                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4195                 return -ENOENT; /* -ve: OK but no match */
4196         }
4197
4198         LASSERT(rc == LNET_MATCHMD_OK);
4199
4200         lnet_build_msg_event(msg, LNET_EVENT_GET);
4201
4202         reply_wmd = hdr->msg.get.return_wmd;
4203
4204         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4205                        msg->msg_offset, msg->msg_wanted);
4206
4207         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4208
4209         if (rdma_get) {
                /* The LND completes the REPLY from its recv procedure */
4211                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4212                              msg->msg_offset, msg->msg_len, msg->msg_len);
4213                 return 0;
4214         }
4215
4216         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4217         msg->msg_receiving = 0;
4218
4219         rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
4220         if (rc < 0) {
4221                 /* didn't get as far as lnet_ni_send() */
4222                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4223                        libcfs_nid2str(ni->ni_nid),
4224                        libcfs_id2str(info.mi_id), rc);
4225
4226                 lnet_finalize(msg, rc);
4227         }
4228
4229         return 0;
4230 }
4231
4232 static int
4233 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4234 {
4235         void *private = msg->msg_private;
4236         struct lnet_hdr *hdr = &msg->msg_hdr;
4237         struct lnet_process_id src = {0};
4238         struct lnet_libmd *md;
4239         unsigned int rlength;
4240         unsigned int mlength;
4241         int cpt;
4242
4243         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4244         lnet_res_lock(cpt);
4245
4246         src.nid = hdr->src_nid;
4247         src.pid = hdr->src_pid;
4248
4249         /* NB handles only looked up by creator (no flips) */
4250         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4251         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4252                 CNETERR("%s: Dropping REPLY from %s for %s "
4253                         "MD %#llx.%#llx\n",
4254                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4255                         (md == NULL) ? "invalid" : "inactive",
4256                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4257                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4258                 if (md != NULL && md->md_me != NULL)
4259                         CERROR("REPLY MD also attached to portal %d\n",
4260                                md->md_me->me_portal);
4261
4262                 lnet_res_unlock(cpt);
4263                 return -ENOENT; /* -ve: OK but no match */
4264         }
4265
4266         LASSERT(md->md_offset == 0);
4267
4268         rlength = hdr->payload_length;
4269         mlength = min(rlength, md->md_length);
4270
4271         if (mlength < rlength &&
4272             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4273                 CNETERR("%s: Dropping REPLY from %s length %d "
4274                         "for MD %#llx would overflow (%d)\n",
4275                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4276                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4277                         mlength);
4278                 lnet_res_unlock(cpt);
4279                 return -ENOENT; /* -ve: OK but no match */
4280         }
4281
4282         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4283                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4284                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4285
4286         lnet_msg_attach_md(msg, md, 0, mlength);
4287
4288         if (mlength != 0)
4289                 lnet_setpayloadbuffer(msg);
4290
4291         lnet_res_unlock(cpt);
4292
4293         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4294
4295         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4296         return 0;
4297 }
4298
4299 static int
4300 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4301 {
4302         struct lnet_hdr *hdr = &msg->msg_hdr;
4303         struct lnet_process_id src = {0};
4304         struct lnet_libmd *md;
4305         int cpt;
4306
4307         src.nid = hdr->src_nid;
4308         src.pid = hdr->src_pid;
4309
4310         /* Convert ack fields to host byte order */
4311         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4312         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4313
4314         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4315         lnet_res_lock(cpt);
4316
4317         /* NB handles only looked up by creator (no flips) */
4318         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4319         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4320                 /* Don't moan; this is expected */
4321                 CDEBUG(D_NET,
4322                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4323                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4324                        (md == NULL) ? "invalid" : "inactive",
4325                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4326                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4327                 if (md != NULL && md->md_me != NULL)
4328                         CERROR("Source MD also attached to portal %d\n",
4329                                md->md_me->me_portal);
4330
4331                 lnet_res_unlock(cpt);
4332                 return -ENOENT;                  /* -ve! */
4333         }
4334
4335         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4336                libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
4337                hdr->msg.ack.dst_wmd.wh_object_cookie);
4338
4339         lnet_msg_attach_md(msg, md, 0, 0);
4340
4341         lnet_res_unlock(cpt);
4342
4343         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4344
4345         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4346         return 0;
4347 }
4348
4349 /**
4350  * \retval LNET_CREDIT_OK       If \a msg is forwarded
 * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4352  * \retval -ve                  error code
4353  */
4354 int
4355 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4356 {
4357         int     rc = 0;
4358
4359         if (!the_lnet.ln_routing)
4360                 return -ECANCELED;
4361
4362         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4363             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4364                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4365                         msg->msg_rx_ready_delay = 1;
4366                 } else {
4367                         lnet_net_unlock(msg->msg_rx_cpt);
4368                         rc = lnet_ni_eager_recv(ni, msg);
4369                         lnet_net_lock(msg->msg_rx_cpt);
4370                 }
4371         }
4372
4373         if (rc == 0)
4374                 rc = lnet_post_routed_recv_locked(msg, 0);
4375         return rc;
4376 }
4377
4378 int
4379 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4380 {
4381         int     rc;
4382
4383         switch (msg->msg_type) {
4384         case LNET_MSG_ACK:
4385                 rc = lnet_parse_ack(ni, msg);
4386                 break;
4387         case LNET_MSG_PUT:
4388                 rc = lnet_parse_put(ni, msg);
4389                 break;
4390         case LNET_MSG_GET:
4391                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4392                 break;
4393         case LNET_MSG_REPLY:
4394                 rc = lnet_parse_reply(ni, msg);
4395                 break;
4396         default: /* prevent an unused label if !kernel */
4397                 LASSERT(0);
4398                 return -EPROTO;
4399         }
4400
4401         LASSERT(rc == 0 || rc == -ENOENT);
4402         return rc;
4403 }
4404
4405 char *
lnet_msgtyp2str(int type)
4407 {
4408         switch (type) {
4409         case LNET_MSG_ACK:
4410                 return ("ACK");
4411         case LNET_MSG_PUT:
4412                 return ("PUT");
4413         case LNET_MSG_GET:
4414                 return ("GET");
4415         case LNET_MSG_REPLY:
4416                 return ("REPLY");
4417         case LNET_MSG_HELLO:
4418                 return ("HELLO");
4419         default:
4420                 return ("<UNKNOWN>");
4421         }
4422 }
4423
4424 int
4425 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
4426            void *private, int rdma_req)
4427 {
4428         struct lnet_peer_ni *lpni;
4429         struct lnet_msg *msg;
4430         __u32 payload_length;
4431         lnet_pid_t dest_pid;
4432         lnet_nid_t dest_nid;
4433         lnet_nid_t src_nid;
4434         bool push = false;
4435         int for_me;
4436         __u32 type;
4437         int rc = 0;
4438         int cpt;
4439
        LASSERT(!in_interrupt());
4441
4442         type = le32_to_cpu(hdr->type);
4443         src_nid = le64_to_cpu(hdr->src_nid);
4444         dest_nid = le64_to_cpu(hdr->dest_nid);
4445         dest_pid = le32_to_cpu(hdr->dest_pid);
4446         payload_length = le32_to_cpu(hdr->payload_length);
4447
4448         for_me = (ni->ni_nid == dest_nid);
4449         cpt = lnet_cpt_of_nid(from_nid, ni);
4450
4451         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4452                 libcfs_nid2str(dest_nid),
4453                 libcfs_nid2str(ni->ni_nid),
4454                 libcfs_nid2str(src_nid),
4455                 lnet_msgtyp2str(type),
4456                 (for_me) ? "for me" : "routed");
4457
4458         switch (type) {
4459         case LNET_MSG_ACK:
4460         case LNET_MSG_GET:
4461                 if (payload_length > 0) {
4462                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4463                                libcfs_nid2str(from_nid),
4464                                libcfs_nid2str(src_nid),
4465                                lnet_msgtyp2str(type), payload_length);
4466                         return -EPROTO;
4467                 }
4468                 break;
4469
4470         case LNET_MSG_PUT:
4471         case LNET_MSG_REPLY:
4472                 if (payload_length >
4473                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4474                         CERROR("%s, src %s: bad %s payload %d "
4475                                "(%d max expected)\n",
4476                                libcfs_nid2str(from_nid),
4477                                libcfs_nid2str(src_nid),
4478                                lnet_msgtyp2str(type),
4479                                payload_length,
4480                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4481                         return -EPROTO;
4482                 }
4483                 break;
4484
4485         default:
4486                 CERROR("%s, src %s: Bad message type 0x%x\n",
4487                        libcfs_nid2str(from_nid),
4488                        libcfs_nid2str(src_nid), type);
4489                 return -EPROTO;
4490         }
4491
4492         if (the_lnet.ln_routing &&
4493             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4494                 lnet_ni_lock(ni);
4495                 spin_lock(&ni->ni_net->net_lock);
4496                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4497                 spin_unlock(&ni->ni_net->net_lock);
4498                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4499                 lnet_ni_unlock(ni);
4500         }
4501
4502         if (push)
4503                 lnet_push_update_to_peers(1);
4504
4505         /* Regard a bad destination NID as a protocol error.  Senders should
4506          * know what they're doing; if they don't they're misconfigured, buggy
4507          * or malicious so we chop them off at the knees :) */
4508
4509         if (!for_me) {
4510                 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
4511                         /* should have gone direct */
4512                         CERROR("%s, src %s: Bad dest nid %s "
4513                                "(should have been sent direct)\n",
4514                                 libcfs_nid2str(from_nid),
4515                                 libcfs_nid2str(src_nid),
4516                                 libcfs_nid2str(dest_nid));
4517                         return -EPROTO;
4518                 }
4519
4520                 if (lnet_islocalnid(dest_nid)) {
4521                         /* dest is another local NI; sender should have used
4522                          * this node's NID on its own network */
4523                         CERROR("%s, src %s: Bad dest nid %s "
4524                                "(it's my nid but on a different network)\n",
4525                                 libcfs_nid2str(from_nid),
4526                                 libcfs_nid2str(src_nid),
4527                                 libcfs_nid2str(dest_nid));
4528                         return -EPROTO;
4529                 }
4530
4531                 if (rdma_req && type == LNET_MSG_GET) {
4532                         CERROR("%s, src %s: Bad optimized GET for %s "
4533                                "(final destination must be me)\n",
4534                                 libcfs_nid2str(from_nid),
4535                                 libcfs_nid2str(src_nid),
4536                                 libcfs_nid2str(dest_nid));
4537                         return -EPROTO;
4538                 }
4539
4540                 if (!the_lnet.ln_routing) {
4541                         CERROR("%s, src %s: Dropping message for %s "
4542                                "(routing not enabled)\n",
4543                                 libcfs_nid2str(from_nid),
4544                                 libcfs_nid2str(src_nid),
4545                                 libcfs_nid2str(dest_nid));
4546                         goto drop;
4547                 }
4548         }
4549
4550         /* Message looks OK; we're not going to return an error, so we MUST
4551          * call back lnd_recv() come what may... */
4552
4553         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4554             fail_peer(src_nid, 0)) {                    /* shall we now? */
4555                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4556                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4557                        lnet_msgtyp2str(type));
4558                 goto drop;
4559         }
4560
4561         if (!list_empty(&the_lnet.ln_drop_rules) &&
4562             lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
4563                 CDEBUG(D_NET,
4564                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4565                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4566                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4567                 goto drop;
4568         }
4569
4570         msg = lnet_msg_alloc();
4571         if (msg == NULL) {
4572                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4573                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4574                        lnet_msgtyp2str(type));
4575                 goto drop;
4576         }
4577
4578         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4579          * pointers NULL etc */
4580
4581         msg->msg_type = type;
4582         msg->msg_private = private;
4583         msg->msg_receiving = 1;
4584         msg->msg_rdma_get = rdma_req;
4585         msg->msg_len = msg->msg_wanted = payload_length;
4586         msg->msg_offset = 0;
4587         msg->msg_hdr = *hdr;
4588         /* for building message event */
4589         msg->msg_from = from_nid;
4590         if (!for_me) {
4591                 msg->msg_target.pid     = dest_pid;
4592                 msg->msg_target.nid     = dest_nid;
4593                 msg->msg_routing        = 1;
4594
4595         } else {
4596                 /* convert common msg->hdr fields to host byteorder */
4597                 msg->msg_hdr.type       = type;
4598                 msg->msg_hdr.src_nid    = src_nid;
4599                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4600                 msg->msg_hdr.dest_nid   = dest_nid;
4601                 msg->msg_hdr.dest_pid   = dest_pid;
4602                 msg->msg_hdr.payload_length = payload_length;
4603         }
4604
4605         lnet_net_lock(cpt);
4606         lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
4607         if (IS_ERR(lpni)) {
4608                 lnet_net_unlock(cpt);
4609                 CERROR("%s, src %s: Dropping %s "
4610                        "(error %ld looking up sender)\n",
4611                        libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
4612                        lnet_msgtyp2str(type), PTR_ERR(lpni));
4613                 lnet_msg_free(msg);
                if (PTR_ERR(lpni) == -ESHUTDOWN)
4615                         /* We are shutting down.  Don't do anything more */
4616                         return 0;
4617                 goto drop;
4618         }
4619
4620         /* If this message was forwarded to us from a router then we may need
4621          * to update router aliveness or check for an asymmetrical route
4622          * (or both)
4623          */
4624         if (((lnet_drop_asym_route && for_me) ||
4625              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4626             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
4627                 __u32 src_net_id = LNET_NIDNET(src_nid);
4628                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4629                 struct lnet_route *route;
4630                 bool found = false;
4631
4632                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4633                         if (route->lr_net == src_net_id) {
4634                                 found = true;
4635                                 /* If we're transitioning the gateway from
4636                                  * dead -> alive, and discovery is disabled
4637                                  * locally or on the gateway, then we need to
4638                                  * update the cached route aliveness for each
4639                                  * route to the src_nid's net.
4640                                  *
4641                                  * Otherwise, we're only checking for
4642                                  * symmetrical route, and we can break the
4643                                  * loop
4644                                  */
4645                                 if (!gw->lp_alive &&
4646                                     lnet_is_discovery_disabled(gw))
4647                                         lnet_set_route_aliveness(route, true);
4648                                 else
4649                                         break;
4650                         }
4651                 }
4652                 if (lnet_drop_asym_route && for_me && !found) {
4653                         lnet_net_unlock(cpt);
4654                         /* we would not use from_nid to route a message to
4655                          * src_nid
4656                          * => asymmetric routing detected but forbidden
4657                          */
4658                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4659                                libcfs_nid2str(from_nid),
4660                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4661                         lnet_msg_free(msg);
4662                         goto drop;
4663                 }
4664                 if (!gw->lp_alive) {
4665                         struct lnet_peer_net *lpn;
4666                         struct lnet_peer_ni *lpni2;
4667
4668                         gw->lp_alive = true;
4669                         /* Mark all remote NIs on src_nid's net UP */
4670                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4671                         if (lpn)
4672                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4673                                                     lpni_peer_nis)
4674                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4675                 }
4676         }
4677
4678         lpni->lpni_last_alive = ktime_get_seconds();
4679
4680         msg->msg_rxpeer = lpni;
4681         msg->msg_rxni = ni;
4682         lnet_ni_addref_locked(ni, cpt);
4683         /* Multi-Rail: Primary NID of source. */
4684         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4685
4686         /*
         * Mark the status of this lpni as UP since we received a message
         * from it. Normally ns_status is reported back by ping responses,
         * which mark the NI up or down on the remote; we cache that here.
4690          */
4691         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4692
4693         lnet_msg_commit(msg, cpt);
4694
4695         /* message delay simulation */
4696         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4697                      lnet_delay_rule_match_locked(hdr, msg))) {
4698                 lnet_net_unlock(cpt);
4699                 return 0;
4700         }
4701
4702         if (!for_me) {
4703                 rc = lnet_parse_forward_locked(ni, msg);
4704                 lnet_net_unlock(cpt);
4705
4706                 if (rc < 0)
4707                         goto free_drop;
4708
4709                 if (rc == LNET_CREDIT_OK) {
4710                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4711                                      0, payload_length, payload_length);
4712                 }
4713                 return 0;
4714         }
4715
4716         lnet_net_unlock(cpt);
4717
4718         rc = lnet_parse_local(ni, msg);
4719         if (rc != 0)
4720                 goto free_drop;
4721         return 0;
4722
4723  free_drop:
4724         LASSERT(msg->msg_md == NULL);
4725         lnet_finalize(msg, rc);
4726
4727  drop:
4728         lnet_drop_message(ni, cpt, private, payload_length, type);
4729         return 0;
4730 }
4731 EXPORT_SYMBOL(lnet_parse);
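
/*
 * Illustrative sketch, compiled out: how an LND receive path might hand
 * an incoming header to lnet_parse().  "kexamplend_recv_hdr" and its
 * arguments are hypothetical; only the lnet_parse() call itself is real.
 */
#if 0
static void
kexamplend_recv_hdr(struct lnet_ni *ni, struct lnet_hdr *hdr,
                    lnet_nid_t from_nid, void *rx_private)
{
        int rc;

        /* rdma_req == 0: this hypothetical LND does not optimize GETs */
        rc = lnet_parse(ni, hdr, from_nid, rx_private, 0);
        if (rc < 0)
                /* lnet_parse() only returns an error for protocol
                 * violations; in that case it has not consumed the
                 * message, so the LND must discard the payload itself */
                CERROR("Dropping message from %s: %d\n",
                       libcfs_nid2str(from_nid), rc);
}
#endif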
4732
4733 void
4734 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4735 {
4736         while (!list_empty(head)) {
4737                 struct lnet_process_id id = {0};
4738                 struct lnet_msg *msg;
4739
4740                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4741                 list_del(&msg->msg_list);
4742
4743                 id.nid = msg->msg_hdr.src_nid;
4744                 id.pid = msg->msg_hdr.src_pid;
4745
4746                 LASSERT(msg->msg_md == NULL);
4747                 LASSERT(msg->msg_rx_delayed);
4748                 LASSERT(msg->msg_rxpeer != NULL);
4749                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4750
4751                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4752                       " offset %d length %d: %s\n",
4753                       libcfs_id2str(id),
4754                       msg->msg_hdr.msg.put.ptl_index,
4755                       msg->msg_hdr.msg.put.match_bits,
4756                       msg->msg_hdr.msg.put.offset,
4757                       msg->msg_hdr.payload_length, reason);
4758
4759                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4760                  * called lnet_drop_message(), so I just hang onto msg as well
4761                  * until that's done */
4762
4763                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4764                                   msg->msg_private, msg->msg_len,
4765                                   msg->msg_type);
4766
4767                 msg->msg_no_resend = true;
4768                 /*
                 * NB: the message will not generate an event because no MD
                 * is attached, but we should still give an error code so that
                 * lnet_msg_decommit() can skip counter operations and checks.
4772                  */
4773                 lnet_finalize(msg, -ENOENT);
4774         }
4775 }
4776
4777 void
4778 lnet_recv_delayed_msg_list(struct list_head *head)
4779 {
4780         while (!list_empty(head)) {
4781                 struct lnet_msg *msg;
4782                 struct lnet_process_id id;
4783
4784                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4785                 list_del(&msg->msg_list);
4786
4787                 /* md won't disappear under me, since each msg
4788                  * holds a ref on it */
4789
4790                 id.nid = msg->msg_hdr.src_nid;
4791                 id.pid = msg->msg_hdr.src_pid;
4792
4793                 LASSERT(msg->msg_rx_delayed);
4794                 LASSERT(msg->msg_md != NULL);
4795                 LASSERT(msg->msg_rxpeer != NULL);
4796                 LASSERT(msg->msg_rxni != NULL);
4797                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4798
4799                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4800                        "match %llu offset %d length %d.\n",
4801                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4802                         msg->msg_hdr.msg.put.match_bits,
4803                         msg->msg_hdr.msg.put.offset,
4804                         msg->msg_hdr.payload_length);
4805
4806                 lnet_recv_put(msg->msg_rxni, msg);
4807         }
4808 }
4809
4810 static void
4811 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4812                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4813 {
4814         s64 timeout_ns;
4815         struct lnet_rsp_tracker *local_rspt;
4816
4817         /*
4818          * MD has a refcount taken by message so it's not going away.
         * The MD, however, can still be looked up, so we need to secure
         * access to md_rspt_ptr by taking the res_lock.
4821          * The rspt can be accessed without protection up to when it gets
4822          * added to the list.
4823          */
4824
4825         lnet_res_lock(cpt);
4826         local_rspt = md->md_rspt_ptr;
4827         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4828         if (local_rspt != NULL) {
4829                 /*
4830                  * we already have an rspt attached to the md, so we'll
4831                  * update the deadline on that one.
4832                  */
4833                 lnet_rspt_free(rspt, cpt);
4834         } else {
4835                 /* new md */
4836                 rspt->rspt_mdh = mdh;
4837                 rspt->rspt_cpt = cpt;
4838                 /* store the rspt so we can access it when we get the REPLY */
4839                 md->md_rspt_ptr = rspt;
4840                 local_rspt = rspt;
4841         }
4842         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4843
4844         /*
         * Add to the list of tracked responses. It's added to the tail
         * of the list so that the oldest entries expire first.
4847          */
4848         lnet_net_lock(cpt);
4849         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4850         lnet_net_unlock(cpt);
4851         lnet_res_unlock(cpt);
4852 }
4853
4854 /**
4855  * Initiate an asynchronous PUT operation.
4856  *
4857  * There are several events associated with a PUT: completion of the send on
4858  * the initiator node (LNET_EVENT_SEND), and when the send completes
4859  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4860  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4861  * used at the target node to indicate the completion of incoming data
4862  * delivery.
4863  *
4864  * The local events will be logged in the EQ associated with the MD pointed to
4865  * by \a mdh handle. Using a MD without an associated EQ results in these
4866  * events being discarded. In this case, the caller must have another
4867  * mechanism (e.g., a higher level protocol) for determining when it is safe
4868  * to modify the memory region associated with the MD.
4869  *
4870  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4871  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4872  *
4873  * \param self Indicates the NID of a local interface through which to send
4874  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4875  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4876  * must be "free floating" (See LNetMDBind()).
4877  * \param ack Controls whether an acknowledgment is requested.
4878  * Acknowledgments are only sent when they are requested by the initiating
4879  * process and the target MD enables them.
4880  * \param target A process identifier for the target process.
4881  * \param portal The index in the \a target's portal table.
4882  * \param match_bits The match bits to use for MD selection at the target
4883  * process.
4884  * \param offset The offset into the target MD (only used when the target
4885  * MD has the LNET_MD_MANAGE_REMOTE option set).
4886  * \param hdr_data 64 bits of user data that can be included in the message
4887  * header. This data is written to an event queue entry at the target if an
4888  * EQ is present on the matching MD.
4889  *
4890  * \retval  0      Success, and only in this case events will be generated
4891  * and logged to EQ (if it exists).
4892  * \retval -EIO    Simulated failure.
4893  * \retval -ENOMEM Memory allocation failure.
4894  * \retval -ENOENT Invalid MD object.
4895  *
4896  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4897  */
4898 int
4899 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4900         struct lnet_process_id target, unsigned int portal,
4901         __u64 match_bits, unsigned int offset,
4902         __u64 hdr_data)
4903 {
4904         struct lnet_msg *msg;
4905         struct lnet_libmd *md;
4906         int cpt;
4907         int rc;
4908         struct lnet_rsp_tracker *rspt = NULL;
4909
4910         LASSERT(the_lnet.ln_refcount > 0);
4911
4912         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4913             fail_peer(target.nid, 1)) {                 /* shall we now? */
4914                 CERROR("Dropping PUT to %s: simulated failure\n",
4915                        libcfs_id2str(target));
4916                 return -EIO;
4917         }
4918
4919         msg = lnet_msg_alloc();
4920         if (msg == NULL) {
4921                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4922                        libcfs_id2str(target));
4923                 return -ENOMEM;
4924         }
4925         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
4926
4927         cpt = lnet_cpt_of_cookie(mdh.cookie);
4928
4929         if (ack == LNET_ACK_REQ) {
4930                 rspt = lnet_rspt_alloc(cpt);
                if (!rspt) {
                        CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
                               libcfs_id2str(target));
                        /* also free the message we already allocated */
                        lnet_msg_free(msg);
                        return -ENOMEM;
                }
4936                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4937         }
4938
4939         lnet_res_lock(cpt);
4940
4941         md = lnet_handle2md(&mdh);
4942         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4943                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
4944                        match_bits, portal, libcfs_id2str(target),
4945                        md == NULL ? -1 : md->md_threshold);
4946                 if (md != NULL && md->md_me != NULL)
4947                         CERROR("Source MD also attached to portal %d\n",
4948                                md->md_me->me_portal);
4949                 lnet_res_unlock(cpt);
4950
4951                 if (rspt)
4952                         lnet_rspt_free(rspt, cpt);
4953
4954                 lnet_msg_free(msg);
4955                 return -ENOENT;
4956         }
4957
4958         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
4959
4960         lnet_msg_attach_md(msg, md, 0, 0);
4961
4962         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
4963
4964         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
4965         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
4966         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
4967         msg->msg_hdr.msg.put.hdr_data = hdr_data;
4968
4969         /* NB handles only looked up by creator (no flips) */
4970         if (ack == LNET_ACK_REQ) {
4971                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4972                         the_lnet.ln_interface_cookie;
4973                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4974                         md->md_lh.lh_cookie;
4975         } else {
4976                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
4977                         LNET_WIRE_HANDLE_COOKIE_NONE;
4978                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
4979                         LNET_WIRE_HANDLE_COOKIE_NONE;
4980         }
4981
4982         lnet_res_unlock(cpt);
4983
4984         lnet_build_msg_event(msg, LNET_EVENT_SEND);
4985
4986         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
4987                                                    md->md_options))
4988                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
4989         else if (rspt)
4990                 lnet_rspt_free(rspt, cpt);
4991
4992         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
4993                                  CFS_FAIL_ONCE))
4994                 rc = -EIO;
4995         else
4996                 rc = lnet_send(self, msg, LNET_NID_ANY);
4997
4998         if (rc != 0) {
4999                 CNETERR("Error sending PUT to %s: %d\n",
5000                         libcfs_id2str(target), rc);
5001                 msg->msg_no_resend = true;
5002                 lnet_finalize(msg, rc);
5003         }
5004
5005         /* completion will be signalled by an event */
5006         return 0;
5007 }
5008 EXPORT_SYMBOL(LNetPut);
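
/*
 * Illustrative sketch, compiled out: a caller issuing a PUT with an ACK
 * requested.  "example_put", its MD handle, portal and match bits are
 * hypothetical; the MD is assumed to have been created with LNetMDBind().
 */
#if 0
static int
example_put(struct lnet_handle_md mdh, struct lnet_process_id target)
{
        int rc;

        rc = LNetPut(LNET_NID_ANY,      /* let LNet choose the local NI */
                     mdh,               /* free-floating MD */
                     LNET_ACK_REQ,      /* ask the target for an ACK */
                     target, 0 /* portal */, 0x1ULL /* match bits */,
                     0 /* offset */, 0 /* hdr_data */);
        if (rc != 0)
                CERROR("LNetPut to %s failed: %d\n",
                       libcfs_id2str(target), rc);
        /* on success, LNET_EVENT_SEND and (if the target MD allows it)
         * LNET_EVENT_ACK will be delivered to the MD's EQ */
        return rc;
}
#endif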
5009
5010 /*
 * The LND can DMA directly to the GET MD (i.e. no REPLY msg).  This
5012  * returns a msg for the LND to pass to lnet_finalize() when the sink
5013  * data has been received.
5014  *
5015  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5016  * lnet_finalize() is called on it, so the LND must call this first
5017  */
5018 struct lnet_msg *
5019 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5020 {
5021         struct lnet_msg *msg = lnet_msg_alloc();
5022         struct lnet_libmd *getmd = getmsg->msg_md;
5023         struct lnet_process_id peer_id = getmsg->msg_target;
5024         int cpt;
5025
5026         LASSERT(!getmsg->msg_target_is_router);
5027         LASSERT(!getmsg->msg_routing);
5028
5029         if (msg == NULL) {
5030                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5031                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
5032                 goto drop;
5033         }
5034
5035         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5036         lnet_res_lock(cpt);
5037
5038         LASSERT(getmd->md_refcount > 0);
5039
5040         if (getmd->md_threshold == 0) {
5041                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5042                         libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
5043                         getmd);
5044                 lnet_res_unlock(cpt);
5045                 goto drop;
5046         }
5047
5048         LASSERT(getmd->md_offset == 0);
5049
5050         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5051                libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
5052
5053         /* setup information for lnet_build_msg_event */
5054         msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
5055         msg->msg_from = peer_id.nid;
5056         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5057         msg->msg_hdr.src_nid = peer_id.nid;
5058         msg->msg_hdr.payload_length = getmd->md_length;
5059         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5060
5061         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5062         lnet_res_unlock(cpt);
5063
5064         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5065
5066         lnet_net_lock(cpt);
5067         lnet_msg_commit(msg, cpt);
5068         lnet_net_unlock(cpt);
5069
5070         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5071
5072         return msg;
5073
5074  drop:
5075         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5076
5077         lnet_net_lock(cpt);
5078         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5079         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5080         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5081                 getmd->md_length;
5082         lnet_net_unlock(cpt);
5083
5084         if (msg != NULL)
5085                 lnet_msg_free(msg);
5086
5087         return NULL;
5088 }
5089 EXPORT_SYMBOL(lnet_create_reply_msg);
5090
5091 void
5092 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5093                        unsigned int len)
5094 {
        /* Set the REPLY length, now that the RDMA that elides the REPLY
         * message has completed and the length is known. */
5097         LASSERT(reply != NULL);
5098         LASSERT(reply->msg_type == LNET_MSG_GET);
5099         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5100
5101         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5102          * the end of my buffer, I might as well be dead. */
5103         LASSERT(len <= reply->msg_ev.mlength);
5104
5105         reply->msg_ev.mlength = len;
5106 }
5107 EXPORT_SYMBOL(lnet_set_reply_msg_len);
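
/*
 * Illustrative sketch, compiled out: the optimized-GET completion path
 * of a hypothetical LND.  It creates the REPLY stand-in before
 * finalizing the original GET (which frees it), reports how many bytes
 * the RDMA actually delivered, then finalizes the REPLY.
 */
#if 0
static void
kexamplend_get_done(struct lnet_ni *ni, struct lnet_msg *getmsg,
                    unsigned int bytes_rdmad, int status)
{
        struct lnet_msg *reply;

        /* must be called before lnet_finalize(getmsg, ...) frees it */
        reply = lnet_create_reply_msg(ni, getmsg);
        lnet_finalize(getmsg, status);

        if (reply == NULL)
                return; /* already counted as a drop */

        /* tell LNet how much the peer actually wrote */
        lnet_set_reply_msg_len(ni, reply, bytes_rdmad);
        lnet_finalize(reply, status);
}
#endif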
5108
5109 /**
5110  * Initiate an asynchronous GET operation.
5111  *
5112  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5113  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5114  * the target node in the REPLY has been written to local MD.
5115  *
5116  * On the target node, an LNET_EVENT_GET is logged when the GET request
5117  * arrives and is accepted into a MD.
5118  *
5119  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5120  * \param mdh A handle for the MD that describes the memory into which the
5121  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5122  *
5123  * \retval  0      Success, and only in this case events will be generated
5124  * and logged to EQ (if it exists) of the MD.
5125  * \retval -EIO    Simulated failure.
5126  * \retval -ENOMEM Memory allocation failure.
5127  * \retval -ENOENT Invalid MD object.
5128  */
5129 int
5130 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
5131         struct lnet_process_id target, unsigned int portal,
5132         __u64 match_bits, unsigned int offset, bool recovery)
5133 {
5134         struct lnet_msg *msg;
5135         struct lnet_libmd *md;
5136         struct lnet_rsp_tracker *rspt;
5137         int cpt;
5138         int rc;
5139
5140         LASSERT(the_lnet.ln_refcount > 0);
5141
5142         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5143             fail_peer(target.nid, 1))                   /* shall we now? */
5144         {
5145                 CERROR("Dropping GET to %s: simulated failure\n",
5146                        libcfs_id2str(target));
5147                 return -EIO;
5148         }
5149
5150         msg = lnet_msg_alloc();
5151         if (!msg) {
5152                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5153                        libcfs_id2str(target));
5154                 return -ENOMEM;
5155         }
5156
5157         cpt = lnet_cpt_of_cookie(mdh.cookie);
5158
5159         rspt = lnet_rspt_alloc(cpt);
        if (!rspt) {
                CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
                       libcfs_id2str(target));
                /* also free the message we already allocated */
                lnet_msg_free(msg);
                return -ENOMEM;
        }
5165         INIT_LIST_HEAD(&rspt->rspt_on_list);
5166
5167         msg->msg_recovery = recovery;
5168
5169         lnet_res_lock(cpt);
5170
5171         md = lnet_handle2md(&mdh);
5172         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5173                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5174                        match_bits, portal, libcfs_id2str(target),
5175                        md == NULL ? -1 : md->md_threshold);
5176                 if (md != NULL && md->md_me != NULL)
5177                         CERROR("REPLY MD also attached to portal %d\n",
5178                                md->md_me->me_portal);
5179
5180                 lnet_res_unlock(cpt);
5181
5182                 lnet_msg_free(msg);
5183                 lnet_rspt_free(rspt, cpt);
5184                 return -ENOENT;
5185         }
5186
5187         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5188
5189         lnet_msg_attach_md(msg, md, 0, 0);
5190
5191         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5192
5193         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5194         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5195         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5196         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5197
5198         /* NB handles only looked up by creator (no flips) */
5199         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5200                 the_lnet.ln_interface_cookie;
5201         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5202                 md->md_lh.lh_cookie;
5203
5204         lnet_res_unlock(cpt);
5205
5206         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5207
5208         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5209                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5210         else
5211                 lnet_rspt_free(rspt, cpt);
5212
5213         rc = lnet_send(self, msg, LNET_NID_ANY);
5214         if (rc < 0) {
5215                 CNETERR("Error sending GET to %s: %d\n",
5216                         libcfs_id2str(target), rc);
5217                 msg->msg_no_resend = true;
5218                 lnet_finalize(msg, rc);
5219         }
5220
5221         /* completion will be signalled by an event */
5222         return 0;
5223 }
5224 EXPORT_SYMBOL(LNetGet);
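
/*
 * Illustrative sketch, compiled out: a GET into a previously bound sink
 * MD.  "example_get" and its arguments are hypothetical; LNET_EVENT_REPLY
 * on the MD signals that the requested data has arrived.
 */
#if 0
static int
example_get(struct lnet_handle_md mdh, struct lnet_process_id target)
{
        int rc;

        rc = LNetGet(LNET_NID_ANY,      /* let LNet choose the local NI */
                     mdh,               /* sink MD from LNetMDBind() */
                     target, 0 /* portal */, 0x1ULL /* match bits */,
                     0 /* offset */, false /* not a recovery ping */);
        if (rc != 0)
                CERROR("LNetGet from %s failed: %d\n",
                       libcfs_id2str(target), rc);
        return rc;
}
#endif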
5225
5226 /**
5227  * Calculate distance to node at \a dstnid.
5228  *
5229  * \param dstnid Target NID.
5230  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5231  * is saved here.
5232  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5233  * here.
5234  *
5235  * \retval 0 If \a dstnid belongs to a local interface, and reserved option
5236  * local_nid_dist_zero is set, which is the default.
 * \retval positive Distance to target NID, i.e. the number of hops plus one.
5238  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5239  */
5240 int
5241 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5242 {
5243         struct list_head *e;
5244         struct lnet_ni *ni = NULL;
5245         struct lnet_remotenet *rnet;
5246         __u32 dstnet = LNET_NIDNET(dstnid);
5247         int hops;
5248         int cpt;
5249         __u32 order = 2;
5250         struct list_head *rn_list;
5251
5252         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5253          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5254          * keep order 0 free for 0@lo and order 1 free for a local NID
5255          * match */
5256
5257         LASSERT(the_lnet.ln_refcount > 0);
5258
5259         cpt = lnet_net_lock_current();
5260
5261         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5262                 if (ni->ni_nid == dstnid) {
5263                         if (srcnidp != NULL)
5264                                 *srcnidp = dstnid;
5265                         if (orderp != NULL) {
5266                                 if (dstnid == LNET_NID_LO_0)
5267                                         *orderp = 0;
5268                                 else
5269                                         *orderp = 1;
5270                         }
5271                         lnet_net_unlock(cpt);
5272
5273                         return local_nid_dist_zero ? 0 : 1;
5274                 }
5275
5276                 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
                        /* Check if ni was originally created in
                         * the current net namespace.
                         * If not, assign an order above 0xffff0000
                         * so this ni is not preferred. */
                        if (current->nsproxy &&
                            !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
                                order += 0xffff0000;
5284                         if (srcnidp != NULL)
5285                                 *srcnidp = ni->ni_nid;
5286                         if (orderp != NULL)
5287                                 *orderp = order;
5288                         lnet_net_unlock(cpt);
5289                         return 1;
5290                 }
5291
5292                 order++;
5293         }
5294
5295         rn_list = lnet_net2rnethash(dstnet);
5296         list_for_each(e, rn_list) {
5297                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5298
5299                 if (rnet->lrn_net == dstnet) {
5300                         struct lnet_route *route;
5301                         struct lnet_route *shortest = NULL;
5302                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5303                         __u32 route_hops;
5304
5305                         LASSERT(!list_empty(&rnet->lrn_routes));
5306
5307                         list_for_each_entry(route, &rnet->lrn_routes,
5308                                             lr_list) {
5309                                 route_hops = route->lr_hops;
5310                                 if (route_hops == LNET_UNDEFINED_HOPS)
5311                                         route_hops = 1;
5312                                 if (shortest == NULL ||
5313                                     route_hops < shortest_hops) {
5314                                         shortest = route;
5315                                         shortest_hops = route_hops;
5316                                 }
5317                         }
5318
5319                         LASSERT(shortest != NULL);
5320                         hops = shortest_hops;
5321                         if (srcnidp != NULL) {
5322                                 struct lnet_net *net;
5323                                 net = lnet_get_net_locked(shortest->lr_lnet);
5324                                 LASSERT(net);
5325                                 ni = lnet_get_next_ni_locked(net, NULL);
5326                                 *srcnidp = ni->ni_nid;
5327                         }
5328                         if (orderp != NULL)
5329                                 *orderp = order;
5330                         lnet_net_unlock(cpt);
5331                         return hops + 1;
5332                 }
5333                 order++;
5334         }
5335
5336         lnet_net_unlock(cpt);
5337         return -EHOSTUNREACH;
5338 }
5339 EXPORT_SYMBOL(LNetDist);
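
/*
 * Illustrative sketch, compiled out: interpreting LNetDist() results for
 * a hypothetical destination NID.
 */
#if 0
static void
example_dist(lnet_nid_t dstnid)
{
        lnet_nid_t srcnid;
        __u32 order;
        int dist;

        dist = LNetDist(dstnid, &srcnid, &order);
        if (dist < 0)
                CERROR("%s unreachable: %d\n", libcfs_nid2str(dstnid), dist);
        else if (dist == 0)
                CDEBUG(D_NET, "%s is local\n", libcfs_nid2str(dstnid));
        else    /* dist == number of hops + 1 */
                CDEBUG(D_NET, "%s at distance %d via %s (order %u)\n",
                       libcfs_nid2str(dstnid), dist,
                       libcfs_nid2str(srcnid), order);
}
#endif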