1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/lib-move.c
32  *
33  * Data movement routines
34  */
35
36 #define DEBUG_SUBSYSTEM S_LNET
37
38 #include <linux/pagemap.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <linux/nsproxy.h>
42 #include <lnet/lnet_rdma.h>
43 #include <net/net_namespace.h>
44
45 static int local_nid_dist_zero = 1;
46 module_param(local_nid_dist_zero, int, 0444);
47 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
48
49 struct lnet_send_data {
50         struct lnet_ni *sd_best_ni;
51         struct lnet_peer_ni *sd_best_lpni;
52         struct lnet_peer_ni *sd_final_dst_lpni;
53         struct lnet_peer *sd_peer;
54         struct lnet_peer *sd_gw_peer;
55         struct lnet_peer_ni *sd_gw_lpni;
56         struct lnet_peer_net *sd_peer_net;
57         struct lnet_msg *sd_msg;
58         lnet_nid_t sd_dst_nid;
59         lnet_nid_t sd_src_nid;
60         lnet_nid_t sd_rtr_nid;
61         int sd_cpt;
62         int sd_md_cpt;
63         __u32 sd_send_case;
64 };
65
66 static inline bool
67 lnet_msg_is_response(struct lnet_msg *msg)
68 {
69         return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
70 }
71
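/*
 * Decide whether the response (REPLY/ACK) to a message should be tracked.
 * The MD options take precedence: LNET_MD_NO_TRACK_RESPONSE and
 * LNET_MD_TRACK_RESPONSE explicitly disable/enable tracking for that MD.
 * Otherwise the global lnet_response_tracking setting is consulted:
 * 1 tracks GET responses only, 2 tracks PUT responses only, 3 tracks all
 * message types, and any other value (e.g. 0) disables tracking.
 */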
72 static inline bool
73 lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options)
74 {
75         if (md_options & LNET_MD_NO_TRACK_RESPONSE)
76                 /* Explicitly disabled in MD options */
77                 return false;
78
79         if (md_options & LNET_MD_TRACK_RESPONSE)
80                 /* Explicitly enabled in MD options */
81                 return true;
82
83         if (lnet_response_tracking == 3)
84                 /* Enabled for all message types */
85                 return true;
86
87         if (msg_type == LNET_MSG_PUT)
88                 return lnet_response_tracking == 2;
89
90         if (msg_type == LNET_MSG_GET)
91                 return lnet_response_tracking == 1;
92
93         return false;
94 }
95
96 static inline struct lnet_comm_count *
97 get_stats_counts(struct lnet_element_stats *stats,
98                  enum lnet_stats_type stats_type)
99 {
100         switch (stats_type) {
101         case LNET_STATS_TYPE_SEND:
102                 return &stats->el_send_stats;
103         case LNET_STATS_TYPE_RECV:
104                 return &stats->el_recv_stats;
105         case LNET_STATS_TYPE_DROP:
106                 return &stats->el_drop_stats;
107         default:
108                 CERROR("Unknown stats type\n");
109         }
110
111         return NULL;
112 }
113
114 void lnet_incr_stats(struct lnet_element_stats *stats,
115                      enum lnet_msg_type msg_type,
116                      enum lnet_stats_type stats_type)
117 {
118         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
119         if (!counts)
120                 return;
121
122         switch (msg_type) {
123         case LNET_MSG_ACK:
124                 atomic_inc(&counts->co_ack_count);
125                 break;
126         case LNET_MSG_PUT:
127                 atomic_inc(&counts->co_put_count);
128                 break;
129         case LNET_MSG_GET:
130                 atomic_inc(&counts->co_get_count);
131                 break;
132         case LNET_MSG_REPLY:
133                 atomic_inc(&counts->co_reply_count);
134                 break;
135         case LNET_MSG_HELLO:
136                 atomic_inc(&counts->co_hello_count);
137                 break;
138         default:
139                 CERROR("There is a BUG in the code. Unknown message type\n");
140                 break;
141         }
142 }
143
144 __u32 lnet_sum_stats(struct lnet_element_stats *stats,
145                      enum lnet_stats_type stats_type)
146 {
147         struct lnet_comm_count *counts = get_stats_counts(stats, stats_type);
148         if (!counts)
149                 return 0;
150
151         return (atomic_read(&counts->co_ack_count) +
152                 atomic_read(&counts->co_put_count) +
153                 atomic_read(&counts->co_get_count) +
154                 atomic_read(&counts->co_reply_count) +
155                 atomic_read(&counts->co_hello_count));
156 }
157
158 static inline void assign_stats(struct lnet_ioctl_comm_count *msg_stats,
159                                 struct lnet_comm_count *counts)
160 {
161         msg_stats->ico_get_count = atomic_read(&counts->co_get_count);
162         msg_stats->ico_put_count = atomic_read(&counts->co_put_count);
163         msg_stats->ico_reply_count = atomic_read(&counts->co_reply_count);
164         msg_stats->ico_ack_count = atomic_read(&counts->co_ack_count);
165         msg_stats->ico_hello_count = atomic_read(&counts->co_hello_count);
166 }
167
168 void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
169                               struct lnet_element_stats *stats)
170 {
171         struct lnet_comm_count *counts;
172
173         LASSERT(msg_stats);
174         LASSERT(stats);
175
176         counts = get_stats_counts(stats, LNET_STATS_TYPE_SEND);
177         if (!counts)
178                 return;
179         assign_stats(&msg_stats->im_send_stats, counts);
180
181         counts = get_stats_counts(stats, LNET_STATS_TYPE_RECV);
182         if (!counts)
183                 return;
184         assign_stats(&msg_stats->im_recv_stats, counts);
185
186         counts = get_stats_counts(stats, LNET_STATS_TYPE_DROP);
187         if (!counts)
188                 return;
189         assign_stats(&msg_stats->im_drop_stats, counts);
190 }
191
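/*
 * Test hook used to simulate peer failure.  A non-zero 'threshold' adds
 * 'nid4' to the ln_test_peers list; fail_peer() below consults these
 * entries and fails up to 'threshold' messages involving that NID
 * (LNET_MD_THRESH_INF means fail indefinitely).  A zero 'threshold'
 * removes matching entries, and a zero threshold with LNET_NID_ANY
 * clears the whole list.
 */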
192 int
193 lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
194 {
195         struct lnet_test_peer *tp;
196         struct list_head *el;
197         struct list_head *next;
198         struct lnet_nid nid;
199         LIST_HEAD(cull);
200
201         lnet_nid4_to_nid(nid4, &nid);
202         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
203         if (threshold != 0) {
204                 /* Adding a new entry */
205                 LIBCFS_ALLOC(tp, sizeof(*tp));
206                 if (tp == NULL)
207                         return -ENOMEM;
208
209                 tp->tp_nid = nid;
210                 tp->tp_threshold = threshold;
211
212                 lnet_net_lock(0);
213                 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
214                 lnet_net_unlock(0);
215                 return 0;
216         }
217
218         lnet_net_lock(0);
219
220         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
221                 tp = list_entry(el, struct lnet_test_peer, tp_list);
222
223                 if (tp->tp_threshold == 0 ||    /* needs culling anyway */
224                     LNET_NID_IS_ANY(&nid) ||    /* removing all entries */
225                     nid_same(&tp->tp_nid, &nid)) {      /* matched this one */
226                         list_move(&tp->tp_list, &cull);
227                 }
228         }
229
230         lnet_net_unlock(0);
231
232         while (!list_empty(&cull)) {
233                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
234
235                 list_del(&tp->tp_list);
236                 LIBCFS_FREE(tp, sizeof(*tp));
237         }
238         return 0;
239 }
240
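/*
 * Consult the ln_test_peers list set up by lnet_fail_nid() and decide
 * whether traffic involving 'nid4' should be failed.  Each hit decrements
 * the entry's threshold (unless it is LNET_MD_THRESH_INF); exhausted
 * entries are culled, but only on the outgoing path since incoming
 * messages may be processed at interrupt priority.  Returns non-zero if
 * the message should be failed.
 */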
241 static int
242 fail_peer(lnet_nid_t nid4, int outgoing)
243 {
244         struct lnet_test_peer *tp;
245         struct list_head *el;
246         struct list_head *next;
247         struct lnet_nid nid;
248         LIST_HEAD(cull);
249         int fail = 0;
250
251         lnet_nid4_to_nid(nid4, &nid);
252         /* NB: use lnet_net_lock(0) to serialize operations on test peers */
253         lnet_net_lock(0);
254
255         list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
256                 tp = list_entry(el, struct lnet_test_peer, tp_list);
257
258                 if (tp->tp_threshold == 0) {
259                         /* zombie entry */
260                         if (outgoing) {
261                                 /* only cull zombies on outgoing tests,
262                                  * since we may be at interrupt priority on
263                                  * incoming messages. */
264                                 list_move(&tp->tp_list, &cull);
265                         }
266                         continue;
267                 }
268
269                 if (LNET_NID_IS_ANY(&tp->tp_nid) ||     /* fail every peer */
270                     nid_same(&nid, &tp->tp_nid)) {      /* fail this peer */
271                         fail = 1;
272
273                         if (tp->tp_threshold != LNET_MD_THRESH_INF) {
274                                 tp->tp_threshold--;
275                                 if (outgoing &&
276                                     tp->tp_threshold == 0) {
277                                         /* see above */
278                                         list_move(&tp->tp_list, &cull);
279                                 }
280                         }
281                         break;
282                 }
283         }
284
285         lnet_net_unlock(0);
286
287         while (!list_empty(&cull)) {
288                 tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
289                 list_del(&tp->tp_list);
290
291                 LIBCFS_FREE(tp, sizeof(*tp));
292         }
293
294         return fail;
295 }
296
297 unsigned int
298 lnet_iov_nob(unsigned int niov, struct kvec *iov)
299 {
300         unsigned int nob = 0;
301
302         LASSERT(niov == 0 || iov != NULL);
303         while (niov-- > 0)
304                 nob += (iov++)->iov_len;
305
306         return (nob);
307 }
308 EXPORT_SYMBOL(lnet_iov_nob);
309
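/*
 * Copy 'nob' bytes from the kvec array 'siov' (starting 'soffset' bytes
 * into it) to the kvec array 'diov' (starting 'doffset' bytes into it),
 * crossing fragment boundaries on both sides as needed.  For example
 * (illustrative names), lnet_copy_iov2iov(dn, dst, 0, sn, src, 16, 1024)
 * copies 1024 bytes beginning 16 bytes into the source vector to the
 * start of the destination vector.  Both vectors must describe at least
 * offset + nob bytes; the LASSERTs below enforce this.
 */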
310 void
311 lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
312                   unsigned int nsiov, struct kvec *siov, unsigned int soffset,
313                   unsigned int nob)
314 {
315         /* NB diov, siov are READ-ONLY */
316         unsigned int this_nob;
317
318         if (nob == 0)
319                 return;
320
321         /* skip complete frags before 'doffset' */
322         LASSERT(ndiov > 0);
323         while (doffset >= diov->iov_len) {
324                 doffset -= diov->iov_len;
325                 diov++;
326                 ndiov--;
327                 LASSERT(ndiov > 0);
328         }
329
330         /* skip complete frags before 'soffset' */
331         LASSERT(nsiov > 0);
332         while (soffset >= siov->iov_len) {
333                 soffset -= siov->iov_len;
334                 siov++;
335                 nsiov--;
336                 LASSERT(nsiov > 0);
337         }
338
339         do {
340                 LASSERT(ndiov > 0);
341                 LASSERT(nsiov > 0);
342                 this_nob = min3((unsigned int)diov->iov_len - doffset,
343                                 (unsigned int)siov->iov_len - soffset,
344                                 nob);
345
346                 memcpy((char *)diov->iov_base + doffset,
347                        (char *)siov->iov_base + soffset, this_nob);
348                 nob -= this_nob;
349
350                 if (diov->iov_len > doffset + this_nob) {
351                         doffset += this_nob;
352                 } else {
353                         diov++;
354                         ndiov--;
355                         doffset = 0;
356                 }
357
358                 if (siov->iov_len > soffset + this_nob) {
359                         soffset += this_nob;
360                 } else {
361                         siov++;
362                         nsiov--;
363                         soffset = 0;
364                 }
365         } while (nob > 0);
366 }
367 EXPORT_SYMBOL(lnet_copy_iov2iov);
368
369 unsigned int
370 lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
371 {
372         unsigned int  nob = 0;
373
374         LASSERT(niov == 0 || kiov != NULL);
375         while (niov-- > 0)
376                 nob += (kiov++)->bv_len;
377
378         return (nob);
379 }
380 EXPORT_SYMBOL(lnet_kiov_nob);
381
382 void
383 lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
384                     unsigned int doffset,
385                     unsigned int nsiov, struct bio_vec *siov,
386                     unsigned int soffset,
387                     unsigned int nob)
388 {
389         /* NB diov, siov are READ-ONLY */
390         unsigned int    this_nob;
391         char           *daddr = NULL;
392         char           *saddr = NULL;
393
394         if (nob == 0)
395                 return;
396
397         LASSERT (!in_interrupt ());
398
399         LASSERT (ndiov > 0);
400         while (doffset >= diov->bv_len) {
401                 doffset -= diov->bv_len;
402                 diov++;
403                 ndiov--;
404                 LASSERT(ndiov > 0);
405         }
406
407         LASSERT(nsiov > 0);
408         while (soffset >= siov->bv_len) {
409                 soffset -= siov->bv_len;
410                 siov++;
411                 nsiov--;
412                 LASSERT(nsiov > 0);
413         }
414
415         do {
416                 LASSERT(ndiov > 0);
417                 LASSERT(nsiov > 0);
418                 this_nob = min3(diov->bv_len - doffset,
419                                 siov->bv_len - soffset,
420                                 nob);
421
422                 if (daddr == NULL)
423                         daddr = ((char *)kmap(diov->bv_page)) +
424                                 diov->bv_offset + doffset;
425                 if (saddr == NULL)
426                         saddr = ((char *)kmap(siov->bv_page)) +
427                                 siov->bv_offset + soffset;
428
429         /* Vanishingly small risk of kmap deadlock when mapping two pages.
430          * In practice at least one of the kiovs will be backed by
431          * already-mapped kernel pages, so the kmap/kunmap calls are no-ops */
432
433                 memcpy (daddr, saddr, this_nob);
434                 nob -= this_nob;
435
436                 if (diov->bv_len > doffset + this_nob) {
437                         daddr += this_nob;
438                         doffset += this_nob;
439                 } else {
440                         kunmap(diov->bv_page);
441                         daddr = NULL;
442                         diov++;
443                         ndiov--;
444                         doffset = 0;
445                 }
446
447                 if (siov->bv_len > soffset + this_nob) {
448                         saddr += this_nob;
449                         soffset += this_nob;
450                 } else {
451                         kunmap(siov->bv_page);
452                         saddr = NULL;
453                         siov++;
454                         nsiov--;
455                         soffset = 0;
456                 }
457         } while (nob > 0);
458
459         if (daddr != NULL)
460                 kunmap(diov->bv_page);
461         if (saddr != NULL)
462                 kunmap(siov->bv_page);
463 }
464 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
465
466 void
467 lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
468                     unsigned int nkiov, struct bio_vec *kiov,
469                     unsigned int kiovoffset,
470                     unsigned int nob)
471 {
472         /* NB iov, kiov are READ-ONLY */
473         unsigned int    this_nob;
474         char           *addr = NULL;
475
476         if (nob == 0)
477                 return;
478
479         LASSERT (!in_interrupt ());
480
481         LASSERT (niov > 0);
482         while (iovoffset >= iov->iov_len) {
483                 iovoffset -= iov->iov_len;
484                 iov++;
485                 niov--;
486                 LASSERT(niov > 0);
487         }
488
489         LASSERT(nkiov > 0);
490         while (kiovoffset >= kiov->bv_len) {
491                 kiovoffset -= kiov->bv_len;
492                 kiov++;
493                 nkiov--;
494                 LASSERT(nkiov > 0);
495         }
496
497         do {
498                 LASSERT(niov > 0);
499                 LASSERT(nkiov > 0);
500                 this_nob = min3((unsigned int)iov->iov_len - iovoffset,
501                                 (unsigned int)kiov->bv_len - kiovoffset,
502                                 nob);
503
504                 if (addr == NULL)
505                         addr = ((char *)kmap(kiov->bv_page)) +
506                                 kiov->bv_offset + kiovoffset;
507
508                 memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
509                 nob -= this_nob;
510
511                 if (iov->iov_len > iovoffset + this_nob) {
512                         iovoffset += this_nob;
513                 } else {
514                         iov++;
515                         niov--;
516                         iovoffset = 0;
517                 }
518
519                 if (kiov->bv_len > kiovoffset + this_nob) {
520                         addr += this_nob;
521                         kiovoffset += this_nob;
522                 } else {
523                         kunmap(kiov->bv_page);
524                         addr = NULL;
525                         kiov++;
526                         nkiov--;
527                         kiovoffset = 0;
528                 }
529
530         } while (nob > 0);
531
532         if (addr != NULL)
533                 kunmap(kiov->bv_page);
534 }
535 EXPORT_SYMBOL(lnet_copy_kiov2iov);
536
537 void
538 lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
539                    unsigned int kiovoffset,
540                    unsigned int niov, struct kvec *iov, unsigned int iovoffset,
541                    unsigned int nob)
542 {
543         /* NB kiov, iov are READ-ONLY */
544         unsigned int    this_nob;
545         char           *addr = NULL;
546
547         if (nob == 0)
548                 return;
549
550         LASSERT (!in_interrupt ());
551
552         LASSERT (nkiov > 0);
553         while (kiovoffset >= kiov->bv_len) {
554                 kiovoffset -= kiov->bv_len;
555                 kiov++;
556                 nkiov--;
557                 LASSERT(nkiov > 0);
558         }
559
560         LASSERT(niov > 0);
561         while (iovoffset >= iov->iov_len) {
562                 iovoffset -= iov->iov_len;
563                 iov++;
564                 niov--;
565                 LASSERT(niov > 0);
566         }
567
568         do {
569                 LASSERT(nkiov > 0);
570                 LASSERT(niov > 0);
571                 this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
572                                 (unsigned int)iov->iov_len - iovoffset,
573                                 nob);
574
575                 if (addr == NULL)
576                         addr = ((char *)kmap(kiov->bv_page)) +
577                                 kiov->bv_offset + kiovoffset;
578
579                 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
580                 nob -= this_nob;
581
582                 if (kiov->bv_len > kiovoffset + this_nob) {
583                         addr += this_nob;
584                         kiovoffset += this_nob;
585                 } else {
586                         kunmap(kiov->bv_page);
587                         addr = NULL;
588                         kiov++;
589                         nkiov--;
590                         kiovoffset = 0;
591                 }
592
593                 if (iov->iov_len > iovoffset + this_nob) {
594                         iovoffset += this_nob;
595                 } else {
596                         iov++;
597                         niov--;
598                         iovoffset = 0;
599                 }
600         } while (nob > 0);
601
602         if (addr != NULL)
603                 kunmap(kiov->bv_page);
604 }
605 EXPORT_SYMBOL(lnet_copy_iov2kiov);
606
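/*
 * Example (illustrative figures, assuming two 4 KiB source fragments):
 * asking for 6000 bytes at offset 1000 yields two destination entries -
 * the trailing 3096 bytes of the first fragment and the first 2904 bytes
 * of the second - and a return value of 2.
 */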
607 int
608 lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
609                   int src_niov, struct bio_vec *src,
610                   unsigned int offset, unsigned int len)
611 {
612         /* Initialise 'dst' to the subset of 'src' starting at 'offset',
613          * for exactly 'len' bytes, and return the number of entries.
614          * NB not destructive to 'src' */
615         unsigned int    frag_len;
616         unsigned int    niov;
617
618         if (len == 0)                           /* no data => */
619                 return (0);                     /* no frags */
620
621         LASSERT(src_niov > 0);
622         while (offset >= src->bv_len) {      /* skip initial frags */
623                 offset -= src->bv_len;
624                 src_niov--;
625                 src++;
626                 LASSERT(src_niov > 0);
627         }
628
629         niov = 1;
630         for (;;) {
631                 LASSERT(src_niov > 0);
632                 LASSERT((int)niov <= dst_niov);
633
634                 frag_len = src->bv_len - offset;
635                 dst->bv_page = src->bv_page;
636                 dst->bv_offset = src->bv_offset + offset;
637
638                 if (len <= frag_len) {
639                         dst->bv_len = len;
640                         LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
641                         return niov;
642                 }
643
644                 dst->bv_len = frag_len;
645                 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
646
647                 len -= frag_len;
648                 dst++;
649                 src++;
650                 niov++;
651                 src_niov--;
652                 offset = 0;
653         }
654 }
655 EXPORT_SYMBOL(lnet_extract_kiov);
656
657 void
658 lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
659              int delayed, unsigned int offset, unsigned int mlen,
660              unsigned int rlen)
661 {
662         unsigned int niov = 0;
663         struct kvec *iov = NULL;
664         struct bio_vec  *kiov = NULL;
665         int rc;
666
667         LASSERT (!in_interrupt ());
668         LASSERT (mlen == 0 || msg != NULL);
669
670         if (msg != NULL) {
671                 LASSERT(msg->msg_receiving);
672                 LASSERT(!msg->msg_sending);
673                 LASSERT(rlen == msg->msg_len);
674                 LASSERT(mlen <= msg->msg_len);
675                 LASSERT(msg->msg_offset == offset);
676                 LASSERT(msg->msg_wanted == mlen);
677
678                 msg->msg_receiving = 0;
679
680                 if (mlen != 0) {
681                         niov = msg->msg_niov;
682                         kiov = msg->msg_kiov;
683
684                         LASSERT (niov > 0);
685                         LASSERT ((iov == NULL) != (kiov == NULL));
686                 }
687         }
688
689         rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
690                                              niov, kiov, offset, mlen,
691                                              rlen);
692         if (rc < 0)
693                 lnet_finalize(msg, rc);
694 }
695
696 static void
697 lnet_setpayloadbuffer(struct lnet_msg *msg)
698 {
699         struct lnet_libmd *md = msg->msg_md;
700
701         LASSERT(msg->msg_len > 0);
702         LASSERT(!msg->msg_routing);
703         LASSERT(md != NULL);
704         LASSERT(msg->msg_niov == 0);
705         LASSERT(msg->msg_kiov == NULL);
706
707         msg->msg_niov = md->md_niov;
708         msg->msg_kiov = md->md_kiov;
709 }
710
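/*
 * Initialise the common fields of an outgoing message: record the type,
 * target, offset and length, attach the MD's buffers when there is a
 * payload, and fill in the wire header.  The header fields are stored in
 * little-endian byte order; dest_nid and src_nid are finalised later by
 * the path selection code.
 */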
711 void
712 lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
713                unsigned int offset, unsigned int len)
714 {
715         msg->msg_type = type;
716         msg->msg_target = target;
717         msg->msg_len = len;
718         msg->msg_offset = offset;
719
720         if (len != 0)
721                 lnet_setpayloadbuffer(msg);
722
723         memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
724         msg->msg_hdr.type           = cpu_to_le32(type);
725         /* dest_nid will be overwritten by lnet_select_pathway() */
726         msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
727         msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
728         /* src_nid will be set later */
729         msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
730         msg->msg_hdr.payload_length = cpu_to_le32(len);
731 }
732
733 void
734 lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
735 {
736         void *priv = msg->msg_private;
737         int rc;
738
739         LASSERT(!in_interrupt());
740         LASSERT(nid_is_lo0(&ni->ni_nid) ||
741                 (msg->msg_txcredit && msg->msg_peertxcredit));
742
743         rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
744         if (rc < 0) {
745                 msg->msg_no_resend = true;
746                 lnet_finalize(msg, rc);
747         }
748 }
749
750 static int
751 lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
752 {
753         int     rc;
754
755         LASSERT(!msg->msg_sending);
756         LASSERT(msg->msg_receiving);
757         LASSERT(!msg->msg_rx_ready_delay);
758         LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);
759
760         msg->msg_rx_ready_delay = 1;
761         rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
762                                                   &msg->msg_private);
763         if (rc != 0) {
764                 CERROR("recv from %s / send to %s aborted: "
765                        "eager_recv failed %d\n",
766                        libcfs_nidstr(&msg->msg_rxpeer->lpni_nid),
767                        libcfs_id2str(msg->msg_target), rc);
768                 LASSERT(rc < 0); /* required by my callers */
769         }
770
771         return rc;
772 }
773
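/*
 * A peer NI is presumed alive until lct_peer_timeout seconds have elapsed
 * since it was last heard from.  For example, assuming a peer timeout of
 * 180 seconds (the value actually comes from the net's tunables), a peer
 * last seen at t=100 is presumed alive until t=280.
 */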
774 static bool
775 lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
776 {
777         time64_t deadline;
778
779         deadline = lpni->lpni_last_alive +
780                    lpni->lpni_net->net_tunables.lct_peer_timeout;
781
782         /*
783          * assume peer_ni is alive as long as we're within the configured
784          * peer timeout
785          */
786         if (deadline > now)
787                 return false;
788
789         return true;
790 }
791
792 /* NB: returns 1 when alive, 0 when dead, negative when error;
793  *     may drop the lnet_net_lock */
794 static int
795 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
796                        struct lnet_msg *msg)
797 {
798         time64_t now = ktime_get_seconds();
799
800         if (!lnet_peer_aliveness_enabled(lpni))
801                 return -ENODEV;
802
803         /*
804          * If we're resending a message, let's attempt to send it even if
805          * the peer is down to fulfill our resend quota on the message
806          */
807         if (msg->msg_retry_count > 0)
808                 return 1;
809
810         /* try to send recovery messages regardless */
811         if (msg->msg_recovery)
812                 return 1;
813
814         /* always send any responses */
815         if (lnet_msg_is_response(msg))
816                 return 1;
817
818         if (!lnet_is_peer_deadline_passed(lpni, now))
819                 return true;
820
821         return lnet_is_peer_ni_alive(lpni);
822 }
823
824 /**
825  * \param msg The message to be sent.
826  * \param do_send True if lnet_ni_send() should be called in this function.
827  *        lnet_send() is going to lnet_net_unlock immediately after this, so
828  *        it passes do_send FALSE and the unlock/send/lock step is skipped here.
829  *
830  * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
831  * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
832  * \retval -EHOSTUNREACH If the next hop of the message appears dead.
833  * \retval -ECANCELED If the MD of the message has been unlinked.
834  */
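/*
 * Sending consumes two kinds of credits: a per-peer-NI tx credit
 * (lpni_txcredits) and a per-NI tx queue credit (tq_credits).  If either
 * count goes negative the message is queued on the corresponding list
 * (lpni_txq or tq_delayed) and LNET_CREDIT_WAIT is returned; the credits
 * are given back in lnet_return_tx_credits_locked(), which also kicks the
 * next delayed message.
 */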
835 static int
836 lnet_post_send_locked(struct lnet_msg *msg, int do_send)
837 {
838         struct lnet_peer_ni     *lp = msg->msg_txpeer;
839         struct lnet_ni          *ni = msg->msg_txni;
840         int                     cpt = msg->msg_tx_cpt;
841         struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];
842
843         /* non-lnet_send() callers have checked before */
844         LASSERT(!do_send || msg->msg_tx_delayed);
845         LASSERT(!msg->msg_receiving);
846         LASSERT(msg->msg_tx_committed);
847
848         /* can't get here if we're sending to the loopback interface */
849         if (the_lnet.ln_loni)
850                 LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid));
851
852         /* NB 'lp' is always the next hop */
853         if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
854             lnet_peer_alive_locked(ni, lp, msg) == 0) {
855                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
856                 the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
857                         msg->msg_len;
858                 lnet_net_unlock(cpt);
859                 if (msg->msg_txpeer)
860                         lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
861                                         msg->msg_type,
862                                         LNET_STATS_TYPE_DROP);
863                 if (msg->msg_txni)
864                         lnet_incr_stats(&msg->msg_txni->ni_stats,
865                                         msg->msg_type,
866                                         LNET_STATS_TYPE_DROP);
867
868                 CNETERR("Dropping message for %s: peer not alive\n",
869                         libcfs_id2str(msg->msg_target));
870                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
871                 if (do_send)
872                         lnet_finalize(msg, -EHOSTUNREACH);
873
874                 lnet_net_lock(cpt);
875                 return -EHOSTUNREACH;
876         }
877
878         if (msg->msg_md != NULL &&
879             (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
880                 lnet_net_unlock(cpt);
881
882                 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
883                         "called on the MD/ME.\n",
884                         libcfs_id2str(msg->msg_target));
885                 if (do_send) {
886                         msg->msg_no_resend = true;
887                         CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
888                                msg, libcfs_id2str(msg->msg_target));
889                         lnet_finalize(msg, -ECANCELED);
890                 }
891
892                 lnet_net_lock(cpt);
893                 return -ECANCELED;
894         }
895
896         if (!msg->msg_peertxcredit) {
897                 spin_lock(&lp->lpni_lock);
898                 LASSERT((lp->lpni_txcredits < 0) ==
899                         !list_empty(&lp->lpni_txq));
900
901                 msg->msg_peertxcredit = 1;
902                 lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
903                 lp->lpni_txcredits--;
904
905                 if (lp->lpni_txcredits < lp->lpni_mintxcredits)
906                         lp->lpni_mintxcredits = lp->lpni_txcredits;
907
908                 if (lp->lpni_txcredits < 0) {
909                         msg->msg_tx_delayed = 1;
910                         list_add_tail(&msg->msg_list, &lp->lpni_txq);
911                         spin_unlock(&lp->lpni_lock);
912                         return LNET_CREDIT_WAIT;
913                 }
914                 spin_unlock(&lp->lpni_lock);
915         }
916
917         if (!msg->msg_txcredit) {
918                 LASSERT((tq->tq_credits < 0) ==
919                         !list_empty(&tq->tq_delayed));
920
921                 msg->msg_txcredit = 1;
922                 tq->tq_credits--;
923                 atomic_dec(&ni->ni_tx_credits);
924
925                 if (tq->tq_credits < tq->tq_credits_min)
926                         tq->tq_credits_min = tq->tq_credits;
927
928                 if (tq->tq_credits < 0) {
929                         msg->msg_tx_delayed = 1;
930                         list_add_tail(&msg->msg_list, &tq->tq_delayed);
931                         return LNET_CREDIT_WAIT;
932                 }
933         }
934
935         if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
936             lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
937                 msg->msg_tx_delayed = 1;
938                 return LNET_CREDIT_WAIT;
939         }
940
941         /* unset the tx_delay flag as we're going to send it now */
942         msg->msg_tx_delayed = 0;
943
944         if (do_send) {
945                 lnet_net_unlock(cpt);
946                 lnet_ni_send(ni, msg);
947                 lnet_net_lock(cpt);
948         }
949         return LNET_CREDIT_OK;
950 }
951
952
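/*
 * Pick the router buffer pool whose buffers are large enough to hold this
 * message.  The pools are ordered by increasing buffer size (rbp_npages
 * pages each), so walk forward until msg_len fits; the LASSERTs enforce
 * that msg_len <= LNET_MTU and that we never run off the end of the
 * LNET_NRBPOOLS pools.
 */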
953 static struct lnet_rtrbufpool *
954 lnet_msg2bufpool(struct lnet_msg *msg)
955 {
956         struct lnet_rtrbufpool  *rbp;
957         int                     cpt;
958
959         LASSERT(msg->msg_rx_committed);
960
961         cpt = msg->msg_rx_cpt;
962         rbp = &the_lnet.ln_rtrpools[cpt][0];
963
964         LASSERT(msg->msg_len <= LNET_MTU);
965         while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
966                 rbp++;
967                 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
968         }
969
970         return rbp;
971 }
972
973 static int
974 lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
975 {
976         /* lnet_parse() is going to lnet_net_unlock immediately after this, so
977          * it passes do_recv FALSE and the unlock/recv/lock step is skipped here.
978          * Returns LNET_CREDIT_WAIT if the msg blocked for a credit and
979          * LNET_CREDIT_OK if it was received or is OK to receive */
980         struct lnet_peer_ni *lpni = msg->msg_rxpeer;
981         struct lnet_peer *lp;
982         struct lnet_rtrbufpool *rbp;
983         struct lnet_rtrbuf *rb;
984
985         LASSERT(msg->msg_kiov == NULL);
986         LASSERT(msg->msg_niov == 0);
987         LASSERT(msg->msg_routing);
988         LASSERT(msg->msg_receiving);
989         LASSERT(!msg->msg_sending);
990         LASSERT(lpni->lpni_peer_net);
991         LASSERT(lpni->lpni_peer_net->lpn_peer);
992
993         lp = lpni->lpni_peer_net->lpn_peer;
994
995         /* non-lnet_parse callers only receive delayed messages */
996         LASSERT(!do_recv || msg->msg_rx_delayed);
997
998         if (!msg->msg_peerrtrcredit) {
999                 /* lpni_lock protects the credit manipulation */
1000                 spin_lock(&lpni->lpni_lock);
1001
1002                 msg->msg_peerrtrcredit = 1;
1003                 lpni->lpni_rtrcredits--;
1004                 if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
1005                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
1006
1007                 if (lpni->lpni_rtrcredits < 0) {
1008                         spin_unlock(&lpni->lpni_lock);
1009                         /* must have checked eager_recv before here */
1010                         LASSERT(msg->msg_rx_ready_delay);
1011                         msg->msg_rx_delayed = 1;
1012                         /* lp_lock protects the lp_rtrq */
1013                         spin_lock(&lp->lp_lock);
1014                         list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1015                         spin_unlock(&lp->lp_lock);
1016                         return LNET_CREDIT_WAIT;
1017                 }
1018                 spin_unlock(&lpni->lpni_lock);
1019         }
1020
1021         rbp = lnet_msg2bufpool(msg);
1022
1023         if (!msg->msg_rtrcredit) {
1024                 msg->msg_rtrcredit = 1;
1025                 rbp->rbp_credits--;
1026                 if (rbp->rbp_credits < rbp->rbp_mincredits)
1027                         rbp->rbp_mincredits = rbp->rbp_credits;
1028
1029                 if (rbp->rbp_credits < 0) {
1030                         /* must have checked eager_recv before here */
1031                         LASSERT(msg->msg_rx_ready_delay);
1032                         msg->msg_rx_delayed = 1;
1033                         list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1034                         return LNET_CREDIT_WAIT;
1035                 }
1036         }
1037
1038         LASSERT(!list_empty(&rbp->rbp_bufs));
1039         rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
1040         list_del(&rb->rb_list);
1041
1042         msg->msg_niov = rbp->rbp_npages;
1043         msg->msg_kiov = &rb->rb_kiov[0];
1044
1045         /* unset the msg_rx_delayed flag since we're receiving the message */
1046         msg->msg_rx_delayed = 0;
1047
1048         if (do_recv) {
1049                 int cpt = msg->msg_rx_cpt;
1050
1051                 lnet_net_unlock(cpt);
1052                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
1053                              0, msg->msg_len, msg->msg_len);
1054                 lnet_net_lock(cpt);
1055         }
1056         return LNET_CREDIT_OK;
1057 }
1058
1059 void
1060 lnet_return_tx_credits_locked(struct lnet_msg *msg)
1061 {
1062         struct lnet_peer_ni     *txpeer = msg->msg_txpeer;
1063         struct lnet_ni          *txni = msg->msg_txni;
1064         struct lnet_msg         *msg2;
1065
1066         if (msg->msg_txcredit) {
1067                 struct lnet_ni       *ni = msg->msg_txni;
1068                 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
1069
1070                 /* give back NI txcredits */
1071                 msg->msg_txcredit = 0;
1072
1073                 LASSERT((tq->tq_credits < 0) ==
1074                         !list_empty(&tq->tq_delayed));
1075
1076                 tq->tq_credits++;
1077                 atomic_inc(&ni->ni_tx_credits);
1078                 if (tq->tq_credits <= 0) {
1079                         msg2 = list_entry(tq->tq_delayed.next,
1080                                           struct lnet_msg, msg_list);
1081                         list_del(&msg2->msg_list);
1082
1083                         LASSERT(msg2->msg_txni == ni);
1084                         LASSERT(msg2->msg_tx_delayed);
1085                         LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
1086
1087                         (void) lnet_post_send_locked(msg2, 1);
1088                 }
1089         }
1090
1091         if (msg->msg_peertxcredit) {
1092                 /* give back peer txcredits */
1093                 msg->msg_peertxcredit = 0;
1094
1095                 spin_lock(&txpeer->lpni_lock);
1096                 LASSERT((txpeer->lpni_txcredits < 0) ==
1097                         !list_empty(&txpeer->lpni_txq));
1098
1099                 txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
1100                 LASSERT(txpeer->lpni_txqnob >= 0);
1101
1102                 txpeer->lpni_txcredits++;
1103                 if (txpeer->lpni_txcredits <= 0) {
1104                         int msg2_cpt;
1105
1106                         msg2 = list_entry(txpeer->lpni_txq.next,
1107                                               struct lnet_msg, msg_list);
1108                         list_del(&msg2->msg_list);
1109                         spin_unlock(&txpeer->lpni_lock);
1110
1111                         LASSERT(msg2->msg_txpeer == txpeer);
1112                         LASSERT(msg2->msg_tx_delayed);
1113
1114                         msg2_cpt = msg2->msg_tx_cpt;
1115
1116                         /*
1117                          * The msg_cpt can be different from the msg2_cpt
1118                          * so we need to make sure we lock the correct cpt
1119                          * for msg2.
1120                          * Once we call lnet_post_send_locked() it is no
1121                          * longer safe to access msg2, since it could've
1122                          * been freed by lnet_finalize(), but we still
1123                          * need to relock the correct cpt, so we cache the
1124                          * msg2_cpt for the purpose of the check that
1125                          * follows the call to lnet_post_send_locked().
1126                          */
1127                         if (msg2_cpt != msg->msg_tx_cpt) {
1128                                 lnet_net_unlock(msg->msg_tx_cpt);
1129                                 lnet_net_lock(msg2_cpt);
1130                         }
1131                         (void) lnet_post_send_locked(msg2, 1);
1132                         if (msg2_cpt != msg->msg_tx_cpt) {
1133                                 lnet_net_unlock(msg2_cpt);
1134                                 lnet_net_lock(msg->msg_tx_cpt);
1135                         }
1136                 } else {
1137                         spin_unlock(&txpeer->lpni_lock);
1138                 }
1139         }
1140
1141         if (txni != NULL) {
1142                 msg->msg_txni = NULL;
1143                 lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
1144         }
1145
1146         if (txpeer != NULL) {
1147                 msg->msg_txpeer = NULL;
1148                 lnet_peer_ni_decref_locked(txpeer);
1149         }
1150 }
1151
1152 void
1153 lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
1154 {
1155         struct lnet_msg *msg;
1156
1157         if (list_empty(&rbp->rbp_msgs))
1158                 return;
1159         msg = list_entry(rbp->rbp_msgs.next,
1160                          struct lnet_msg, msg_list);
1161         list_del(&msg->msg_list);
1162
1163         (void)lnet_post_routed_recv_locked(msg, 1);
1164 }
1165
1166 void
1167 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
1168 {
1169         struct lnet_msg *msg;
1170         struct lnet_msg *tmp;
1171
1172         lnet_net_unlock(cpt);
1173
1174         list_for_each_entry_safe(msg, tmp, list, msg_list) {
1175                 lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
1176                              0, 0, 0, msg->msg_hdr.payload_length);
1177                 list_del_init(&msg->msg_list);
1178                 msg->msg_no_resend = true;
1179                 msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
1180                 lnet_finalize(msg, -ECANCELED);
1181         }
1182
1183         lnet_net_lock(cpt);
1184 }
1185
1186 void
1187 lnet_return_rx_credits_locked(struct lnet_msg *msg)
1188 {
1189         struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
1190         struct lnet_peer *lp;
1191         struct lnet_ni *rxni = msg->msg_rxni;
1192         struct lnet_msg *msg2;
1193
1194         if (msg->msg_rtrcredit) {
1195                 /* give back global router credits */
1196                 struct lnet_rtrbuf *rb;
1197                 struct lnet_rtrbufpool *rbp;
1198
1199                 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1200                  * there until it gets one allocated, or aborts the wait
1201                  * itself */
1202                 LASSERT(msg->msg_kiov != NULL);
1203
1204                 rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
1205                 rbp = rb->rb_pool;
1206
1207                 msg->msg_kiov = NULL;
1208                 msg->msg_rtrcredit = 0;
1209
1210                 LASSERT(rbp == lnet_msg2bufpool(msg));
1211
1212                 LASSERT((rbp->rbp_credits > 0) ==
1213                         !list_empty(&rbp->rbp_bufs));
1214
1215                 /* If routing is now turned off, we just drop this buffer and
1216                  * don't bother trying to return credits.  */
1217                 if (!the_lnet.ln_routing) {
1218                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1219                         goto routing_off;
1220                 }
1221
1222                 /* It is possible that a user has lowered the desired number of
1223                  * buffers in this pool.  Make sure we never put back
1224                  * more buffers than the stated number. */
1225                 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
1226                         /* Discard this buffer so we don't have too
1227                          * many. */
1228                         lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
1229                         rbp->rbp_nbuffers--;
1230                 } else {
1231                         list_add(&rb->rb_list, &rbp->rbp_bufs);
1232                         rbp->rbp_credits++;
1233                         if (rbp->rbp_credits <= 0)
1234                                 lnet_schedule_blocked_locked(rbp);
1235                 }
1236         }
1237
1238 routing_off:
1239         if (msg->msg_peerrtrcredit) {
1240                 LASSERT(rxpeerni);
1241                 LASSERT(rxpeerni->lpni_peer_net);
1242                 LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
1243
1244                 /* give back peer router credits */
1245                 msg->msg_peerrtrcredit = 0;
1246
1247                 spin_lock(&rxpeerni->lpni_lock);
1248                 rxpeerni->lpni_rtrcredits++;
1249                 spin_unlock(&rxpeerni->lpni_lock);
1250
1251                 lp = rxpeerni->lpni_peer_net->lpn_peer;
1252                 spin_lock(&lp->lp_lock);
1253
1254                 /* drop all messages which are queued to be routed on that
1255                  * peer. */
1256                 if (!the_lnet.ln_routing) {
1257                         LIST_HEAD(drop);
1258                         list_splice_init(&lp->lp_rtrq, &drop);
1259                         spin_unlock(&lp->lp_lock);
1260                         lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
1261                 } else if (!list_empty(&lp->lp_rtrq)) {
1262                         int msg2_cpt;
1263
1264                         msg2 = list_entry(lp->lp_rtrq.next,
1265                                           struct lnet_msg, msg_list);
1266                         list_del(&msg2->msg_list);
1267                         msg2_cpt = msg2->msg_rx_cpt;
1268                         spin_unlock(&lp->lp_lock);
1269                         /*
1270                          * messages on the lp_rtrq can be from any NID in
1271                          * the peer, which means they might have different
1272                          * cpts. We need to make sure we lock the right
1273                          * one.
1274                          */
1275                         if (msg2_cpt != msg->msg_rx_cpt) {
1276                                 lnet_net_unlock(msg->msg_rx_cpt);
1277                                 lnet_net_lock(msg2_cpt);
1278                         }
1279                         (void) lnet_post_routed_recv_locked(msg2, 1);
1280                         if (msg2_cpt != msg->msg_rx_cpt) {
1281                                 lnet_net_unlock(msg2_cpt);
1282                                 lnet_net_lock(msg->msg_rx_cpt);
1283                         }
1284                 } else {
1285                         spin_unlock(&lp->lp_lock);
1286                 }
1287         }
1288         if (rxni != NULL) {
1289                 msg->msg_rxni = NULL;
1290                 lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
1291         }
1292         if (rxpeerni != NULL) {
1293                 msg->msg_rxpeer = NULL;
1294                 lnet_peer_ni_decref_locked(rxpeerni);
1295         }
1296 }
1297
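/*
 * Ordering used below when choosing between candidate peer NIs: health
 * value first, then selection priority (lower value wins), then whether
 * the peer NI is preferred for the chosen local NI, then available tx
 * credits, and finally round-robin on lpni_seq to break ties.
 */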
1298 static struct lnet_peer_ni *
1299 lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
1300                     struct lnet_peer *peer,
1301                     struct lnet_peer_ni *best_lpni,
1302                     struct lnet_peer_net *peer_net)
1303 {
1304         /*
1305          * Look at the peer NIs for the destination peer that connect
1306          * to the chosen net. If a peer_ni is preferred when using the
1307          * best_ni to communicate, we use that one. If there is no
1308          * preferred peer_ni, or there are multiple preferred peer_ni,
1309          * the available transmit credits are used. If the transmit
1310          * credits are equal, we round-robin over the peer_ni.
1311          */
1312         struct lnet_peer_ni *lpni = NULL;
1313         int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits :
1314                 INT_MIN;
1315         int best_lpni_healthv = (best_lpni) ?
1316                 atomic_read(&best_lpni->lpni_healthv) : 0;
1317         bool best_lpni_is_preferred = false;
1318         bool lpni_is_preferred;
1319         int lpni_healthv;
1320         __u32 lpni_sel_prio;
1321         __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1322
1323         while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
1324                 /*
1325                  * if the best_ni we've chosen already has this lpni
1326                  * preferred, then let's use it
1327                  */
1328                 if (best_ni) {
1329                         lpni_is_preferred = lnet_peer_is_pref_nid_locked(
1330                                 lpni, &best_ni->ni_nid);
1331                         CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
1332                                libcfs_nidstr(&best_ni->ni_nid),
1333                                lpni_is_preferred);
1334                 } else {
1335                         lpni_is_preferred = false;
1336                 }
1337
1338                 lpni_healthv = atomic_read(&lpni->lpni_healthv);
1339                 lpni_sel_prio = lpni->lpni_sel_priority;
1340
1341                 if (best_lpni)
1342                         CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n",
1343                                 libcfs_nidstr(&lpni->lpni_nid),
1344                                 libcfs_nidstr(&best_lpni->lpni_nid),
1345                                 lpni_healthv, best_lpni_healthv,
1346                                 lpni_sel_prio, best_sel_prio,
1347                                 lpni->lpni_txcredits, best_lpni_credits,
1348                                 lpni->lpni_seq, best_lpni->lpni_seq);
1349                 else
1350                         goto select_lpni;
1351
1352                 /* pick the healthiest peer ni */
1353                 if (lpni_healthv < best_lpni_healthv)
1354                         continue;
1355                 else if (lpni_healthv > best_lpni_healthv) {
1356                         if (best_lpni_is_preferred)
1357                                 best_lpni_is_preferred = false;
1358                         goto select_lpni;
1359                 }
1360
1361                 if (lpni_sel_prio > best_sel_prio)
1362                         continue;
1363                 else if (lpni_sel_prio < best_sel_prio) {
1364                         if (best_lpni_is_preferred)
1365                                 best_lpni_is_preferred = false;
1366                         goto select_lpni;
1367                 }
1368
1369                 /* if this is a preferred peer use it */
1370                 if (!best_lpni_is_preferred && lpni_is_preferred) {
1371                         best_lpni_is_preferred = true;
1372                         goto select_lpni;
1373                 } else if (best_lpni_is_preferred && !lpni_is_preferred) {
1374                         /* this is not the preferred peer so let's ignore
1375                          * it.
1376                          */
1377                         continue;
1378                 }
1379
1380                 if (lpni->lpni_txcredits < best_lpni_credits)
1381                         /* We already have a peer that has more credits
1382                          * available than this one. No need to consider
1383                          * this peer further.
1384                          */
1385                         continue;
1386                 else if (lpni->lpni_txcredits > best_lpni_credits)
1387                         goto select_lpni;
1388
1389                 /* The best peer found so far and the current peer
1390                  * have the same number of available credits let's
1391                  * make sure to select between them using Round Robin
1392                  */
1393                 if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq))
1394                         continue;
1395 select_lpni:
1396                 best_lpni_is_preferred = lpni_is_preferred;
1397                 best_lpni_healthv = lpni_healthv;
1398                 best_sel_prio = lpni_sel_prio;
1399                 best_lpni = lpni;
1400                 best_lpni_credits = lpni->lpni_txcredits;
1401         }
1402
1403         /* if we still can't find a peer ni then we can't reach it */
1404         if (!best_lpni) {
1405                 __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
1406                         LNET_NIDNET(dst_nid);
1407                 CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
1408                                 libcfs_net2str(net_id));
1409                 return NULL;
1410         }
1411
1412         CDEBUG(D_NET, "sd_best_lpni = %s\n",
1413                libcfs_nidstr(&best_lpni->lpni_nid));
1414
1415         return best_lpni;
1416 }
1417
1418 /*
1419  * Prerequisite: the best_ni should already be set in the sd
1420  * Find the best lpni.
1421  * If the net id is provided then restrict lpni selection on
1422  * that particular net.
1423  * Otherwise find any reachable lpni. When dealing with an MR
1424  * gateway and it has multiple lpnis which we can use
1425  * we want to select the best one from the list of reachable
1426  * ones.
1427  */
1428 static inline struct lnet_peer_ni *
1429 lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid,
1430                     struct lnet_peer *peer, __u32 net_id)
1431 {
1432         struct lnet_peer_net *peer_net;
1433
1434         /* find the best_lpni on any local network */
1435         if (net_id == LNET_NET_ANY) {
1436                 struct lnet_peer_ni *best_lpni = NULL;
1437                 struct lnet_peer_net *lpn;
1438                 list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
1439                         /* no net specified; find any reachable peer ni */
1440                         if (!lnet_islocalnet_locked(lpn->lpn_net_id))
1441                                 continue;
1442                         best_lpni = lnet_select_peer_ni(lni, dst_nid, peer,
1443                                                         best_lpni, lpn);
1444                 }
1445
1446                 return best_lpni;
1447         }
1448         /* restrict on the specified net */
1449         peer_net = lnet_peer_get_net_locked(peer, net_id);
1450         if (peer_net)
1451                 return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net);
1452
1453         return NULL;
1454 }
1455
1456 static int
1457 lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2)
1458 {
1459         if (lpni1->lpni_txqnob < lpni2->lpni_txqnob)
1460                 return 1;
1461
1462         if (lpni1->lpni_txqnob > lpni2->lpni_txqnob)
1463                 return -1;
1464
1465         if (lpni1->lpni_txcredits > lpni2->lpni_txcredits)
1466                 return 1;
1467
1468         if (lpni1->lpni_txcredits < lpni2->lpni_txcredits)
1469                 return -1;
1470
1471         return 0;
1472 }
1473
1474 /* Compare route priorities and hop counts */
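/*
 * A route with a numerically lower lr_priority always wins; only when
 * priorities are equal does the hop count matter, with fewer hops
 * preferred.  Routes with LNET_UNDEFINED_HOPS are treated as single-hop
 * for this comparison.
 */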
1475 static int
1476 lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
1477 {
1478         int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
1479         int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
1480
1481         if (r1->lr_priority < r2->lr_priority)
1482                 return 1;
1483
1484         if (r1->lr_priority > r2->lr_priority)
1485                 return -1;
1486
1487         if (r1_hops < r2_hops)
1488                 return 1;
1489
1490         if (r1_hops > r2_hops)
1491                 return -1;
1492
1493         return 0;
1494 }
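
/*
 * Worked example (illustration only; values hypothetical): with
 * r1->lr_priority == 0 and r2->lr_priority == 1, the lower priority
 * value wins and the function returns 1 (prefer r1) regardless of hop
 * counts.  With equal priorities, r1_hops == 1 and r2_hops == 3 also
 * return 1.  Routes with LNET_UNDEFINED_HOPS are compared as if they
 * were one hop away.  A return of 0 leaves the tie to be broken by the
 * gateway comparison in lnet_find_route_locked().
 */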
1495
1496 static struct lnet_route *
1497 lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
1498                        struct lnet_peer_ni *remote_lpni,
1499                        struct lnet_route **prev_route,
1500                        struct lnet_peer_ni **gwni)
1501 {
1502         struct lnet_peer_ni *lpni, *best_gw_ni = NULL;
1503         struct lnet_route *best_route;
1504         struct lnet_route *last_route;
1505         struct lnet_route *route;
1506         int rc;
1507         bool best_rte_is_preferred = false;
1508         struct lnet_nid *gw_pnid;
1509
1510         CDEBUG(D_NET, "Looking up a route to %s, from %s\n",
1511                libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net));
1512
1513         best_route = last_route = NULL;
1514         list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1515                 if (!lnet_is_route_alive(route))
1516                         continue;
1517                 gw_pnid = &route->lr_gateway->lp_primary_nid;
1518
1519                 /* no locking on the fields below, but any race is harmless */
1520                 if (last_route && (last_route->lr_seq - route->lr_seq < 0))
1521                         last_route = route;
1522
1523                 /* if the best route found is in the preferred list then
1524                  * tag it as preferred and use it later on. But if we
1525                  * didn't find any routes which are on the preferred list
1526                  * then just use the best route possible.
1527                  */
1528                 rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid);
1529
1530                 if (!best_route || (rc && !best_rte_is_preferred)) {
1531                         /* Restrict the selection of the router NI to the
1532                          * src_net provided. If the src_net is LNET_NET_ANY,
1533                          * then select the best interface available.
1534                          */
1535                         lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1536                                                    route->lr_gateway,
1537                                                    src_net);
1538                         if (!lpni) {
1539                                 CDEBUG(D_NET,
1540                                        "Gateway %s does not have a peer NI on net %s\n",
1541                                        libcfs_nidstr(gw_pnid),
1542                                        libcfs_net2str(src_net));
1543                                 continue;
1544                         }
1545                 }
1546
1547                 if (rc && !best_rte_is_preferred) {
1548                         /* This is the first preferred route we found,
1549                          * so it beats any route found previously
1550                          */
1551                         best_route = route;
1552                         if (!last_route)
1553                                 last_route = route;
1554                         best_gw_ni = lpni;
1555                         best_rte_is_preferred = true;
1556                         CDEBUG(D_NET, "preferred gw = %s\n",
1557                                libcfs_nidstr(gw_pnid));
1558                         continue;
1559                 } else if ((!rc) && best_rte_is_preferred)
1560                         /* The best route we found so far is in the preferred
1561                          * list, so it beats any non-preferred route
1562                          */
1563                         continue;
1564
1565                 if (!best_route) {
1566                         best_route = last_route = route;
1567                         best_gw_ni = lpni;
1568                         continue;
1569                 }
1570
1571                 rc = lnet_compare_routes(route, best_route);
1572                 if (rc == -1)
1573                         continue;
1574
1575                 /* Restrict the selection of the router NI to the
1576                  * src_net provided. If the src_net is LNET_NET_ANY,
1577                  * then select the best interface available.
1578                  */
1579                 lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY,
1580                                            route->lr_gateway,
1581                                            src_net);
1582                 if (!lpni) {
1583                         CDEBUG(D_NET,
1584                                "Gateway %s does not have a peer NI on net %s\n",
1585                                libcfs_nidstr(gw_pnid),
1586                                libcfs_net2str(src_net));
1587                         continue;
1588                 }
1589
1590                 if (rc == 1) {
1591                         best_route = route;
1592                         best_gw_ni = lpni;
1593                         continue;
1594                 }
1595
1596                 rc = lnet_compare_gw_lpnis(lpni, best_gw_ni);
1597                 if (rc == -1)
1598                         continue;
1599
1600                 if (rc == 1 || route->lr_seq <= best_route->lr_seq) {
1601                         best_route = route;
1602                         best_gw_ni = lpni;
1603                         continue;
1604                 }
1605         }
1606
1607         *prev_route = last_route;
1608         *gwni = best_gw_ni;
1609
1610         return best_route;
1611 }
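
/*
 * Summary of the selection order applied above:
 *
 *         1. routes that are not alive are skipped
 *         2. a gateway on the peer's preferred-router list beats any
 *            non-preferred gateway
 *         3. lnet_compare_routes(): lower priority value, then fewer hops
 *         4. lnet_compare_gw_lpnis(): smaller tx queue, then more credits
 *         5. round robin on lr_seq as the final tie breaker
 *
 * *prev_route is set to the route with the highest lr_seq seen, so the
 * caller can bump the chosen route past it and keep the rotation
 * moving.
 */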
1612
1613 static inline unsigned int
1614 lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx)
1615 {
1616         if (dev_idx == UINT_MAX)
1617                 return UINT_MAX;
1618
1619         if (!ni || !ni->ni_net || !ni->ni_net->net_lnd ||
1620             !ni->ni_net->net_lnd->lnd_get_dev_prio)
1621                 return UINT_MAX;
1622
1623         return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx);
1624 }
1625
1626 static struct lnet_ni *
1627 lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni,
1628                  struct lnet_peer *peer, struct lnet_peer_net *peer_net,
1629                  struct lnet_msg *msg, int md_cpt)
1630 {
1631         struct lnet_libmd *md = msg->msg_md;
1632         unsigned int offset = msg->msg_offset;
1633         unsigned int shortest_distance;
1634         struct lnet_ni *ni = NULL;
1635         int best_credits;
1636         int best_healthv;
1637         __u32 best_sel_prio;
1638         unsigned int best_dev_prio;
1639         unsigned int dev_idx = UINT_MAX;
1640         struct page *page = lnet_get_first_page(md, offset);
1641         msg->msg_rdma_force = lnet_is_rdma_only_page(page);
1642
1643         if (msg->msg_rdma_force)
1644                 dev_idx = lnet_get_dev_idx(page);
1645
1646         /*
1647          * If there is no peer_ni that we can send to on this network,
1648          * then there is no point in looking for a new best_ni here.
1649          */
1650         if (!lnet_get_next_peer_ni_locked(peer, peer_net, NULL))
1651                 return best_ni;
1652
1653         if (best_ni == NULL) {
1654                 best_sel_prio = LNET_MAX_SELECTION_PRIORITY;
1655                 shortest_distance = UINT_MAX;
1656                 best_dev_prio = UINT_MAX;
1657                 best_credits = INT_MIN;
1658                 best_healthv = 0;
1659         } else {
1660                 best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx);
1661                 shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
1662                                                      best_ni->ni_dev_cpt);
1663                 best_credits = atomic_read(&best_ni->ni_tx_credits);
1664                 best_healthv = atomic_read(&best_ni->ni_healthv);
1665                 best_sel_prio = best_ni->ni_sel_priority;
1666         }
1667
1668         while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
1669                 unsigned int distance;
1670                 int ni_credits;
1671                 int ni_healthv;
1672                 int ni_fatal;
1673                 __u32 ni_sel_prio;
1674                 unsigned int ni_dev_prio;
1675
1676                 ni_credits = atomic_read(&ni->ni_tx_credits);
1677                 ni_healthv = atomic_read(&ni->ni_healthv);
1678                 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
1679                 ni_sel_prio = ni->ni_sel_priority;
1680
1681                 /*
1682                  * calculate the distance from the CPT on which
1683                  * the message memory is allocated to the CPT of
1684                  * the NI's physical device
1685                  */
1686                 distance = cfs_cpt_distance(lnet_cpt_table(),
1687                                             md_cpt,
1688                                             ni->ni_dev_cpt);
1689
1690                 ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx);
1691
1692                 /*
1693                  * All distances smaller than the NUMA range
1694                  * are treated equally.
1695                  */
1696                 if (distance < lnet_numa_range)
1697                         distance = lnet_numa_range;
1698
1699                 /*
1700                  * Select on health, selection policy, direct dma prio,
1701                  * shorter distance, available credits, then round-robin.
1702                  */
1703                 if (ni_fatal)
1704                         continue;
1705
1706                 if (best_ni)
1707                         CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u]\n",
1708                                libcfs_nidstr(&ni->ni_nid), ni_credits, distance,
1709                                ni->ni_seq, ni_sel_prio, ni_dev_prio,
1710                                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
1711                                : "not selected", best_credits, shortest_distance,
1712                                (best_ni) ? best_ni->ni_seq : 0,
1713                                best_sel_prio, best_dev_prio);
1714                 else
1715                         goto select_ni;
1716
1717                 if (ni_healthv < best_healthv)
1718                         continue;
1719                 else if (ni_healthv > best_healthv)
1720                         goto select_ni;
1721
1722                 if (ni_sel_prio > best_sel_prio)
1723                         continue;
1724                 else if (ni_sel_prio < best_sel_prio)
1725                         goto select_ni;
1726
1727                 if (ni_dev_prio > best_dev_prio)
1728                         continue;
1729                 else if (ni_dev_prio < best_dev_prio)
1730                         goto select_ni;
1731
1732                 if (distance > shortest_distance)
1733                         continue;
1734                 else if (distance < shortest_distance)
1735                         goto select_ni;
1736
1737                 if (ni_credits < best_credits)
1738                         continue;
1739                 else if (ni_credits > best_credits)
1740                         goto select_ni;
1741
1742                 if (best_ni && best_ni->ni_seq <= ni->ni_seq)
1743                         continue;
1744
1745 select_ni:
1746                 best_sel_prio = ni_sel_prio;
1747                 best_dev_prio = ni_dev_prio;
1748                 shortest_distance = distance;
1749                 best_healthv = ni_healthv;
1750                 best_ni = ni;
1751                 best_credits = ni_credits;
1752         }
1753
1754         CDEBUG(D_NET, "selected best_ni %s\n",
1755                (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
1756
1757         return best_ni;
1758 }
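
/*
 * Worked example (illustration only; values hypothetical): when
 * comparing a candidate ni against the current best_ni, the checks
 * above run in order and stop at the first difference, e.g.:
 *
 *         health    5 vs 5    -> tie, keep comparing
 *         sel prio  10 vs 20  -> candidate wins (lower value preferred)
 *
 * Device priority, distance, credits and the ni_seq round-robin
 * counter are never consulted once an earlier criterion differs.
 */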
1759
1760 /*
1761  * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
1762  * because such traffic is required to perform discovery. We therefore
1763  * exclude all GET and PUT on that portal. We also exclude all ACK and
1764  * REPLY traffic, but that is because the portal is not tracked in the
1765  * message structure for these message types. We could restrict this
1766  * further by also checking for LNET_PROTO_PING_MATCHBITS.
1767  */
1768 static bool
1769 lnet_msg_discovery(struct lnet_msg *msg)
1770 {
1771         if (msg->msg_type == LNET_MSG_PUT) {
1772                 if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
1773                         return true;
1774         } else if (msg->msg_type == LNET_MSG_GET) {
1775                 if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
1776                         return true;
1777         }
1778         return false;
1779 }
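
/*
 * For example, a PUT to a regular data portal returns true and may
 * trigger discovery of the peer, while a PUT or GET aimed at
 * LNET_RESERVED_PORTAL (the portal discovery itself uses), or any ACK
 * or REPLY, returns false and is sent without waiting on discovery.
 */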
1780
1781 #define SRC_SPEC        0x0001
1782 #define SRC_ANY         0x0002
1783 #define LOCAL_DST       0x0004
1784 #define REMOTE_DST      0x0008
1785 #define MR_DST          0x0010
1786 #define NMR_DST         0x0020
1787 #define SND_RESP        0x0040
1788
1789 /* The following two defines are used for return codes */
1790 #define REPEAT_SEND     0x1000
1791 #define PASS_THROUGH    0x2000
1792
1793 /* The different cases lnet_select pathway needs to handle */
1794 #define SRC_SPEC_LOCAL_MR_DST   (SRC_SPEC | LOCAL_DST | MR_DST)
1795 #define SRC_SPEC_ROUTER_MR_DST  (SRC_SPEC | REMOTE_DST | MR_DST)
1796 #define SRC_SPEC_LOCAL_NMR_DST  (SRC_SPEC | LOCAL_DST | NMR_DST)
1797 #define SRC_SPEC_ROUTER_NMR_DST (SRC_SPEC | REMOTE_DST | NMR_DST)
1798 #define SRC_ANY_LOCAL_MR_DST    (SRC_ANY | LOCAL_DST | MR_DST)
1799 #define SRC_ANY_ROUTER_MR_DST   (SRC_ANY | REMOTE_DST | MR_DST)
1800 #define SRC_ANY_LOCAL_NMR_DST   (SRC_ANY | LOCAL_DST | NMR_DST)
1801 #define SRC_ANY_ROUTER_NMR_DST  (SRC_ANY | REMOTE_DST | NMR_DST)
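
/*
 * Composition example (illustration only): a PUT sent with
 * LNET_NID_ANY as the source NID to a multi-rail peer on a directly
 * connected network ends up with
 *
 *         send_case = SRC_ANY | LOCAL_DST | MR_DST
 *                   = SRC_ANY_LOCAL_MR_DST
 *
 * and lnet_handle_send_case_locked() switches on that value to pick
 * the matching handler.  SND_RESP is masked off before the switch and
 * only consulted inside the individual handlers.
 */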
1802
1803 static int
1804 lnet_handle_lo_send(struct lnet_send_data *sd)
1805 {
1806         struct lnet_msg *msg = sd->sd_msg;
1807         int cpt = sd->sd_cpt;
1808
1809         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1810                 return -ESHUTDOWN;
1811
1812         /* No send credit hassles with LOLND */
1813         lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
1814         msg->msg_hdr.dest_nid =
1815                 cpu_to_le64(lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
1816         if (!msg->msg_routing)
1817                 msg->msg_hdr.src_nid =
1818                         cpu_to_le64(lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
1819         msg->msg_target.nid = lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid);
1820         lnet_msg_commit(msg, cpt);
1821         msg->msg_txni = the_lnet.ln_loni;
1822
1823         return LNET_CREDIT_OK;
1824 }
1825
1826 static int
1827 lnet_handle_send(struct lnet_send_data *sd)
1828 {
1829         struct lnet_ni *best_ni = sd->sd_best_ni;
1830         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
1831         struct lnet_peer_ni *final_dst_lpni = sd->sd_final_dst_lpni;
1832         struct lnet_msg *msg = sd->sd_msg;
1833         int cpt2;
1834         __u32 send_case = sd->sd_send_case;
1835         int rc;
1836         __u32 routing = send_case & REMOTE_DST;
1837         struct lnet_rsp_tracker *rspt;
1838
1839         /* Increment sequence number of the selected peer, peer net,
1840          * local ni and local net so that we pick the next ones
1841          * in Round Robin.
1842          */
1843         best_lpni->lpni_peer_net->lpn_seq++;
1844         best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq;
1845         best_ni->ni_net->net_seq++;
1846         best_ni->ni_seq = best_ni->ni_net->net_seq;
1847
1848         CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
1849                libcfs_nidstr(&best_ni->ni_nid),
1850                best_ni->ni_seq, best_ni->ni_net->net_seq,
1851                atomic_read(&best_ni->ni_tx_credits),
1852                best_ni->ni_sel_priority,
1853                libcfs_nidstr(&best_lpni->lpni_nid),
1854                best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
1855                best_lpni->lpni_txcredits,
1856                best_lpni->lpni_sel_priority);
1857
1858         /*
1859          * grab a reference on the peer_ni so it sticks around even if
1860          * we need to drop and relock the lnet_net_lock below.
1861          */
1862         lnet_peer_ni_addref_locked(best_lpni);
1863
1864         /*
1865          * Use lnet_cpt_of_nid() to determine the CPT used to commit the
1866          * message. This ensures that we get a CPT that is correct for
1867          * the NI when the NI has been restricted to a subset of all CPTs.
1868          * If the selected CPT differs from the one currently locked, we
1869          * must unlock and relock the lnet_net_lock(), and then check whether
1870          * the configuration has changed. We don't have a hold on the best_ni
1871          * yet, and it may have vanished.
1872          */
1873         cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni);
1874         if (sd->sd_cpt != cpt2) {
1875                 __u32 seq = lnet_get_dlc_seq_locked();
1876                 lnet_net_unlock(sd->sd_cpt);
1877                 sd->sd_cpt = cpt2;
1878                 lnet_net_lock(sd->sd_cpt);
1879                 if (seq != lnet_get_dlc_seq_locked()) {
1880                         lnet_peer_ni_decref_locked(best_lpni);
1881                         return REPEAT_SEND;
1882                 }
1883         }
1884
1885         /*
1886          * store the best_lpni in the message right away to avoid having
1887          * to do the same operation under different conditions
1888          */
1889         msg->msg_txpeer = best_lpni;
1890         msg->msg_txni = best_ni;
1891
1892         /*
1893          * grab a reference for the best_ni since now it's in use in this
1894          * send. The reference will be dropped in lnet_finalize()
1895          */
1896         lnet_ni_addref_locked(msg->msg_txni, sd->sd_cpt);
1897
1898         /*
1899          * Always set the target.nid to the best peer picked. Either the
1900          * NID will be one of the peer NIDs selected, the same NID that
1901          * was originally set in the target, or the NID of
1902          * a router if this message should be routed
1903          */
1904         /* FIXME handle large-addr nids */
1905         msg->msg_target.nid = lnet_nid_to_nid4(&msg->msg_txpeer->lpni_nid);
1906
1907         /*
1908          * lnet_msg_commit assigns the correct cpt to the message, which
1909          * is used to decrement the correct refcount on the ni when it's
1910          * time to return the credits
1911          */
1912         lnet_msg_commit(msg, sd->sd_cpt);
1913
1914         /*
1915          * If we are routing the message then we keep the src_nid that was
1916          * set by the originator. If we are not routing then we are the
1917          * originator and set it here.
1918          */
1919         if (!msg->msg_routing)
1920                 msg->msg_hdr.src_nid =
1921                         cpu_to_le64(lnet_nid_to_nid4(&msg->msg_txni->ni_nid));
1922
1923         if (routing) {
1924                 msg->msg_target_is_router = 1;
1925                 msg->msg_target.pid = LNET_PID_LUSTRE;
1926                 /*
1927                  * since we're routing we want to ensure that the
1928                  * msg_hdr.dest_nid is set to the final destination. When
1929                  * the router receives this message it knows how to route
1930                  * it.
1931                  *
1932                  * final_dst_lpni is set at the beginning of the
1933                  * lnet_select_pathway() function and is never changed.
1934                  * It's safe to use it here.
1935                  */
1936                 /* FIXME handle large-addr nid */
1937                 msg->msg_hdr.dest_nid =
1938                         cpu_to_le64(lnet_nid_to_nid4(&final_dst_lpni->lpni_nid));
1939         } else {
1940                 /*
1941                  * if we're not routing set the dest_nid to the best peer
1942                  * ni NID that we picked earlier in the algorithm.
1943                  */
1944                 msg->msg_hdr.dest_nid =
1945                         cpu_to_le64(lnet_nid_to_nid4(&msg->msg_txpeer->lpni_nid));
1946         }
1947
1948         /*
1949          * if we have a response tracker block, update it with the next hop
1950          * nid
1951          */
1952         if (msg->msg_md) {
1953                 rspt = msg->msg_md->md_rspt_ptr;
1954                 if (rspt) {
1955                         rspt->rspt_next_hop_nid =
1956                                 msg->msg_txpeer->lpni_nid;
1957                         CDEBUG(D_NET, "rspt_next_hop_nid = %s\n",
1958                                libcfs_nidstr(&rspt->rspt_next_hop_nid));
1959                 }
1960         }
1961
1962         rc = lnet_post_send_locked(msg, 0);
1963
1964         if (!rc)
1965                 CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
1966                        libcfs_nid2str(msg->msg_hdr.src_nid),
1967                        libcfs_nidstr(&msg->msg_txni->ni_nid),
1968                        libcfs_nid2str(sd->sd_src_nid),
1969                        libcfs_nid2str(msg->msg_hdr.dest_nid),
1970                        libcfs_nid2str(sd->sd_dst_nid),
1971                        libcfs_nidstr(&msg->msg_txpeer->lpni_nid),
1972                        libcfs_nid2str(sd->sd_rtr_nid),
1973                        lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count);
1974
1975         return rc;
1976 }
1977
1978 static inline void
1979 lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
1980                          struct lnet_msg *msg)
1981 {
1982         if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
1983             !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
1984                 CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
1985                        libcfs_nidstr(&lni->ni_nid),
1986                        libcfs_nidstr(&lpni->lpni_nid));
1987                 lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid);
1988         }
1989 }
1990
1991 /*
1992  * Source Specified
1993  * Local Destination
1994  * non-mr peer
1995  *
1996  * use the source and destination NIDs as the pathway
1997  */
1998 static int
1999 lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
2000 {
2001         /* the destination lpni is set before we get here. */
2002
2003         /* find local NI */
2004         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2005         if (!sd->sd_best_ni) {
2006                 CERROR("Can't send to %s: src %s is not a "
2007                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2008                                 libcfs_nid2str(sd->sd_src_nid));
2009                 return -EINVAL;
2010         }
2011
2012         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2013
2014         return lnet_handle_send(sd);
2015 }
2016
2017 /*
2018  * Source Specified
2019  * Local Destination
2020  * MR Peer
2021  *
2022  * Don't run the selection algorithm on the peer NIs. By specifying the
2023  * local NID, we're also saying that we should always use the destination NID
2024  * provided. This handles the case where we should be using the same
2025  * destination NID for all the messages which belong to the same RPC
2026  * request.
2027  */
2028 static int
2029 lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
2030 {
2031         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2032         if (!sd->sd_best_ni) {
2033                 CERROR("Can't send to %s: src %s is not a "
2034                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2035                                 libcfs_nid2str(sd->sd_src_nid));
2036                 return -EINVAL;
2037         }
2038
2039         if (sd->sd_best_lpni &&
2040             nid_same(&sd->sd_best_lpni->lpni_nid,
2041                       &the_lnet.ln_loni->ni_nid))
2042                 return lnet_handle_lo_send(sd);
2043         else if (sd->sd_best_lpni)
2044                 return lnet_handle_send(sd);
2045
2046         CERROR("can't send to %s. no NI on %s\n",
2047                libcfs_nid2str(sd->sd_dst_nid),
2048                libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
2049
2050         return -EHOSTUNREACH;
2051 }
2052
2053 struct lnet_ni *
2054 lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
2055                               struct lnet_peer *peer,
2056                               struct lnet_peer_net *peer_net,
2057                               struct lnet_msg *msg,
2058                               int cpt)
2059 {
2060         struct lnet_net *local_net;
2061         struct lnet_ni *best_ni;
2062
2063         local_net = lnet_get_net_locked(peer_net->lpn_net_id);
2064         if (!local_net)
2065                 return NULL;
2066
2067         /*
2068          * Iterate through the NIs in this local Net and select
2069          * the NI to send from. The selection is determined by
2070          * these 3 criteria in the following priority:
2071          *      1. NUMA
2072          *      2. NI available credits
2073          *      3. Round Robin
2074          */
2075         best_ni = lnet_get_best_ni(local_net, cur_best_ni,
2076                                    peer, peer_net, msg, cpt);
2077
2078         return best_ni;
2079 }
2080
2081 static int
2082 lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
2083                              int cpt)
2084 {
2085         struct lnet_peer *peer;
2086         struct lnet_peer_ni *new_lpni;
2087         int rc;
2088
2089         lnet_peer_ni_addref_locked(lpni);
2090
2091         peer = lpni->lpni_peer_net->lpn_peer;
2092
2093         if (lnet_peer_gw_discovery(peer)) {
2094                 lnet_peer_ni_decref_locked(lpni);
2095                 return 0;
2096         }
2097
2098         if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
2099                 lnet_peer_ni_decref_locked(lpni);
2100                 return 0;
2101         }
2102
2103         rc = lnet_discover_peer_locked(lpni, cpt, false);
2104         if (rc) {
2105                 lnet_peer_ni_decref_locked(lpni);
2106                 return rc;
2107         }
2108
2109         new_lpni = lnet_find_peer_ni_locked(lnet_nid_to_nid4(&lpni->lpni_nid));
2110         if (!new_lpni) {
2111                 lnet_peer_ni_decref_locked(lpni);
2112                 return -ENOENT;
2113         }
2114
2115         peer = new_lpni->lpni_peer_net->lpn_peer;
2116         spin_lock(&peer->lp_lock);
2117         if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
2118                 /* The peer NI did not change and the peer is up to date.
2119                  * Nothing more to do.
2120                  */
2121                 spin_unlock(&peer->lp_lock);
2122                 lnet_peer_ni_decref_locked(lpni);
2123                 lnet_peer_ni_decref_locked(new_lpni);
2124                 return 0;
2125         }
2126         spin_unlock(&peer->lp_lock);
2127
2128         /* Either the peer NI changed during discovery, or the peer isn't up
2129          * to date. In both cases we want to queue the message on the
2130          * (possibly new) peer's pending queue and queue the peer for discovery
2131          */
2132         msg->msg_sending = 0;
2133         msg->msg_txpeer = NULL;
2134         lnet_net_unlock(cpt);
2135         lnet_peer_queue_message(peer, msg);
2136         lnet_net_lock(cpt);
2137
2138         lnet_peer_ni_decref_locked(lpni);
2139         lnet_peer_ni_decref_locked(new_lpni);
2140
2141         CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
2142                msg, libcfs_nidstr(&peer->lp_primary_nid));
2143
2144         return LNET_DC_WAIT;
2145 }
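
/*
 * The caller sees one of three outcomes from the function above:
 *
 *         0            - no discovery needed (or gateway discovery is
 *                        already in progress); carry on with the send
 *         LNET_DC_WAIT - the message was queued on the peer's pending
 *                        queue and will go out once discovery completes
 *         -ve errno    - discovery could not be started; fail the send
 */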
2146
2147 static int
2148 lnet_handle_find_routed_path(struct lnet_send_data *sd,
2149                              lnet_nid_t dst_nid,
2150                              struct lnet_peer_ni **gw_lpni,
2151                              struct lnet_peer **gw_peer)
2152 {
2153         int rc;
2154         struct lnet_peer *gw;
2155         struct lnet_peer *lp;
2156         struct lnet_peer_net *lpn;
2157         struct lnet_peer_net *best_lpn = NULL;
2158         struct lnet_remotenet *rnet, *best_rnet = NULL;
2159         struct lnet_route *best_route = NULL;
2160         struct lnet_route *last_route = NULL;
2161         struct lnet_peer_ni *lpni = NULL;
2162         struct lnet_peer_ni *gwni = NULL;
2163         bool route_found = false;
2164         lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
2165                 (sd->sd_best_ni != NULL)
2166                 ? lnet_nid_to_nid4(&sd->sd_best_ni->ni_nid)
2167                 : LNET_NID_ANY;
2168         int best_lpn_healthv = 0;
2169         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2170
2171         CDEBUG(D_NET, "using src nid %s for route restriction\n",
2172                libcfs_nid2str(src_nid));
2173
2174         /* If a router nid was specified then we are replying to a GET or
2175          * sending an ACK. In this case we use the gateway associated with the
2176          * specified router nid.
2177          */
2178         if (sd->sd_rtr_nid != LNET_NID_ANY) {
2179                 gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid);
2180                 if (gwni) {
2181                         gw = gwni->lpni_peer_net->lpn_peer;
2182                         lnet_peer_ni_decref_locked(gwni);
2183                         if (gw->lp_rtr_refcount)
2184                                 route_found = true;
2185                 } else {
2186                         CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
2187                                libcfs_nid2str(sd->sd_rtr_nid));
2188                 }
2189         }
2190
2191         if (!route_found) {
2192                 if (sd->sd_msg->msg_routing) {
2193                         /* If I'm routing this message then I need to find the
2194                          * next hop based on the destination NID
2195                          */
2196                         best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid));
2197                         if (!best_rnet) {
2198                                 CERROR("Unable to route message to %s - Route table may be misconfigured\n",
2199                                        libcfs_nid2str(sd->sd_dst_nid));
2200                                 return -EHOSTUNREACH;
2201                         }
2202                 } else {
2203                         /* we've already looked up the initial lpni using
2204                          * dst_nid
2205                          */
2206                         lpni = sd->sd_best_lpni;
2207                         /* the peer tree must be in existence */
2208                         LASSERT(lpni && lpni->lpni_peer_net &&
2209                                 lpni->lpni_peer_net->lpn_peer);
2210                         lp = lpni->lpni_peer_net->lpn_peer;
2211
2212                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
2213                                 /* is this remote network reachable?  */
2214                                 rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
2215                                 if (!rnet)
2216                                         continue;
2217
2218                                 if (!best_lpn) {
2219                                         best_lpn = lpn;
2220                                         best_rnet = rnet;
2221                                 }
2222
2223                                 /* select the preferred peer net */
2224                                 if (best_lpn_healthv > lpn->lpn_healthv)
2225                                         continue;
2226                                 else if (best_lpn_healthv < lpn->lpn_healthv)
2227                                         goto use_lpn;
2228
2229                                 if (best_lpn_sel_prio < lpn->lpn_sel_priority)
2230                                         continue;
2231                                 else if (best_lpn_sel_prio > lpn->lpn_sel_priority)
2232                                         goto use_lpn;
2233
2234                                 if (best_lpn->lpn_seq <= lpn->lpn_seq)
2235                                         continue;
2236 use_lpn:
2237                                 best_lpn_healthv = lpn->lpn_healthv;
2238                                 best_lpn_sel_prio = lpn->lpn_sel_priority;
2239                                 best_lpn = lpn;
2240                                 best_rnet = rnet;
2241                         }
2242
2243                         if (!best_lpn) {
2244                                 CERROR("peer %s has no available nets\n",
2245                                        libcfs_nid2str(sd->sd_dst_nid));
2246                                 return -EHOSTUNREACH;
2247                         }
2248
2249                         sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni,
2250                                                                sd->sd_dst_nid,
2251                                                                lp,
2252                                                                best_lpn->lpn_net_id);
2253                         if (!sd->sd_best_lpni) {
2254                                 CERROR("peer %s is unreachable\n",
2255                                        libcfs_nid2str(sd->sd_dst_nid));
2256                                 return -EHOSTUNREACH;
2257                         }
2258
2259                         /* We're attempting to round robin over the remote peer
2260                          * NIs, so update the final destination we selected
2261                          */
2262                         sd->sd_final_dst_lpni = sd->sd_best_lpni;
2263
2264                         /* Increment the sequence number of the remote lpni so
2265                          * we can round robin over the different interfaces of
2266                          * the remote lpni
2267                          */
2268                         sd->sd_best_lpni->lpni_seq++;
2269                 }
2270
2271                 /*
2272                  * find the best route. Restrict the selection on the net of the
2273                  * local NI if we've already picked the local NI to send from.
2274                  * Otherwise, let's pick any route we can find and then find
2275                  * a local NI we can reach the route's gateway on. Any route we
2276                  * select will be reachable by virtue of the restriction we have
2277                  * when adding a route.
2278                  */
2279                 best_route = lnet_find_route_locked(best_rnet,
2280                                                     LNET_NIDNET(src_nid),
2281                                                     sd->sd_best_lpni,
2282                                                     &last_route, &gwni);
2283
2284                 if (!best_route) {
2285                         CERROR("no route to %s from %s\n",
2286                                libcfs_nid2str(dst_nid),
2287                                libcfs_nid2str(src_nid));
2288                         return -EHOSTUNREACH;
2289                 }
2290
2291                 if (!gwni) {
2292                         CERROR("Internal Error. Route expected to %s from %s\n",
2293                                libcfs_nid2str(dst_nid),
2294                                libcfs_nid2str(src_nid));
2295                         return -EFAULT;
2296                 }
2297
2298                 gw = best_route->lr_gateway;
2299                 LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
2300         }
2301
2302         /*
2303          * Discover this gateway if it hasn't already been discovered.
2304          * This means we might delay the message until discovery has
2305          * completed
2306          */
2307         rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
2308         if (rc)
2309                 return rc;
2310
2311         if (!sd->sd_best_ni) {
2312                 lpn = gwni->lpni_peer_net;
2313                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
2314                                                                sd->sd_msg,
2315                                                                sd->sd_md_cpt);
2316                 if (!sd->sd_best_ni) {
2317                         CERROR("Internal Error. Expected local ni on %s but non found: %s\n",
2318                                libcfs_net2str(lpn->lpn_net_id),
2319                                libcfs_nid2str(sd->sd_src_nid));
2320                         return -EFAULT;
2321                 }
2322         }
2323
2324         *gw_lpni = gwni;
2325         *gw_peer = gw;
2326
2327         /*
2328          * increment the sequence numbers since now we're sure we're
2329          * going to use this path
2330          */
2331         if (sd->sd_rtr_nid == LNET_NID_ANY) {
2332                 LASSERT(best_route && last_route);
2333                 best_route->lr_seq = last_route->lr_seq + 1;
2334                 if (best_lpn)
2335                         best_lpn->lpn_seq++;
2336         }
2337
2338         return 0;
2339 }
2340
2341 /*
2342  * Handle two cases:
2343  *
2344  * Case 1:
2345  *  Source specified
2346  *  Remote destination
2347  *  Non-MR destination
2348  *
2349  * Case 2:
2350  *  Source specified
2351  *  Remote destination
2352  *  MR destination
2353  *
2354  * The handling of these two cases is similar. Even though the destination
2355  * can be MR or non-MR, we'll deal directly with the router.
2356  */
2357 static int
2358 lnet_handle_spec_router_dst(struct lnet_send_data *sd)
2359 {
2360         int rc;
2361         struct lnet_peer_ni *gw_lpni = NULL;
2362         struct lnet_peer *gw_peer = NULL;
2363
2364         /* find local NI */
2365         sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
2366         if (!sd->sd_best_ni) {
2367                 CERROR("Can't send to %s: src %s is not a "
2368                        "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
2369                                 libcfs_nid2str(sd->sd_src_nid));
2370                 return -EINVAL;
2371         }
2372
2373         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2374                                      &gw_peer);
2375         if (rc)
2376                 return rc;
2377
2378         if (sd->sd_send_case & NMR_DST)
2379                 /*
2380                  * since the final destination is non-MR let's set its preferred
2381                  * NID before we send
2382                  */
2383                 lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
2384                                          sd->sd_msg);
2385
2386         /*
2387          * We're going to send to the gw found so let's set its
2388          * info
2389          */
2390         sd->sd_peer = gw_peer;
2391         sd->sd_best_lpni = gw_lpni;
2392
2393         return lnet_handle_send(sd);
2394 }
2395
2396 struct lnet_ni *
2397 lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
2398                                struct lnet_msg *msg, bool discovery)
2399 {
2400         struct lnet_peer_net *lpn = NULL;
2401         struct lnet_peer_net *best_lpn = NULL;
2402         struct lnet_net *net = NULL;
2403         struct lnet_net *best_net = NULL;
2404         struct lnet_ni *best_ni = NULL;
2405         int best_lpn_healthv = 0;
2406         int best_net_healthv = 0;
2407         int net_healthv;
2408         __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2409         __u32 lpn_sel_prio;
2410         __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
2411         __u32 net_sel_prio;
2412         bool exit = false;
2413
2414         /*
2415          * The peer can have multiple interfaces, some of them can be on
2416          * the local network and others on a routed network. We should
2417          * prefer the local network. However if the local network is not
2418          * available then we need to try the routed network
2419          */
2420
2421         /* go through all the peer nets and find the best_ni */
2422         list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
2423                 /*
2424                  * The peer's list of nets can contain non-local nets. We
2425                  * want to only examine the local ones.
2426                  */
2427                 net = lnet_get_net_locked(lpn->lpn_net_id);
2428                 if (!net)
2429                         continue;
2430
2431                 lpn_sel_prio = lpn->lpn_sel_priority;
2432                 net_healthv = lnet_get_net_healthv_locked(net);
2433                 net_sel_prio = net->net_sel_priority;
2434
2435                 /*
2436                  * if this is a discovery message and lp_disc_net_id is
2437                  * specified then use that net to send the discovery on.
2438                  */
2439                 if (peer->lp_disc_net_id == lpn->lpn_net_id &&
2440                     discovery) {
2441                         exit = true;
2442                         goto select_lpn;
2443                 }
2444
2445                 if (!best_lpn)
2446                         goto select_lpn;
2447
2448                 /* always select the lpn with the best health */
2449                 if (best_lpn_healthv > lpn->lpn_healthv)
2450                         continue;
2451                 else if (best_lpn_healthv < lpn->lpn_healthv)
2452                         goto select_lpn;
2453
2454                 /* select the preferred peer and local nets */
2455                 if (best_lpn_sel_prio < lpn_sel_prio)
2456                         continue;
2457                 else if (best_lpn_sel_prio > lpn_sel_prio)
2458                         goto select_lpn;
2459
2460                 if (best_net_healthv > net_healthv)
2461                         continue;
2462                 else if (best_net_healthv < net_healthv)
2463                         goto select_lpn;
2464
2465                 if (best_net_sel_prio < net_sel_prio)
2466                         continue;
2467                 else if (best_net_sel_prio > net_sel_prio)
2468                         goto select_lpn;
2469
2470                 if (best_lpn->lpn_seq < lpn->lpn_seq)
2471                         continue;
2472                 else if (best_lpn->lpn_seq > lpn->lpn_seq)
2473                         goto select_lpn;
2474
2475                 /* round robin over the local networks */
2476                 if (best_net->net_seq <= net->net_seq)
2477                         continue;
2478
2479 select_lpn:
2480                 best_net_healthv = net_healthv;
2481                 best_net_sel_prio = net_sel_prio;
2482                 best_lpn_healthv = lpn->lpn_healthv;
2483                 best_lpn_sel_prio = lpn_sel_prio;
2484                 best_lpn = lpn;
2485                 best_net = net;
2486
2487                 if (exit)
2488                         break;
2489         }
2490
2491         if (best_lpn) {
2492                 /* Select the best NI on the same net as best_lpn chosen
2493                  * above
2494                  */
2495                 best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn,
2496                                                         msg, md_cpt);
2497         }
2498
2499         return best_ni;
2500 }
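
/*
 * Worked example (illustration only; values hypothetical): for a peer
 * reachable over two local nets A and B, the loop above prefers A when
 *
 *         lpn health    A: 3  B: 3  -> tie
 *         lpn sel prio  A: 0  B: 0  -> tie (lower value preferred)
 *         net health    A: 4  B: 2  -> A wins
 *
 * and the remaining checks (net sel prio, lpn_seq, net_seq round
 * robin) are never reached.  A discovery message short-circuits all of
 * this and uses lp_disc_net_id when it matches one of the peer's nets.
 */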
2501
2502 static struct lnet_ni *
2503 lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
2504 {
2505         struct lnet_ni *best_ni = NULL;
2506         struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
2507         struct lnet_peer_ni *lpni_entry;
2508
2509         /*
2510          * We must use a consistent source address when sending to a
2511          * non-MR peer. However, a non-MR peer can have multiple NIDs
2512          * on multiple networks, and we may even need to talk to this
2513          * peer on multiple networks -- certain types of
2514          * load-balancing configuration do this.
2515          *
2516          * So we need to pick the NI the peer prefers for this
2517          * particular network.
2518          */
2519         LASSERT(peer_net);
2520         list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
2521                             lpni_peer_nis) {
2522                 if (lpni_entry->lpni_pref_nnids == 0)
2523                         continue;
2524                 LASSERT(lpni_entry->lpni_pref_nnids == 1);
2525                 best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid,
2526                                                 cpt);
2527                 break;
2528         }
2529
2530         return best_ni;
2531 }
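
/*
 * Minimal sketch of how this pairs with lnet_set_non_mr_pref_nid()
 * (illustration only; names hypothetical):
 *
 *         best_ni = lnet_find_existing_preferred_best_ni(lpni, cpt);
 *         if (!best_ni)
 *                 best_ni = <pick one with the usual selection>;
 *         lnet_set_non_mr_pref_nid(lpni, best_ni, msg);
 *
 * The first send through a given NI records it as the NMR peer_ni's
 * single preferred NID; later sends reuse it so the source address
 * stays consistent across the messages of one RPC.
 */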
2532
2533 /* Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set */
2534 static int
2535 lnet_select_preferred_best_ni(struct lnet_send_data *sd)
2536 {
2537         struct lnet_ni *best_ni = NULL;
2538         struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
2539
2540         /*
2541          * We must use a consistent source address when sending to a
2542          * non-MR peer. However, a non-MR peer can have multiple NIDs
2543          * on multiple networks, and we may even need to talk to this
2544          * peer on multiple networks -- certain types of
2545          * load-balancing configuration do this.
2546          *
2547          * So we need to pick the NI the peer prefers for this
2548          * particular network.
2549          */
2550
2551         best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2552                                                        sd->sd_cpt);
2553
2554         /* if best_ni is still not set just pick one */
2555         if (!best_ni) {
2556                 best_ni =
2557                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2558                                                 sd->sd_best_lpni->lpni_peer_net,
2559                                                 sd->sd_msg,
2560                                                 sd->sd_md_cpt);
2561                 /* If there is no best_ni we don't have a route */
2562                 if (!best_ni) {
2563                         CERROR("no path to %s from net %s\n",
2564                                 libcfs_nidstr(&best_lpni->lpni_nid),
2565                                 libcfs_net2str(best_lpni->lpni_net->net_id));
2566                         return -EHOSTUNREACH;
2567                 }
2568         }
2569
2570         sd->sd_best_ni = best_ni;
2571
2572         /* Set preferred NI if necessary. */
2573         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2574
2575         return 0;
2576 }
2577
2578
2579 /*
2580  * Source not specified
2581  * Local destination
2582  * Non-MR Peer
2583  *
2584  * always use the same source NID for NMR peers
2585  * If we've talked to that peer before then we already have a preferred
2586  * source NI associated with it. Otherwise, we select a preferred local NI
2587  * and store it in the peer
2588  */
2589 static int
2590 lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
2591 {
2592         int rc = 0;
2593
2594         /* sd->sd_best_lpni is already set to the final destination */
2595
2596         /*
2597          * At this point we should've created the peer ni and peer. If we
2598          * can't find it, then something went wrong. Instead of asserting,
2599          * output a relevant message and fail the send
2600          */
2601         if (!sd->sd_best_lpni) {
2602                 CERROR("Internal fault. Unable to send msg %s to %s. "
2603                        "NID not known\n",
2604                        lnet_msgtyp2str(sd->sd_msg->msg_type),
2605                        libcfs_nid2str(sd->sd_dst_nid));
2606                 return -EFAULT;
2607         }
2608
2609         if (sd->sd_msg->msg_routing) {
2610                 /* If I'm forwarding this message then I can choose any NI
2611                  * on the destination peer net
2612                  */
2613                 sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
2614                                                                sd->sd_peer,
2615                                                                sd->sd_best_lpni->lpni_peer_net,
2616                                                                sd->sd_msg,
2617                                                                sd->sd_md_cpt);
2618                 if (!sd->sd_best_ni) {
2619                         CERROR("Unable to forward message to %s. No local NI available\n",
2620                                libcfs_nid2str(sd->sd_dst_nid));
2621                         rc = -EHOSTUNREACH;
2622                 }
2623         } else
2624                 rc = lnet_select_preferred_best_ni(sd);
2625
2626         if (!rc)
2627                 rc = lnet_handle_send(sd);
2628
2629         return rc;
2630 }
2631
2632 static int
2633 lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
2634 {
2635         /*
2636          * NOTE we've already handled the remote peer case. So we only
2637          * need to worry about the local case here.
2638          *
2639          * if we're sending a response, ACK or reply, we need to send it
2640          * to the destination NID given to us. At this point we already
2641          * have the peer_ni we're supposed to send to, so just find the
2642          * best_ni on the peer net and use that. Since we're sending to an
2643          * MR peer then we can just run the selection algorithm on our
2644          * local NIs and pick the best one.
2645          */
2646         if (sd->sd_send_case & SND_RESP) {
2647                 sd->sd_best_ni =
2648                   lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
2649                                                 sd->sd_best_lpni->lpni_peer_net,
2650                                                 sd->sd_msg,
2651                                                 sd->sd_md_cpt);
2652
2653                 if (!sd->sd_best_ni) {
2654                         /*
2655                          * We're not going to deal with not able to send
2656                          * a response to the provided final destination
2657                          */
2658                         CERROR("Can't send response to %s. "
2659                                "No local NI available\n",
2660                                 libcfs_nid2str(sd->sd_dst_nid));
2661                         return -EHOSTUNREACH;
2662                 }
2663
2664                 return lnet_handle_send(sd);
2665         }
2666
2667         /*
2668          * If we get here that means we're sending a fresh request, PUT or
2669          * GET, so we need to run our standard selection algorithm.
2670          * First find the best local interface that's on any of the peer's
2671          * networks.
2672          */
2673         sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
2674                                         sd->sd_md_cpt,
2675                                         sd->sd_msg,
2676                                         lnet_msg_discovery(sd->sd_msg));
2677         if (sd->sd_best_ni) {
2678                 sd->sd_best_lpni =
2679                   lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid,
2680                                       sd->sd_peer,
2681                                       sd->sd_best_ni->ni_net->net_id);
2682
2683                 /*
2684                  * if we're successful in selecting a peer_ni on the local
2685                  * network, then send to it. Otherwise fall through and
2686                  * try and see if we can reach it over another routed
2687                  * network
2688                  */
2689                 if (sd->sd_best_lpni &&
2690                     nid_same(&sd->sd_best_lpni->lpni_nid,
2691                              &the_lnet.ln_loni->ni_nid)) {
2692                         /*
2693                          * in case we initially started with a routed
2694                          * destination, let's reset to local
2695                          */
2696                         sd->sd_send_case &= ~REMOTE_DST;
2697                         sd->sd_send_case |= LOCAL_DST;
2698                         return lnet_handle_lo_send(sd);
2699                 } else if (sd->sd_best_lpni) {
2700                         /*
2701                          * in case we initially started with a routed
2702                          * destination, let's reset to local
2703                          */
2704                         sd->sd_send_case &= ~REMOTE_DST;
2705                         sd->sd_send_case |= LOCAL_DST;
2706                         return lnet_handle_send(sd);
2707                 }
2708
2709                 CERROR("Internal Error. Expected to have a best_lpni: "
2710                        "%s -> %s\n",
2711                        libcfs_nid2str(sd->sd_src_nid),
2712                        libcfs_nid2str(sd->sd_dst_nid));
2713
2714                 return -EFAULT;
2715         }
2716
2717         /*
2718          * Peer doesn't have a local network. Let's see if there is
2719          * a remote network we can reach it on.
2720          */
2721         return PASS_THROUGH;
2722 }
2723
2724 /*
2725  * Case 1:
2726  *      Source NID not specified
2727  *      Local destination
2728  *      MR peer
2729  *
2730  * Case 2:
2731  *      Source NID not specified
2732  *      Remote destination
2733  *      MR peer
2734  *
2735  * In both of these cases if we're sending a response, ACK or REPLY, then
2736  * we need to send to the destination NID provided.
2737  *
2738  * In the remote case let's deal with MR routers.
2739  *
2740  */
2741
2742 static int
2743 lnet_handle_any_mr_dst(struct lnet_send_data *sd)
2744 {
2745         int rc = 0;
2746         struct lnet_peer *gw_peer = NULL;
2747         struct lnet_peer_ni *gw_lpni = NULL;
2748
2749         /*
2750          * handle sending a response to a remote peer here so we don't
2751          * have to worry about it if we hit lnet_handle_any_mr_dsta()
2752          */
2753         if (sd->sd_send_case & REMOTE_DST &&
2754             sd->sd_send_case & SND_RESP) {
2755                 struct lnet_peer_ni *gw;
2756                 struct lnet_peer *gw_peer;
2757
2758                 rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw,
2759                                                   &gw_peer);
2760                 if (rc < 0) {
2761                         CERROR("Can't send response to %s. "
2762                                "No route available\n",
2763                                 libcfs_nid2str(sd->sd_dst_nid));
2764                         return -EHOSTUNREACH;
2765                 } else if (rc > 0) {
2766                         return rc;
2767                 }
2768
2769                 sd->sd_best_lpni = gw;
2770                 sd->sd_peer = gw_peer;
2771
2772                 return lnet_handle_send(sd);
2773         }
2774
2775         /*
2776          * Even though the NID for the peer might not be on a local network,
2777          * since the peer is MR there could be other interfaces on the
2778          * local network. In that case we'd still like to prefer the local
2779          * network over the routed network. If we're unable to do that
2780          * then we select the best router among the different routed networks,
2781          * and if the router is MR then we can deal with it as such.
2782          */
2783         rc = lnet_handle_any_mr_dsta(sd);
2784         if (rc != PASS_THROUGH)
2785                 return rc;
2786
2787         /*
2788          * Now that we must route to the destination, we must consider the
2789          * MR case, where the destination has multiple interfaces, some of
2790          * which we can route to and others we cannot. For this reason we
2791          * need to select a destination NID that we can route to, and if
2792          * there are multiple such NIDs, round robin between them.
2793          */
2794         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2795                                           &gw_peer);
2796         if (rc)
2797                 return rc;
2798
2799         sd->sd_send_case &= ~LOCAL_DST;
2800         sd->sd_send_case |= REMOTE_DST;
2801
2802         sd->sd_peer = gw_peer;
2803         sd->sd_best_lpni = gw_lpni;
2804
2805         return lnet_handle_send(sd);
2806 }
2807
2808 /*
2809  * Source not specified
2810  * Remote destination
2811  * Non-MR peer
2812  *
2813  * Must send to the specified peer NID using the same source NID that
2814  * we've used before. If this is the first time we talk to that peer then
2815  * find the source NI and assign it as the preferred NI for that peer.
2816  */
2817 static int
2818 lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
2819 {
2820         int rc;
2821         struct lnet_peer_ni *gw_lpni = NULL;
2822         struct lnet_peer *gw_peer = NULL;
2823
2824         /*
2825          * Let's see if we have a preferred NI to talk to this NMR peer
2826          */
2827         sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
2828                                                               sd->sd_cpt);
2829
2830         /*
2831          * find the router and that'll find the best NI if we didn't find
2832          * it already.
2833          */
2834         rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
2835                                           &gw_peer);
2836         if (rc)
2837                 return rc;
2838
2839         /*
2840          * set the best_ni we've chosen as the preferred one for
2841          * this peer
2842          */
2843         lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
2844
2845         /* we'll be sending to the gw */
2846         sd->sd_best_lpni = gw_lpni;
2847         sd->sd_peer = gw_peer;
2848
2849         return lnet_handle_send(sd);
2850 }
2851
2852 static int
2853 lnet_handle_send_case_locked(struct lnet_send_data *sd)
2854 {
2855         /*
2856          * turn off the SND_RESP bit.
2857          * It will be checked in the case handling
2858          */
2859         __u32 send_case = sd->sd_send_case &= ~SND_RESP;
2860
2861         CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
2862                 (send_case & SRC_SPEC) ? "Specified: " : "ANY",
2863                 (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
2864                 (send_case & MR_DST) ? "MR: " : "NMR: ",
2865                 libcfs_nid2str(sd->sd_dst_nid),
2866                 (send_case & LOCAL_DST) ? "local" : "routed");
2867
2868         switch (send_case) {
2869         /*
2870          * For all cases where the source is specified, we should always
2871          * use the destination NID, whether it's an MR destination or not,
2872          * since we're continuing a series of related messages for the
2873          * same RPC
2874          */
2875         case SRC_SPEC_LOCAL_NMR_DST:
2876                 return lnet_handle_spec_local_nmr_dst(sd);
2877         case SRC_SPEC_LOCAL_MR_DST:
2878                 return lnet_handle_spec_local_mr_dst(sd);
2879         case SRC_SPEC_ROUTER_NMR_DST:
2880         case SRC_SPEC_ROUTER_MR_DST:
2881                 return lnet_handle_spec_router_dst(sd);
2882         case SRC_ANY_LOCAL_NMR_DST:
2883                 return lnet_handle_any_local_nmr_dst(sd);
2884         case SRC_ANY_LOCAL_MR_DST:
2885         case SRC_ANY_ROUTER_MR_DST:
2886                 return lnet_handle_any_mr_dst(sd);
2887         case SRC_ANY_ROUTER_NMR_DST:
2888                 return lnet_handle_any_router_nmr_dst(sd);
2889         default:
2890                 CERROR("Unknown send case\n");
2891                 return -1;
2892         }
2893 }
2894
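/*
 * Select the pathway for the message: look up (or create) the peer_ni for
 * dst_nid, optionally trigger peer discovery, classify the send case
 * (source specified or not, local or routed destination, MR or non-MR
 * peer) and dispatch to the matching handler via
 * lnet_handle_send_case_locked(). The lnet_net_lock() is taken and
 * released internally, and a REPEAT_SEND result from the handler restarts
 * the selection.
 */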
2895 static int
2896 lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
2897                     struct lnet_msg *msg, lnet_nid_t rtr_nid)
2898 {
2899         struct lnet_peer_ni *lpni;
2900         struct lnet_peer *peer;
2901         struct lnet_send_data send_data;
2902         int cpt, rc;
2903         int md_cpt;
2904         __u32 send_case = 0;
2905         bool final_hop;
2906         bool mr_forwarding_allowed;
2907
2908         memset(&send_data, 0, sizeof(send_data));
2909
2910         /*
2911          * get an initial CPT to use for locking. The idea here is not to
2912          * serialize the calls to select_pathway, so that as many
2913          * operations can run concurrently as possible. To do that we use
2914          * the CPT where this call is being executed. Later on when we
2915          * determine the CPT to use in lnet_message_commit, we switch the
2916          * lock and check if there was any configuration change.  If none,
2917          * then we proceed, if there is, then we restart the operation.
2918          */
2919         cpt = lnet_net_lock_current();
2920
2921         md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
2922         if (md_cpt == CFS_CPT_ANY)
2923                 md_cpt = cpt;
2924
2925 again:
2926
2927         /*
2928          * If we're being asked to send to the loopback interface, there
2929          * is no need to go through any selection. We can just shortcut
2930          * the entire process and send over lolnd
2931          */
2932         send_data.sd_msg = msg;
2933         send_data.sd_cpt = cpt;
2934         if (dst_nid == LNET_NID_LO_0) {
2935                 rc = lnet_handle_lo_send(&send_data);
2936                 lnet_net_unlock(cpt);
2937                 return rc;
2938         }
2939
2940         /*
2941          * find an existing peer_ni, or create one and mark it as having been
2942          * created due to network traffic. This call will create the
2943          * peer->peer_net->peer_ni tree.
2944          */
2945         lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
2946         if (IS_ERR(lpni)) {
2947                 lnet_net_unlock(cpt);
2948                 return PTR_ERR(lpni);
2949         }
2950
2951         /*
2952          * Cache the original src_nid and rtr_nid. If we need to resend the
2953          * message then we'll need to know whether the src_nid was originally
2954          * specified for this message. If it was originally specified,
2955          * then we need to keep using the same src_nid since it's
2956          * continuing the same sequence of messages. Similarly, rtr_nid will
2957          * affect our choice of next hop.
2958          */
2959         msg->msg_src_nid_param = src_nid;
2960         msg->msg_rtr_nid_param = rtr_nid;
2961
2962         /*
2963          * If necessary, perform discovery on the peer that owns this peer_ni.
2964          * Note, this can result in the ownership of this peer_ni changing
2965          * to another peer object.
2966          */
2967         rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
2968         if (rc) {
2969                 lnet_peer_ni_decref_locked(lpni);
2970                 lnet_net_unlock(cpt);
2971                 return rc;
2972         }
2973         lnet_peer_ni_decref_locked(lpni);
2974
2975         peer = lpni->lpni_peer_net->lpn_peer;
2976
2977         /*
2978          * Identify the different send cases
2979          */
2980         if (src_nid == LNET_NID_ANY)
2981                 send_case |= SRC_ANY;
2982         else
2983                 send_case |= SRC_SPEC;
2984
2985         if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
2986                 send_case |= LOCAL_DST;
2987         else
2988                 send_case |= REMOTE_DST;
2989
2990         final_hop = false;
2991         if (msg->msg_routing && (send_case & LOCAL_DST))
2992                 final_hop = true;
2993
2994         /* Determine whether to allow MR forwarding for this message.
2995          * NB: MR forwarding is allowed if the message originator and the
2996          * destination are both MR capable, and the destination lpni that was
2997          * originally chosen by the originator is unhealthy or down.
2998          * We check the MR capability of the destination further below
2999          */
3000         mr_forwarding_allowed = false;
3001         if (final_hop) {
3002                 struct lnet_peer *src_lp;
3003                 struct lnet_peer_ni *src_lpni;
3004
3005                 src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid,
3006                                                   LNET_NID_ANY, cpt);
3007                 /* We don't fail the send if we hit any errors here. We'll just
3008                  * try to send it via non-multi-rail criteria
3009                  */
3010                 if (!IS_ERR(src_lpni)) {
3011                         /* Drop ref taken by lnet_nid2peerni_locked() */
3012                         lnet_peer_ni_decref_locked(src_lpni);
3013                         src_lp = src_lpni->lpni_peer_net->lpn_peer;
3014                         if (lnet_peer_is_multi_rail(src_lp) &&
3015                             !lnet_is_peer_ni_alive(lpni))
3016                                 mr_forwarding_allowed = true;
3017
3018                 }
3019                 CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg,
3020                        mr_forwarding_allowed ? "allowed" : "not allowed");
3021         }
3022
3023         /*
3024          * Deal with the peer as NMR in the following cases:
3025          * 1. the peer is NMR
3026          * 2. We're trying to recover a specific peer NI
3027          * 3. I'm a router sending to the final destination and MR forwarding is
3028          *    not allowed for this message (as determined above).
3029          *    In this case the source of the message would've
3030          *    already selected the final destination so my job
3031          *    is to honor the selection.
3032          */
3033         if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
3034             (final_hop && !mr_forwarding_allowed))
3035                 send_case |= NMR_DST;
3036         else
3037                 send_case |= MR_DST;
3038
3039         if (lnet_msg_is_response(msg))
3040                 send_case |= SND_RESP;
3041
3042         /* assign parameters to the send_data */
3043         send_data.sd_rtr_nid = rtr_nid;
3044         send_data.sd_src_nid = src_nid;
3045         send_data.sd_dst_nid = dst_nid;
3046         send_data.sd_best_lpni = lpni;
3047         /*
3048          * keep a pointer to the final destination in case we're going to
3049          * route, so we'll need to access it later
3050          */
3051         send_data.sd_final_dst_lpni = lpni;
3052         send_data.sd_peer = peer;
3053         send_data.sd_md_cpt = md_cpt;
3054         send_data.sd_send_case = send_case;
3055
3056         rc = lnet_handle_send_case_locked(&send_data);
3057
3058         /*
3059          * Update the local cpt since send_data.sd_cpt might've been
3060          * updated as a result of calling lnet_handle_send_case_locked().
3061          */
3062         cpt = send_data.sd_cpt;
3063
3064         if (rc == REPEAT_SEND)
3065                 goto again;
3066
3067         lnet_net_unlock(cpt);
3068
3069         return rc;
3070 }
3071
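/*
 * Top-level send entry point. Mark the message as being sent, run the
 * selection algorithm in lnet_select_pathway() and, if a credit was
 * immediately available (LNET_CREDIT_OK), hand the message to the LND via
 * lnet_ni_send(). A negative return means the message was not sent and
 * its health status has been set accordingly.
 */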
3072 int
3073 lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
3074 {
3075         lnet_nid_t              dst_nid = msg->msg_target.nid;
3076         int                     rc;
3077
3078         /*
3079          * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
3080          * but we might want to use pre-determined router for ACK/REPLY
3081          * in the future
3082          */
3083         /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
3084         LASSERT(msg->msg_txpeer == NULL);
3085         LASSERT(msg->msg_txni == NULL);
3086         LASSERT(!msg->msg_sending);
3087         LASSERT(!msg->msg_target_is_router);
3088         LASSERT(!msg->msg_receiving);
3089
3090         msg->msg_sending = 1;
3091
3092         LASSERT(!msg->msg_tx_committed);
3093
3094         rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
3095         if (rc < 0) {
3096                 if (rc == -EHOSTUNREACH)
3097                         msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
3098                 else
3099                         msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
3100                 return rc;
3101         }
3102
3103         if (rc == LNET_CREDIT_OK)
3104                 lnet_ni_send(msg->msg_txni, msg);
3105
3106         /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
3107         return 0;
3108 }
3109
3110 enum lnet_mt_event_type {
3111         MT_TYPE_LOCAL_NI = 0,
3112         MT_TYPE_PEER_NI
3113 };
3114
3115 struct lnet_mt_event_info {
3116         enum lnet_mt_event_type mt_type;
3117         lnet_nid_t mt_nid;
3118 };
3119
3120 /* called with res_lock held */
3121 void
3122 lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
3123 {
3124         struct lnet_rsp_tracker *rspt;
3125
3126         /*
3127          * msg has a refcount on the MD so the MD is not going away.
3128          * The rspt queue for the cpt is protected by
3129          * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
3130          */
3131         if (!md->md_rspt_ptr)
3132                 return;
3133
3134         rspt = md->md_rspt_ptr;
3135
3136         /* debug code */
3137         LASSERT(rspt->rspt_cpt == cpt);
3138
3139         md->md_rspt_ptr = NULL;
3140
3141         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3142                 /*
3143                  * The monitor thread has invalidated this handle because the
3144                  * response timed out, but it failed to lookup the MD. That
3145                  * means this response tracker is on the zombie list. We can
3146                  * safely remove it under the resource lock (held by caller) and
3147                  * free the response tracker block.
3148                  */
3149                 list_del(&rspt->rspt_on_list);
3150                 lnet_rspt_free(rspt, cpt);
3151         } else {
3152                 /*
3153                  * invalidate the handle to indicate that a response has been
3154                  * received, which will then lead the monitor thread to clean up
3155                  * the rspt block.
3156                  */
3157                 LNetInvalidateMDHandle(&rspt->rspt_mdh);
3158         }
3159 }
3160
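/*
 * Free any response trackers remaining on the per-CPT zombie lists, then
 * free the per-CPT array itself.
 */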
3161 void
3162 lnet_clean_zombie_rstqs(void)
3163 {
3164         struct lnet_rsp_tracker *rspt, *tmp;
3165         int i;
3166
3167         cfs_cpt_for_each(i, lnet_cpt_table()) {
3168                 list_for_each_entry_safe(rspt, tmp,
3169                                          the_lnet.ln_mt_zombie_rstqs[i],
3170                                          rspt_on_list) {
3171                         list_del(&rspt->rspt_on_list);
3172                         lnet_rspt_free(rspt, i);
3173                 }
3174         }
3175
3176         cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
3177 }
3178
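/*
 * Walk the per-CPT response tracker queues and time out any tracker whose
 * deadline has passed (or unconditionally if LNet is shutting down):
 * unlink the associated MD, account a response timeout and decrement the
 * health of the next-hop peer NI. Trackers whose MD handle has already
 * been invalidated (response received) are simply freed. The walk stops
 * at the first unexpired tracker.
 */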
3179 static void
3180 lnet_finalize_expired_responses(void)
3181 {
3182         struct lnet_libmd *md;
3183         struct lnet_rsp_tracker *rspt, *tmp;
3184         ktime_t now;
3185         int i;
3186
3187         if (the_lnet.ln_mt_rstq == NULL)
3188                 return;
3189
3190         cfs_cpt_for_each(i, lnet_cpt_table()) {
3191                 LIST_HEAD(local_queue);
3192
3193                 lnet_net_lock(i);
3194                 if (!the_lnet.ln_mt_rstq[i]) {
3195                         lnet_net_unlock(i);
3196                         continue;
3197                 }
3198                 list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
3199                 lnet_net_unlock(i);
3200
3201                 now = ktime_get();
3202
3203                 list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
3204                         /*
3205                          * The rspt mdh will be invalidated when a response
3206                          * is received or whenever we want to discard the
3207                          * block. The monitor thread will walk the queue
3208                          * and clean up any rspts with an invalid mdh, but
3209                          * it only walks until the first unexpired rspt
3210                          * block. This means that some rspt blocks which
3211                          * have already received their corresponding
3212                          * responses will linger in the queue until they
3213                          * are eventually cleaned up.
3214                          */
3215                         lnet_res_lock(i);
3216                         if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
3217                                 lnet_res_unlock(i);
3218                                 list_del(&rspt->rspt_on_list);
3219                                 lnet_rspt_free(rspt, i);
3220                                 continue;
3221                         }
3222
3223                         if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
3224                             the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
3225                                 struct lnet_peer_ni *lpni;
3226                                 struct lnet_nid nid;
3227
3228                                 md = lnet_handle2md(&rspt->rspt_mdh);
3229                                 if (!md) {
3230                                         /* MD has been queued for unlink, but
3231                                          * rspt hasn't been detached (Note we've
3232                                          * checked above that the rspt_mdh is
3233                                          * valid). Since we cannot lookup the MD
3234                                          * we're unable to detach the rspt
3235                                          * ourselves. Thus, move the rspt to the
3236                                          * zombie list where we'll wait for
3237                                          * either:
3238                                          *   1. The remaining operations on the
3239                                          *   MD to complete. In this case the
3240                                          *   final operation will result in
3241                                          *   lnet_msg_detach_md()->
3242                                          *   lnet_detach_rsp_tracker() where
3243                                          *   we will clean up this response
3244                                          *   tracker.
3245                                          *   2. LNet to shutdown. In this case
3246                                          *   we'll wait until after all LND Nets
3247                                          *   have shutdown and then we can
3248                                          *   safely free any remaining response
3249                                          *   tracker blocks on the zombie list.
3250                                          * Note: We need to hold the resource
3251                                          * lock when adding to the zombie list
3252                                          * because we may have concurrent access
3253                                          * with lnet_detach_rsp_tracker().
3254                                          */
3255                                         LNetInvalidateMDHandle(&rspt->rspt_mdh);
3256                                         list_move(&rspt->rspt_on_list,
3257                                                   the_lnet.ln_mt_zombie_rstqs[i]);
3258                                         lnet_res_unlock(i);
3259                                         continue;
3260                                 }
3261                                 LASSERT(md->md_rspt_ptr == rspt);
3262                                 md->md_rspt_ptr = NULL;
3263                                 lnet_res_unlock(i);
3264
3265                                 LNetMDUnlink(rspt->rspt_mdh);
3266
3267                                 nid = rspt->rspt_next_hop_nid;
3268
3269                                 list_del(&rspt->rspt_on_list);
3270                                 lnet_rspt_free(rspt, i);
3271
3272                                 /* If we're shutting down we just want to clean
3273                                  * up the rspt blocks
3274                                  */
3275                                 if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
3276                                         continue;
3277
3278                                 lnet_net_lock(i);
3279                                 the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
3280                                 lnet_net_unlock(i);
3281
3282                                 CDEBUG(D_NET,
3283                                        "Response timeout: md = %p: nid = %s\n",
3284                                        md, libcfs_nidstr(&nid));
3285
3286                                 /*
3287                                  * If there is a timeout on the response
3288                                  * from the next hop decrement its health
3289                                  * value so that we don't use it
3290                                  */
3291                                 lnet_net_lock(0);
3292                                 lpni = lnet_peer_ni_find_locked(&nid);
3293                                 if (lpni) {
3294                                         lnet_handle_remote_failure_locked(lpni);
3295                                         lnet_peer_ni_decref_locked(lpni);
3296                                 }
3297                                 lnet_net_unlock(0);
3298                         } else {
3299                                 lnet_res_unlock(i);
3300                                 break;
3301                         }
3302                 }
3303
3304                 if (!list_empty(&local_queue)) {
3305                         lnet_net_lock(i);
3306                         list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
3307                         lnet_net_unlock(i);
3308                 }
3309         }
3310 }
3311
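/*
 * Drain the given resend queue, re-issuing each message through
 * lnet_send() with its original source NID and router NID. If the peer
 * can no longer be found, or the resend itself fails, the message is
 * finalized with an error and will not be resent again. Called with
 * lnet_net_lock(cpt) held; the lock is dropped and re-taken around each
 * send.
 */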
3312 static void
3313 lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
3314 {
3315         struct lnet_msg *msg;
3316
3317         while (!list_empty(resendq)) {
3318                 struct lnet_peer_ni *lpni;
3319
3320                 msg = list_entry(resendq->next, struct lnet_msg,
3321                                  msg_list);
3322
3323                 list_del_init(&msg->msg_list);
3324
3325                 lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
3326                 if (!lpni) {
3327                         lnet_net_unlock(cpt);
3328                         CERROR("Expected that a peer is already created for %s\n",
3329                                libcfs_nid2str(msg->msg_hdr.dest_nid));
3330                         msg->msg_no_resend = true;
3331                         lnet_finalize(msg, -EFAULT);
3332                         lnet_net_lock(cpt);
3333                 } else {
3334                         int rc;
3335
3336                         lnet_peer_ni_decref_locked(lpni);
3337
3338                         lnet_net_unlock(cpt);
3339                         CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
3340                                libcfs_nid2str(msg->msg_src_nid_param),
3341                                libcfs_id2str(msg->msg_target),
3342                                lnet_msgtyp2str(msg->msg_type),
3343                                msg->msg_recovery,
3344                                msg->msg_retry_count);
3345                         rc = lnet_send(msg->msg_src_nid_param, msg,
3346                                        msg->msg_rtr_nid_param);
3347                         if (rc) {
3348                                 CERROR("Error sending %s to %s: %d\n",
3349                                        lnet_msgtyp2str(msg->msg_type),
3350                                        libcfs_id2str(msg->msg_target), rc);
3351                                 msg->msg_no_resend = true;
3352                                 lnet_finalize(msg, rc);
3353                         }
3354                         lnet_net_lock(cpt);
3355                         if (!rc)
3356                                 the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
3357                 }
3358         }
3359 }
3360
3361 static void
3362 lnet_resend_pending_msgs(void)
3363 {
3364         int i;
3365
3366         cfs_cpt_for_each(i, lnet_cpt_table()) {
3367                 lnet_net_lock(i);
3368                 lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
3369                 lnet_net_unlock(i);
3370         }
3371 }
3372
3373 /* called with cpt and ni_lock held */
3374 static void
3375 lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
3376 {
3377         struct lnet_handle_md recovery_mdh;
3378
3379         LNetInvalidateMDHandle(&recovery_mdh);
3380
3381         if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
3382             force) {
3383                 recovery_mdh = ni->ni_ping_mdh;
3384                 LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3385         }
3386         lnet_ni_unlock(ni);
3387         lnet_net_unlock(cpt);
3388         if (!LNetMDHandleIsInvalid(recovery_mdh))
3389                 LNetMDUnlink(recovery_mdh);
3390         lnet_net_lock(cpt);
3391         lnet_ni_lock(ni);
3392 }
3393
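/*
 * Attempt to recover unhealthy local NIs. The recovery queue is spliced
 * onto a local list and each NI that is still active, still unhealthy and
 * due for a ping is sent a recovery ping via lnet_send_ping(). NIs that
 * became healthy or are being deleted are dropped from the queue;
 * everything else is put back on ln_mt_localNIRecovq for the next pass.
 */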
3394 static void
3395 lnet_recover_local_nis(void)
3396 {
3397         struct lnet_mt_event_info *ev_info;
3398         LIST_HEAD(processed_list);
3399         LIST_HEAD(local_queue);
3400         struct lnet_handle_md mdh;
3401         struct lnet_ni *tmp;
3402         struct lnet_ni *ni;
3403         lnet_nid_t nid;
3404         int healthv;
3405         int rc;
3406         time64_t now;
3407
3408         /*
3409          * splice the recovery queue on a local queue. We will iterate
3410          * through the local queue and update it as needed. Once we're
3411          * done with the traversal, we'll splice the local queue back on
3412          * the head of the ln_mt_localNIRecovq. Any newly added local NIs
3413          * will be traversed in the next iteration.
3414          */
3415         lnet_net_lock(0);
3416         list_splice_init(&the_lnet.ln_mt_localNIRecovq,
3417                          &local_queue);
3418         lnet_net_unlock(0);
3419
3420         now = ktime_get_seconds();
3421
3422         list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
3423                 /*
3424                  * if an NI is being deleted or it is now healthy, there
3425                  * is no need to keep it around in the recovery queue.
3426                  * The monitor thread is the only thread responsible for
3427                  * removing the NI from the recovery queue.
3428                  * Multiple threads can be adding NIs to the recovery
3429                  * queue.
3430                  */
3431                 healthv = atomic_read(&ni->ni_healthv);
3432
3433                 lnet_net_lock(0);
3434                 lnet_ni_lock(ni);
3435                 if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
3436                     healthv == LNET_MAX_HEALTH_VALUE) {
3437                         list_del_init(&ni->ni_recovery);
3438                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
3439                         lnet_ni_unlock(ni);
3440                         lnet_ni_decref_locked(ni, 0);
3441                         lnet_net_unlock(0);
3442                         continue;
3443                 }
3444
3445                 /*
3446                  * if the local NI failed recovery we must unlink the md.
3447                  * But we want to keep the local_ni on the recovery queue
3448                  * so we can continue the attempts to recover it.
3449                  */
3450                 if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
3451                         lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3452                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
3453                 }
3454
3456                 lnet_ni_unlock(ni);
3457
3458                 if (now < ni->ni_next_ping) {
3459                         lnet_net_unlock(0);
3460                         continue;
3461                 }
3462
3463                 lnet_net_unlock(0);
3464
3465                 CDEBUG(D_NET, "attempting to recover local ni: %s\n",
3466                        libcfs_nidstr(&ni->ni_nid));
3467
3468                 lnet_ni_lock(ni);
3469                 if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
3470                         ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
3471                         lnet_ni_unlock(ni);
3472
3473                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3474                         if (!ev_info) {
3475                                 CERROR("out of memory. Can't recover %s\n",
3476                                        libcfs_nidstr(&ni->ni_nid));
3477                                 lnet_ni_lock(ni);
3478                                 ni->ni_recovery_state &=
3479                                   ~LNET_NI_RECOVERY_PENDING;
3480                                 lnet_ni_unlock(ni);
3481                                 continue;
3482                         }
3483
3484                         mdh = ni->ni_ping_mdh;
3485                         /*
3486                          * Invalidate the ni mdh in case it's deleted.
3487                          * We'll unlink the mdh in this case below.
3488                          */
3489                         LNetInvalidateMDHandle(&ni->ni_ping_mdh);
3490                         /* FIXME need to handle large-addr nid */
3491                         nid = lnet_nid_to_nid4(&ni->ni_nid);
3492
3493                         /*
3494                          * remove the NI from the local queue and drop the
3495                          * reference count to it while we're recovering
3496                          * it. The reason for that is that the NI could
3497                          * be deleted, and the way the code is structured
3498                          * is if we don't drop the NI, then the deletion
3499                          * code will enter a loop waiting for the
3500                          * reference count to be removed while holding the
3501                          * ln_mutex_lock(). When we look up the peer to
3502                          * send to in lnet_select_pathway() we will try to
3503                          * lock the ln_mutex_lock() as well, leading to
3504                          * a deadlock. By dropping the refcount and
3505                          * removing it from the list, we allow for the NI
3506                          * to be removed, then we use the cached NID to
3507                          * look it up again. If it's gone, then we just
3508                          * continue examining the rest of the queue.
3509                          */
3510                         lnet_net_lock(0);
3511                         list_del_init(&ni->ni_recovery);
3512                         lnet_ni_decref_locked(ni, 0);
3513                         lnet_net_unlock(0);
3514
3515                         ev_info->mt_type = MT_TYPE_LOCAL_NI;
3516                         ev_info->mt_nid = nid;
3517                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3518                                             ev_info, the_lnet.ln_mt_handler,
3519                                             true);
3520                         /* lookup the nid again */
3521                         lnet_net_lock(0);
3522                         ni = lnet_nid2ni_locked(nid, 0);
3523                         if (!ni) {
3524                                 /*
3525                                  * the NI has been deleted when we dropped
3526                                  * the ref count
3527                                  */
3528                                 lnet_net_unlock(0);
3529                                 LNetMDUnlink(mdh);
3530                                 continue;
3531                         }
3532                         ni->ni_ping_count++;
3533
3534                         ni->ni_ping_mdh = mdh;
3535                         lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
3536                                                         now);
3537
3538                         if (rc) {
3539                                 lnet_ni_lock(ni);
3540                                 ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3541                                 lnet_ni_unlock(ni);
3542                         }
3543                         lnet_net_unlock(0);
3544                 } else
3545                         lnet_ni_unlock(ni);
3546         }
3547
3548         /*
3549          * put back the remaining NIs on the ln_mt_localNIRecovq to be
3550          * reexamined in the next iteration.
3551          */
3552         list_splice_init(&processed_list, &local_queue);
3553         lnet_net_lock(0);
3554         list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
3555         lnet_net_unlock(0);
3556 }
3557
3558 static int
3559 lnet_resendqs_create(void)
3560 {
3561         struct list_head **resendqs;
3562         resendqs = lnet_create_array_of_queues();
3563
3564         if (!resendqs)
3565                 return -ENOMEM;
3566
3567         lnet_net_lock(LNET_LOCK_EX);
3568         the_lnet.ln_mt_resendqs = resendqs;
3569         lnet_net_unlock(LNET_LOCK_EX);
3570
3571         return 0;
3572 }
3573
3574 static void
3575 lnet_clean_local_ni_recoveryq(void)
3576 {
3577         struct lnet_ni *ni;
3578
3579         /* This is only called when the monitor thread has stopped */
3580         lnet_net_lock(0);
3581
3582         while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) {
3583                 ni = list_entry(the_lnet.ln_mt_localNIRecovq.next,
3584                                 struct lnet_ni, ni_recovery);
3585                 list_del_init(&ni->ni_recovery);
3586                 lnet_ni_lock(ni);
3587                 lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
3588                 lnet_ni_unlock(ni);
3589                 lnet_ni_decref_locked(ni, 0);
3590         }
3591
3592         lnet_net_unlock(0);
3593 }
3594
3595 static void
3596 lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
3597                                      bool force)
3598 {
3599         struct lnet_handle_md recovery_mdh;
3600
3601         LNetInvalidateMDHandle(&recovery_mdh);
3602
3603         if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
3604                 recovery_mdh = lpni->lpni_recovery_ping_mdh;
3605                 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3606         }
3607         spin_unlock(&lpni->lpni_lock);
3608         lnet_net_unlock(cpt);
3609         if (!LNetMDHandleIsInvalid(recovery_mdh))
3610                 LNetMDUnlink(recovery_mdh);
3611         lnet_net_lock(cpt);
3612         spin_lock(&lpni->lpni_lock);
3613 }
3614
3615 static void
3616 lnet_clean_peer_ni_recoveryq(void)
3617 {
3618         struct lnet_peer_ni *lpni, *tmp;
3619
3620         lnet_net_lock(LNET_LOCK_EX);
3621
3622         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
3623                                  lpni_recovery) {
3624                 list_del_init(&lpni->lpni_recovery);
3625                 spin_lock(&lpni->lpni_lock);
3626                 lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
3627                 spin_unlock(&lpni->lpni_lock);
3628                 lnet_peer_ni_decref_locked(lpni);
3629         }
3630
3631         lnet_net_unlock(LNET_LOCK_EX);
3632 }
3633
3634 static void
3635 lnet_clean_resendqs(void)
3636 {
3637         struct lnet_msg *msg, *tmp;
3638         LIST_HEAD(msgs);
3639         int i;
3640
3641         cfs_cpt_for_each(i, lnet_cpt_table()) {
3642                 lnet_net_lock(i);
3643                 list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
3644                 lnet_net_unlock(i);
3645                 list_for_each_entry_safe(msg, tmp, &msgs, msg_list) {
3646                         list_del_init(&msg->msg_list);
3647                         msg->msg_no_resend = true;
3648                         lnet_finalize(msg, -ESHUTDOWN);
3649                 }
3650         }
3651
3652         cfs_percpt_free(the_lnet.ln_mt_resendqs);
3653 }
3654
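/*
 * Attempt to recover unhealthy peer NIs, mirroring the local NI recovery
 * above: splice ln_mt_peerNIRecovq onto a local list, send a recovery
 * ping to each peer NI that is not being deleted, is still unhealthy and
 * is due for a ping, then splice the remainder back onto the queue.
 */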
3655 static void
3656 lnet_recover_peer_nis(void)
3657 {
3658         struct lnet_mt_event_info *ev_info;
3659         LIST_HEAD(processed_list);
3660         LIST_HEAD(local_queue);
3661         struct lnet_handle_md mdh;
3662         struct lnet_peer_ni *lpni;
3663         struct lnet_peer_ni *tmp;
3664         lnet_nid_t nid;
3665         int healthv;
3666         int rc;
3667         time64_t now;
3668
3669         /*
3670          * Always use cpt 0 for locking across all interactions with
3671          * ln_mt_peerNIRecovq
3672          */
3673         lnet_net_lock(0);
3674         list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
3675                          &local_queue);
3676         lnet_net_unlock(0);
3677
3678         now = ktime_get_seconds();
3679
3680         list_for_each_entry_safe(lpni, tmp, &local_queue,
3681                                  lpni_recovery) {
3682                 /*
3683                  * The same protection strategy is used here as is in the
3684                  * local recovery case.
3685                  */
3686                 lnet_net_lock(0);
3687                 healthv = atomic_read(&lpni->lpni_healthv);
3688                 spin_lock(&lpni->lpni_lock);
3689                 if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
3690                     healthv == LNET_MAX_HEALTH_VALUE) {
3691                         list_del_init(&lpni->lpni_recovery);
3692                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
3693                         spin_unlock(&lpni->lpni_lock);
3694                         lnet_peer_ni_decref_locked(lpni);
3695                         lnet_net_unlock(0);
3696                         continue;
3697                 }
3698
3699                 /*
3700                  * If the peer NI has failed recovery we must unlink the
3701                  * md. But we want to keep the peer ni on the recovery
3702                  * queue so we can try to continue recovering it
3703                  */
3704                 if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
3705                         lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
3706                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
3707                 }
3708
3709                 spin_unlock(&lpni->lpni_lock);
3710
3711                 if (now < lpni->lpni_next_ping) {
3712                         lnet_net_unlock(0);
3713                         continue;
3714                 }
3715
3716                 lnet_net_unlock(0);
3717
3718                 /*
3719                  * NOTE: we're racing with peer deletion from user space.
3720                  * It's possible that a peer is deleted after we check its
3721                  * state. In this case the recovery can create a new peer
3722                  */
3723                 spin_lock(&lpni->lpni_lock);
3724                 if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
3725                     !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
3726                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
3727                         spin_unlock(&lpni->lpni_lock);
3728
3729                         LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
3730                         if (!ev_info) {
3731                                 CERROR("out of memory. Can't recover %s\n",
3732                                        libcfs_nidstr(&lpni->lpni_nid));
3733                                 spin_lock(&lpni->lpni_lock);
3734                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3735                                 spin_unlock(&lpni->lpni_lock);
3736                                 continue;
3737                         }
3738
3739                         /* look at the comments in lnet_recover_local_nis() */
3740                         mdh = lpni->lpni_recovery_ping_mdh;
3741                         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
3742                         /* FIXME handle large-addr nid */
3743                         nid = lnet_nid_to_nid4(&lpni->lpni_nid);
3744                         lnet_net_lock(0);
3745                         list_del_init(&lpni->lpni_recovery);
3746                         lnet_peer_ni_decref_locked(lpni);
3747                         lnet_net_unlock(0);
3748
3749                         ev_info->mt_type = MT_TYPE_PEER_NI;
3750                         ev_info->mt_nid = nid;
3751                         rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
3752                                             ev_info, the_lnet.ln_mt_handler,
3753                                             true);
3754                         lnet_net_lock(0);
3755                         /*
3756                          * lnet_find_peer_ni_locked() grabs a refcount for
3757                          * us. No need to take it explicitly.
3758                          */
3759                         lpni = lnet_find_peer_ni_locked(nid);
3760                         if (!lpni) {
3761                                 lnet_net_unlock(0);
3762                                 LNetMDUnlink(mdh);
3763                                 continue;
3764                         }
3765
3766                         lpni->lpni_ping_count++;
3767
3768                         lpni->lpni_recovery_ping_mdh = mdh;
3769
3770                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
3771                                                              &processed_list,
3772                                                              now);
3773                         if (rc) {
3774                                 spin_lock(&lpni->lpni_lock);
3775                                 lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3776                                 spin_unlock(&lpni->lpni_lock);
3777                         }
3778
3779                         /* Drop the ref taken by lnet_find_peer_ni_locked() */
3780                         lnet_peer_ni_decref_locked(lpni);
3781                         lnet_net_unlock(0);
3782                 } else
3783                         spin_unlock(&lpni->lpni_lock);
3784         }
3785
3786         list_splice_init(&processed_list, &local_queue);
3787         lnet_net_lock(0);
3788         list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
3789         lnet_net_unlock(0);
3790 }
3791
3792 static int
3793 lnet_monitor_thread(void *arg)
3794 {
3795         time64_t rsp_timeout = 0;
3796         time64_t now;
3797
3798         wait_for_completion(&the_lnet.ln_started);
3799         /*
3800          * The monitor thread takes care of the following:
3801          *  1. Checks the aliveness of routers
3802          *  2. Checks if there are messages on the resend queue to resend
3803          *     them.
3804          *  3. Check if there are any NIs on the local recovery queue and
3805          *     pings them
3806          *  4. Checks if there are any NIs on the remote recovery queue
3807          *     and pings them.
3808          */
3809         while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
3810                 now = ktime_get_real_seconds();
3811
3812                 if (lnet_router_checker_active())
3813                         lnet_check_routers();
3814
3815                 lnet_resend_pending_msgs();
3816
3817                 if (now >= rsp_timeout) {
3818                         lnet_finalize_expired_responses();
3819                         rsp_timeout = now + (lnet_transaction_timeout / 2);
3820                 }
3821
3822                 lnet_recover_local_nis();
3823                 lnet_recover_peer_nis();
3824
3825                 /*
3826                  * TODO do we need to check if we should sleep without
3827                  * timeout?  Technically, an active system will always
3828                  * have messages in flight so this check will always
3829                  * evaluate to false. And on an idle system do we care
3830                  * if we wake up every 1 second? Although, we've seen
3831                  * cases where we get a complaint that an idle thread
3832                  * is waking up unnecessarily.
3833                  */
3834                 wait_for_completion_interruptible_timeout(
3835                         &the_lnet.ln_mt_wait_complete,
3836                         cfs_time_seconds(1));
3837                 /* Must re-init the completion before testing anything,
3838                  * including ln_mt_state.
3839                  */
3840                 reinit_completion(&the_lnet.ln_mt_wait_complete);
3841         }
3842
3843         /* Shutting down */
3844         lnet_net_lock(LNET_LOCK_EX);
3845         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
3846         lnet_net_unlock(LNET_LOCK_EX);
3847
3848         /* signal that the monitor thread is exiting */
3849         up(&the_lnet.ln_mt_signal);
3850
3851         return 0;
3852 }
3853
3854 /*
3855  * lnet_send_ping
3856  * Sends a ping.
3857  * Returns == 0 if success
3858  * Returns > 0 if LNetMDBind or prior fails
3859  * Returns < 0 if LNetGet fails
3860  */
3861 int
3862 lnet_send_ping(lnet_nid_t dest_nid,
3863                struct lnet_handle_md *mdh, int nnis,
3864                void *user_data, lnet_handler_t handler, bool recovery)
3865 {
3866         struct lnet_md md = { NULL };
3867         struct lnet_process_id id;
3868         struct lnet_ping_buffer *pbuf;
3869         int rc;
3870
3871         if (dest_nid == LNET_NID_ANY) {
3872                 rc = -EHOSTUNREACH;
3873                 goto fail_error;
3874         }
3875
3876         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
3877         if (!pbuf) {
3878                 rc = ENOMEM;
3879                 goto fail_error;
3880         }
3881
3882         /* initialize md content */
3883         md.start     = &pbuf->pb_info;
3884         md.length    = LNET_PING_INFO_SIZE(nnis);
3885         md.threshold = 2; /* GET/REPLY */
3886         md.max_size  = 0;
3887         md.options   = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
3888         md.user_ptr  = user_data;
3889         md.handler   = handler;
3890
3891         rc = LNetMDBind(&md, LNET_UNLINK, mdh);
3892         if (rc) {
3893                 lnet_ping_buffer_decref(pbuf);
3894                 CERROR("Can't bind MD: %d\n", rc);
3895                 rc = -rc; /* change the rc to positive */
3896                 goto fail_error;
3897         }
3898         id.pid = LNET_PID_LUSTRE;
3899         id.nid = dest_nid;
3900
3901         rc = LNetGet(LNET_NID_ANY, *mdh, id,
3902                      LNET_RESERVED_PORTAL,
3903                      LNET_PROTO_PING_MATCHBITS, 0, recovery);
3904
3905         if (rc)
3906                 goto fail_unlink_md;
3907
3908         return 0;
3909
3910 fail_unlink_md:
3911         LNetMDUnlink(*mdh);
3912         LNetInvalidateMDHandle(mdh);
3913 fail_error:
3914         return rc;
3915 }
3916
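/*
 * Common handling for recovery ping events. Look up the local NI or peer
 * NI identified by ev_info and update its recovery state: clear the
 * PENDING flag when the ping completes (or the send fails), set the
 * FAILED flag on error, and for a successful local NI reply bump the
 * NI's health value directly, since lnet_finalize() cannot do it there.
 */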
3917 static void
3918 lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info,
3919                            int status, bool send, bool unlink_event)
3920 {
3921         lnet_nid_t nid = ev_info->mt_nid;
3922
3923         if (ev_info->mt_type == MT_TYPE_LOCAL_NI) {
3924                 struct lnet_ni *ni;
3925
3926                 lnet_net_lock(0);
3927                 ni = lnet_nid2ni_locked(nid, 0);
3928                 if (!ni) {
3929                         lnet_net_unlock(0);
3930                         return;
3931                 }
3932                 lnet_ni_lock(ni);
3933                 if (!send || status != 0)
3934                         ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
3935                 if (status)
3936                         ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED;
3937                 lnet_ni_unlock(ni);
3938                 lnet_net_unlock(0);
3939
3940                 if (status != 0) {
3941                         CERROR("local NI (%s) recovery failed with %d\n",
3942                                libcfs_nid2str(nid), status);
3943                         return;
3944                 }
3945                 /*
3946                  * need to increment healthv for the ni here, because in
3947                  * the lnet_finalize() path we don't have access to this
3948                  * NI. And in order to get access to it, we'll need to
3949                  * carry forward too much information.
3950                  * In the peer case, it'll naturally be incremented
3951                  */
3952                 if (!unlink_event)
3953                         lnet_inc_healthv(&ni->ni_healthv,
3954                                          lnet_health_sensitivity);
3955         } else {
3956                 struct lnet_peer_ni *lpni;
3957                 int cpt;
3958
3959                 cpt = lnet_net_lock_current();
3960                 lpni = lnet_find_peer_ni_locked(nid);
3961                 if (!lpni) {
3962                         lnet_net_unlock(cpt);
3963                         return;
3964                 }
3965                 spin_lock(&lpni->lpni_lock);
3966                 if (!send || status != 0)
3967                         lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
3968                 if (status)
3969                         lpni->lpni_state |= LNET_PEER_NI_RECOVERY_FAILED;
3970                 spin_unlock(&lpni->lpni_lock);
3971                 lnet_peer_ni_decref_locked(lpni);
3972                 lnet_net_unlock(cpt);
3973
3974                 if (status != 0)
3975                         CERROR("peer NI (%s) recovery failed with %d\n",
3976                                libcfs_nid2str(nid), status);
3977         }
3978 }
3979
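/*
 * Event handler attached to recovery ping MDs. REPLY and UNLINK events
 * feed into lnet_handle_recovery_reply() as ping completions, SEND events
 * report whether the ping went out, and once the MD is unlinked the event
 * info and ping buffer are freed.
 */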
3980 void
3981 lnet_mt_event_handler(struct lnet_event *event)
3982 {
3983         struct lnet_mt_event_info *ev_info = event->md_user_ptr;
3984         struct lnet_ping_buffer *pbuf;
3985
3986         /* TODO: remove assert */
3987         LASSERT(event->type == LNET_EVENT_REPLY ||
3988                 event->type == LNET_EVENT_SEND ||
3989                 event->type == LNET_EVENT_UNLINK);
3990
3991         CDEBUG(D_NET, "Received event: %d status: %d\n", event->type,
3992                event->status);
3993
3994         switch (event->type) {
3995         case LNET_EVENT_UNLINK:
3996                 CDEBUG(D_NET, "%s recovery ping unlinked\n",
3997                        libcfs_nid2str(ev_info->mt_nid));
3998                 /* fallthrough */
3999         case LNET_EVENT_REPLY:
4000                 lnet_handle_recovery_reply(ev_info, event->status, false,
4001                                            event->type == LNET_EVENT_UNLINK);
4002                 break;
4003         case LNET_EVENT_SEND:
4004                 CDEBUG(D_NET, "%s recovery message sent %s:%d\n",
4005                                libcfs_nid2str(ev_info->mt_nid),
4006                                (event->status) ? "unsuccessfully" :
4007                                "successfully", event->status);
4008                 lnet_handle_recovery_reply(ev_info, event->status, true, false);
4009                 break;
4010         default:
4011                 CERROR("Unexpected event: %d\n", event->type);
4012                 break;
4013         }
4014         if (event->unlinked) {
4015                 LIBCFS_FREE(ev_info, sizeof(*ev_info));
4016                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
4017                 lnet_ping_buffer_decref(pbuf);
4018         }
4019 }
4020
4021 static int
4022 lnet_rsp_tracker_create(void)
4023 {
4024         struct list_head **rstqs;
4025         rstqs = lnet_create_array_of_queues();
4026
4027         if (!rstqs)
4028                 return -ENOMEM;
4029
4030         the_lnet.ln_mt_rstq = rstqs;
4031
4032         return 0;
4033 }
4034
4035 static void
4036 lnet_rsp_tracker_clean(void)
4037 {
4038         lnet_finalize_expired_responses();
4039
4040         cfs_percpt_free(the_lnet.ln_mt_rstq);
4041         the_lnet.ln_mt_rstq = NULL;
4042 }
4043
4044 int lnet_monitor_thr_start(void)
4045 {
4046         int rc = 0;
4047         struct task_struct *task;
4048
4049         if (the_lnet.ln_mt_state != LNET_MT_STATE_SHUTDOWN)
4050                 return -EALREADY;
4051
4052         rc = lnet_resendqs_create();
4053         if (rc)
4054                 return rc;
4055
4056         rc = lnet_rsp_tracker_create();
4057         if (rc)
4058                 goto clean_queues;
4059
4060         sema_init(&the_lnet.ln_mt_signal, 0);
4061
4062         lnet_net_lock(LNET_LOCK_EX);
4063         the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
4064         lnet_net_unlock(LNET_LOCK_EX);
4065         task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
4066         if (IS_ERR(task)) {
4067                 rc = PTR_ERR(task);
4068                 CERROR("Can't start monitor thread: %d\n", rc);
4069                 goto clean_thread;
4070         }
4071
4072         return 0;
4073
4074 clean_thread:
4075         lnet_net_lock(LNET_LOCK_EX);
4076         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4077         lnet_net_unlock(LNET_LOCK_EX);
4078         /* block until event callback signals exit */
4079         down(&the_lnet.ln_mt_signal);
4080         /* clean up */
4081         lnet_net_lock(LNET_LOCK_EX);
4082         the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
4083         lnet_net_unlock(LNET_LOCK_EX);
4084         lnet_rsp_tracker_clean();
4085         lnet_clean_local_ni_recoveryq();
4086         lnet_clean_peer_ni_recoveryq();
4087         lnet_clean_resendqs();
4088         the_lnet.ln_mt_handler = NULL;
4089         return rc;
4090 clean_queues:
4091         lnet_rsp_tracker_clean();
4092         lnet_clean_local_ni_recoveryq();
4093         lnet_clean_peer_ni_recoveryq();
4094         lnet_clean_resendqs();
4095         return rc;
4096 }
4097
4098 void lnet_monitor_thr_stop(void)
4099 {
4100         if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
4101                 return;
4102
4103         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
4104         lnet_net_lock(LNET_LOCK_EX);
4105         the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
4106         lnet_net_unlock(LNET_LOCK_EX);
4107
4108         /* tell the monitor thread that we're shutting down */
4109         complete(&the_lnet.ln_mt_wait_complete);
4110
4111         /* block until monitor thread signals that it's done */
4112         down(&the_lnet.ln_mt_signal);
4113         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
4114
4115         /* perform cleanup tasks */
4116         lnet_rsp_tracker_clean();
4117         lnet_clean_local_ni_recoveryq();
4118         lnet_clean_peer_ni_recoveryq();
4119         lnet_clean_resendqs();
4120 }
4121
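/*
 * Account a dropped message in the NI and per-CPT drop counters, then ask
 * the LND to receive and discard the payload (NULL msg, zero-length sink).
 */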
4122 void
4123 lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob,
4124                   __u32 msg_type)
4125 {
4126         lnet_net_lock(cpt);
4127         lnet_incr_stats(&ni->ni_stats, msg_type, LNET_STATS_TYPE_DROP);
4128         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
4129         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length += nob;
4130         lnet_net_unlock(cpt);
4131
4132         lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
4133 }
4134
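/*
 * Deliver a matched PUT: attach the payload buffer if any bytes are
 * wanted, generate the PUT event, decide whether an ACK must be sent back
 * during lnet_finalize(), and hand the receive to the LND.
 */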
4135 static void
4136 lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
4137 {
4138         struct lnet_hdr *hdr = &msg->msg_hdr;
4139
4140         if (msg->msg_wanted != 0)
4141                 lnet_setpayloadbuffer(msg);
4142
4143         lnet_build_msg_event(msg, LNET_EVENT_PUT);
4144
4145         /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
4146          * it back into the ACK during lnet_finalize() */
4147         msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
4148                         (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
4149
4150         lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
4151                      msg->msg_offset, msg->msg_wanted, hdr->payload_length);
4152 }
4153
4154 static int
4155 lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
4156 {
4157         struct lnet_hdr         *hdr = &msg->msg_hdr;
4158         struct lnet_match_info  info;
4159         int                     rc;
4160         bool                    ready_delay;
4161
4162         /* Convert put fields to host byte order */
4163         hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
4164         hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
4165         hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);
4166
4167         /* Primary peer NID. */
4168         lnet_nid4_to_nid(msg->msg_initiator, &info.mi_id.nid);
4169         info.mi_id.pid  = hdr->src_pid;
4170         info.mi_opc     = LNET_MD_OP_PUT;
4171         info.mi_portal  = hdr->msg.put.ptl_index;
4172         info.mi_rlength = hdr->payload_length;
4173         info.mi_roffset = hdr->msg.put.offset;
4174         info.mi_mbits   = hdr->msg.put.match_bits;
4175         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4176
4177         msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
4178         ready_delay = msg->msg_rx_ready_delay;
4179
4180  again:
4181         rc = lnet_ptl_match_md(&info, msg);
4182         switch (rc) {
4183         default:
4184                 LBUG();
4185
4186         case LNET_MATCHMD_OK:
4187                 lnet_recv_put(ni, msg);
4188                 return 0;
4189
4190         case LNET_MATCHMD_NONE:
4191                 if (ready_delay)
4192                         /* no eager_recv, or we have already called it; the
4193                          * message should have been attached to the delayed list */
4194                         return 0;
4195
4196                 rc = lnet_ni_eager_recv(ni, msg);
4197                 if (rc == 0) {
4198                         ready_delay = true;
4199                         goto again;
4200                 }
4201                 /* fall through */
4202
4203         case LNET_MATCHMD_DROP:
4204                 CNETERR("Dropping PUT from %s portal %d match %llu"
4205                         " offset %d length %d: %d\n",
4206                         libcfs_idstr(&info.mi_id), info.mi_portal,
4207                         info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
4208
4209                 return -ENOENT; /* -ve: OK but no match */
4210         }
4211 }
4212
4213 static int
4214 lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
4215 {
4216         struct lnet_match_info info;
4217         struct lnet_hdr *hdr = &msg->msg_hdr;
4218         struct lnet_process_id source_id;
4219         struct lnet_handle_wire reply_wmd;
4220         int rc;
4221
4222         /* Convert get fields to host byte order */
4223         hdr->msg.get.match_bits   = le64_to_cpu(hdr->msg.get.match_bits);
4224         hdr->msg.get.ptl_index    = le32_to_cpu(hdr->msg.get.ptl_index);
4225         hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
4226         hdr->msg.get.src_offset   = le32_to_cpu(hdr->msg.get.src_offset);
4227
4228         source_id.nid = hdr->src_nid;
4229         source_id.pid = hdr->src_pid;
4230         /* Primary peer NID */
4231         lnet_nid4_to_nid(msg->msg_initiator, &info.mi_id.nid);
4232         info.mi_id.pid  = hdr->src_pid;
4233         info.mi_opc     = LNET_MD_OP_GET;
4234         info.mi_portal  = hdr->msg.get.ptl_index;
4235         info.mi_rlength = hdr->msg.get.sink_length;
4236         info.mi_roffset = hdr->msg.get.src_offset;
4237         info.mi_mbits   = hdr->msg.get.match_bits;
4238         info.mi_cpt     = lnet_cpt_of_nid(msg->msg_initiator, ni);
4239
4240         rc = lnet_ptl_match_md(&info, msg);
4241         if (rc == LNET_MATCHMD_DROP) {
4242                 CNETERR("Dropping GET from %s portal %d match %llu"
4243                         " offset %d length %d\n",
4244                         libcfs_idstr(&info.mi_id), info.mi_portal,
4245                         info.mi_mbits, info.mi_roffset, info.mi_rlength);
4246                 return -ENOENT; /* -ve: OK but no match */
4247         }
4248
4249         LASSERT(rc == LNET_MATCHMD_OK);
4250
4251         lnet_build_msg_event(msg, LNET_EVENT_GET);
4252
4253         reply_wmd = hdr->msg.get.return_wmd;
4254
4255         lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
4256                        msg->msg_offset, msg->msg_wanted);
4257
4258         msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
4259
4260         if (rdma_get) {
4261                 /* The LND completes the REPLY from her recv procedure */
4262                 lnet_ni_recv(ni, msg->msg_private, msg, 0,
4263                              msg->msg_offset, msg->msg_len, msg->msg_len);
4264                 return 0;
4265         }
4266
4267         lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
4268         msg->msg_receiving = 0;
4269
4270         /* FIXME need to handle large-addr nid */
4271         rc = lnet_send(lnet_nid_to_nid4(&ni->ni_nid), msg, msg->msg_from);
4272         if (rc < 0) {
4273                 /* didn't get as far as lnet_ni_send() */
4274                 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
4275                        libcfs_nidstr(&ni->ni_nid),
4276                        libcfs_idstr(&info.mi_id), rc);
4277
4278                 lnet_finalize(msg, rc);
4279         }
4280
4281         return 0;
4282 }
4283
4284 static int
4285 lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
4286 {
4287         void *private = msg->msg_private;
4288         struct lnet_hdr *hdr = &msg->msg_hdr;
4289         struct lnet_process_id src = {0};
4290         struct lnet_libmd *md;
4291         unsigned int rlength;
4292         unsigned int mlength;
4293         int cpt;
4294
4295         cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
4296         lnet_res_lock(cpt);
4297
4298         src.nid = hdr->src_nid;
4299         src.pid = hdr->src_pid;
4300
4301         /* NB handles only looked up by creator (no flips) */
4302         md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
4303         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4304                 CNETERR("%s: Dropping REPLY from %s for %s "
4305                         "MD %#llx.%#llx\n",
4306                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4307                         (md == NULL) ? "invalid" : "inactive",
4308                         hdr->msg.reply.dst_wmd.wh_interface_cookie,
4309                         hdr->msg.reply.dst_wmd.wh_object_cookie);
4310                 if (md != NULL && md->md_me != NULL)
4311                         CERROR("REPLY MD also attached to portal %d\n",
4312                                md->md_me->me_portal);
4313
4314                 lnet_res_unlock(cpt);
4315                 return -ENOENT; /* -ve: OK but no match */
4316         }
4317
4318         LASSERT(md->md_offset == 0);
4319
4320         rlength = hdr->payload_length;
4321         mlength = min(rlength, md->md_length);
4322
4323         if (mlength < rlength &&
4324             (md->md_options & LNET_MD_TRUNCATE) == 0) {
4325                 CNETERR("%s: Dropping REPLY from %s length %d "
4326                         "for MD %#llx would overflow (%d)\n",
4327                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4328                         rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
4329                         mlength);
4330                 lnet_res_unlock(cpt);
4331                 return -ENOENT; /* -ve: OK but no match */
4332         }
4333
4334         CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
4335                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4336                mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
4337
4338         lnet_msg_attach_md(msg, md, 0, mlength);
4339
4340         if (mlength != 0)
4341                 lnet_setpayloadbuffer(msg);
4342
4343         lnet_res_unlock(cpt);
4344
4345         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
4346
4347         lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
4348         return 0;
4349 }
4350
4351 static int
4352 lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
4353 {
4354         struct lnet_hdr *hdr = &msg->msg_hdr;
4355         struct lnet_process_id src = {0};
4356         struct lnet_libmd *md;
4357         int cpt;
4358
4359         src.nid = hdr->src_nid;
4360         src.pid = hdr->src_pid;
4361
4362         /* Convert ack fields to host byte order */
4363         hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
4364         hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
4365
4366         cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
4367         lnet_res_lock(cpt);
4368
4369         /* NB handles only looked up by creator (no flips) */
4370         md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
4371         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
4372                 /* Don't moan; this is expected */
4373                 CDEBUG(D_NET,
4374                        "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
4375                        libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4376                        (md == NULL) ? "invalid" : "inactive",
4377                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
4378                        hdr->msg.ack.dst_wmd.wh_object_cookie);
4379                 if (md != NULL && md->md_me != NULL)
4380                         CERROR("Source MD also attached to portal %d\n",
4381                                md->md_me->me_portal);
4382
4383                 lnet_res_unlock(cpt);
4384                 return -ENOENT;                  /* -ve! */
4385         }
4386
4387         CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
4388                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
4389                hdr->msg.ack.dst_wmd.wh_object_cookie);
4390
4391         lnet_msg_attach_md(msg, md, 0, 0);
4392
4393         lnet_res_unlock(cpt);
4394
4395         lnet_build_msg_event(msg, LNET_EVENT_ACK);
4396
4397         lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
4398         return 0;
4399 }
4400
4401 /**
4402  * \retval LNET_CREDIT_OK       If \a msg is forwarded
4403  * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is available
4404  * \retval -ve                  error code
4405  */
4406 int
4407 lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
4408 {
4409         int     rc = 0;
4410
4411         if (!the_lnet.ln_routing)
4412                 return -ECANCELED;
4413
4414         if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
4415             lnet_msg2bufpool(msg)->rbp_credits <= 0) {
4416                 if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
4417                         msg->msg_rx_ready_delay = 1;
4418                 } else {
4419                         lnet_net_unlock(msg->msg_rx_cpt);
4420                         rc = lnet_ni_eager_recv(ni, msg);
4421                         lnet_net_lock(msg->msg_rx_cpt);
4422                 }
4423         }
4424
4425         if (rc == 0)
4426                 rc = lnet_post_routed_recv_locked(msg, 0);
4427         return rc;
4428 }
4429
4430 int
4431 lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
4432 {
4433         int     rc;
4434
4435         switch (msg->msg_type) {
4436         case LNET_MSG_ACK:
4437                 rc = lnet_parse_ack(ni, msg);
4438                 break;
4439         case LNET_MSG_PUT:
4440                 rc = lnet_parse_put(ni, msg);
4441                 break;
4442         case LNET_MSG_GET:
4443                 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
4444                 break;
4445         case LNET_MSG_REPLY:
4446                 rc = lnet_parse_reply(ni, msg);
4447                 break;
4448         default: /* unknown message type: lnet_parse() rejects these earlier */
4449                 LASSERT(0);
4450                 return -EPROTO;
4451         }
4452
4453         LASSERT(rc == 0 || rc == -ENOENT);
4454         return rc;
4455 }
4456
4457 char *
4458 lnet_msgtyp2str(int type)
4459 {
4460         switch (type) {
4461         case LNET_MSG_ACK:
4462                 return ("ACK");
4463         case LNET_MSG_PUT:
4464                 return ("PUT");
4465         case LNET_MSG_GET:
4466                 return ("GET");
4467         case LNET_MSG_REPLY:
4468                 return ("REPLY");
4469         case LNET_MSG_HELLO:
4470                 return ("HELLO");
4471         default:
4472                 return ("<UNKNOWN>");
4473         }
4474 }
4475
4476 int
4477 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid4,
4478            void *private, int rdma_req)
4479 {
4480         struct lnet_peer_ni *lpni;
4481         struct lnet_msg *msg;
4482         __u32 payload_length;
4483         lnet_pid_t dest_pid;
4484         lnet_nid_t dest_nid;
4485         lnet_nid_t src_nid;
4486         struct lnet_nid from_nid;
4487         bool push = false;
4488         int for_me;
4489         __u32 type;
4490         int rc = 0;
4491         int cpt;
4492
4493         LASSERT(!in_interrupt());
4494
4495         lnet_nid4_to_nid(from_nid4, &from_nid);
4496
4497         type = le32_to_cpu(hdr->type);
4498         src_nid = le64_to_cpu(hdr->src_nid);
4499         dest_nid = le64_to_cpu(hdr->dest_nid);
4500         dest_pid = le32_to_cpu(hdr->dest_pid);
4501         payload_length = le32_to_cpu(hdr->payload_length);
4502
4503         /* FIXME handle large-addr nids */
4504         for_me = (lnet_nid_to_nid4(&ni->ni_nid) == dest_nid);
4505         cpt = lnet_cpt_of_nid(from_nid4, ni);
4506
4507         CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
4508                 libcfs_nid2str(dest_nid),
4509                 libcfs_nidstr(&ni->ni_nid),
4510                 libcfs_nid2str(src_nid),
4511                 lnet_msgtyp2str(type),
4512                 (for_me) ? "for me" : "routed");
4513
4514         switch (type) {
4515         case LNET_MSG_ACK:
4516         case LNET_MSG_GET:
4517                 if (payload_length > 0) {
4518                         CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
4519                                libcfs_nid2str(from_nid4),
4520                                libcfs_nid2str(src_nid),
4521                                lnet_msgtyp2str(type), payload_length);
4522                         return -EPROTO;
4523                 }
4524                 break;
4525
4526         case LNET_MSG_PUT:
4527         case LNET_MSG_REPLY:
4528                 if (payload_length >
4529                     (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
4530                         CERROR("%s, src %s: bad %s payload %d "
4531                                "(%d max expected)\n",
4532                                libcfs_nid2str(from_nid4),
4533                                libcfs_nid2str(src_nid),
4534                                lnet_msgtyp2str(type),
4535                                payload_length,
4536                                for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
4537                         return -EPROTO;
4538                 }
4539                 break;
4540
4541         default:
4542                 CERROR("%s, src %s: Bad message type 0x%x\n",
4543                        libcfs_nid2str(from_nid4),
4544                        libcfs_nid2str(src_nid), type);
4545                 return -EPROTO;
4546         }
4547
4548         if (the_lnet.ln_routing &&
4549             ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
4550                 lnet_ni_lock(ni);
4551                 spin_lock(&ni->ni_net->net_lock);
4552                 ni->ni_net->net_last_alive = ktime_get_real_seconds();
4553                 spin_unlock(&ni->ni_net->net_lock);
4554                 push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
4555                 lnet_ni_unlock(ni);
4556         }
4557
4558         if (push)
4559                 lnet_push_update_to_peers(1);
4560
4561         /* Regard a bad destination NID as a protocol error.  Senders should
4562          * know what they're doing; if they don't they're misconfigured, buggy
4563          * or malicious so we chop them off at the knees :) */
4564
4565         if (!for_me) {
4566                 if (LNET_NIDNET(dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
4567                         /* should have gone direct */
4568                         CERROR("%s, src %s: Bad dest nid %s "
4569                                "(should have been sent direct)\n",
4570                                 libcfs_nid2str(from_nid4),
4571                                 libcfs_nid2str(src_nid),
4572                                 libcfs_nid2str(dest_nid));
4573                         return -EPROTO;
4574                 }
4575
4576                 if (lnet_islocalnid4(dest_nid)) {
4577                         /* dest is another local NI; sender should have used
4578                          * this node's NID on its own network */
4579                         CERROR("%s, src %s: Bad dest nid %s "
4580                                "(it's my nid but on a different network)\n",
4581                                 libcfs_nid2str(from_nid4),
4582                                 libcfs_nid2str(src_nid),
4583                                 libcfs_nid2str(dest_nid));
4584                         return -EPROTO;
4585                 }
4586
4587                 if (rdma_req && type == LNET_MSG_GET) {
4588                         CERROR("%s, src %s: Bad optimized GET for %s "
4589                                "(final destination must be me)\n",
4590                                 libcfs_nid2str(from_nid4),
4591                                 libcfs_nid2str(src_nid),
4592                                 libcfs_nid2str(dest_nid));
4593                         return -EPROTO;
4594                 }
4595
4596                 if (!the_lnet.ln_routing) {
4597                         CERROR("%s, src %s: Dropping message for %s "
4598                                "(routing not enabled)\n",
4599                                 libcfs_nid2str(from_nid4),
4600                                 libcfs_nid2str(src_nid),
4601                                 libcfs_nid2str(dest_nid));
4602                         goto drop;
4603                 }
4604         }
4605
4606         /* Message looks OK; we're not going to return an error, so we MUST
4607          * call back lnd_recv() come what may... */
4608
4609         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4610             fail_peer(src_nid, 0)) {                    /* shall we now? */
4611                 CERROR("%s, src %s: Dropping %s to simulate failure\n",
4612                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4613                        lnet_msgtyp2str(type));
4614                 goto drop;
4615         }
4616
4617         /* FIXME need to support large-addr nid */
4618         if (!list_empty(&the_lnet.ln_drop_rules) &&
4619             lnet_drop_rule_match(hdr, lnet_nid_to_nid4(&ni->ni_nid), NULL)) {
4620                 CDEBUG(D_NET,
4621                        "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
4622                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4623                        libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
4624                 goto drop;
4625         }
4626
4627         msg = lnet_msg_alloc();
4628         if (msg == NULL) {
4629                 CERROR("%s, src %s: Dropping %s (out of memory)\n",
4630                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4631                        lnet_msgtyp2str(type));
4632                 goto drop;
4633         }
4634
4635         /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
4636          * pointers NULL etc */
4637
4638         msg->msg_type = type;
4639         msg->msg_private = private;
4640         msg->msg_receiving = 1;
4641         msg->msg_rdma_get = rdma_req;
4642         msg->msg_len = msg->msg_wanted = payload_length;
4643         msg->msg_offset = 0;
4644         msg->msg_hdr = *hdr;
4645         /* for building message event */
4646         msg->msg_from = from_nid4;
4647         if (!for_me) {
4648                 msg->msg_target.pid     = dest_pid;
4649                 msg->msg_target.nid     = dest_nid;
4650                 msg->msg_routing        = 1;
4651
4652         } else {
4653                 /* convert common msg->hdr fields to host byteorder */
4654                 msg->msg_hdr.type       = type;
4655                 msg->msg_hdr.src_nid    = src_nid;
4656                 msg->msg_hdr.src_pid    = le32_to_cpu(msg->msg_hdr.src_pid);
4657                 msg->msg_hdr.dest_nid   = dest_nid;
4658                 msg->msg_hdr.dest_pid   = dest_pid;
4659                 msg->msg_hdr.payload_length = payload_length;
4660         }
4661
4662         lnet_net_lock(cpt);
4663         lpni = lnet_peerni_by_nid_locked(&from_nid, &ni->ni_nid, cpt);
4664         if (IS_ERR(lpni)) {
4665                 lnet_net_unlock(cpt);
4666                 rc = PTR_ERR(lpni);
4667                 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
4668                        libcfs_nid2str(from_nid4), libcfs_nid2str(src_nid),
4669                        lnet_msgtyp2str(type), rc);
4670                 lnet_msg_free(msg);
4671                 if (rc == -ESHUTDOWN)
4672                         /* We are shutting down.  Don't do anything more */
4673                         return 0;
4674                 goto drop;
4675         }
4676
4677         /* If this message was forwarded to us from a router then we may need
4678          * to update router aliveness or check for an asymmetrical route
4679          * (or both)
4680          */
4681         if (((lnet_drop_asym_route && for_me) ||
4682              !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
4683             LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid4)) {
4684                 __u32 src_net_id = LNET_NIDNET(src_nid);
4685                 struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
4686                 struct lnet_route *route;
4687                 bool found = false;
4688
4689                 list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
4690                         if (route->lr_net == src_net_id) {
4691                                 found = true;
4692                                 /* If we're transitioning the gateway from
4693                                  * dead -> alive, and discovery is disabled
4694                                  * locally or on the gateway, then we need to
4695                                  * update the cached route aliveness for each
4696                                  * route to the src_nid's net.
4697                                  *
4698                                  * Otherwise, we're only checking for
4699                                  * symmetrical route, and we can break the
4700                                  * loop
4701                                  */
4702                                 if (!gw->lp_alive &&
4703                                     lnet_is_discovery_disabled(gw))
4704                                         lnet_set_route_aliveness(route, true);
4705                                 else
4706                                         break;
4707                         }
4708                 }
4709                 if (lnet_drop_asym_route && for_me && !found) {
4710                         /* Drop ref taken by lnet_peerni_by_nid_locked() */
4711                         lnet_peer_ni_decref_locked(lpni);
4712                         lnet_net_unlock(cpt);
4713                         /* we would not use from_nid to route a message to
4714                          * src_nid
4715                          * => asymmetric routing detected but forbidden
4716                          */
4717                         CERROR("%s, src %s: Dropping asymmetrical route %s\n",
4718                                libcfs_nid2str(from_nid4),
4719                                libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
4720                         lnet_msg_free(msg);
4721                         goto drop;
4722                 }
4723                 if (!gw->lp_alive) {
4724                         struct lnet_peer_net *lpn;
4725                         struct lnet_peer_ni *lpni2;
4726
4727                         gw->lp_alive = true;
4728                         /* Mark all remote NIs on src_nid's net UP */
4729                         lpn = lnet_peer_get_net_locked(gw, src_net_id);
4730                         if (lpn)
4731                                 list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
4732                                                     lpni_peer_nis)
4733                                         lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
4734                 }
4735         }
4736
4737         lpni->lpni_last_alive = ktime_get_seconds();
4738
4739         msg->msg_rxpeer = lpni;
4740         msg->msg_rxni = ni;
4741         lnet_ni_addref_locked(ni, cpt);
4742         /* Multi-Rail: Primary NID of source. */
4743         msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
4744
4745         /*
4746          * Mark the status of this lpni as UP since we received a message
4747          * from it. Normally the ping response reports back the ns_status,
4748          * which the remote marks as up or down; here we cache it directly.
4749          */
4750         msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
4751
4752         lnet_msg_commit(msg, cpt);
4753
4754         /* message delay simulation */
4755         if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
4756                      lnet_delay_rule_match_locked(hdr, msg))) {
4757                 lnet_net_unlock(cpt);
4758                 return 0;
4759         }
4760
4761         if (!for_me) {
4762                 rc = lnet_parse_forward_locked(ni, msg);
4763                 lnet_net_unlock(cpt);
4764
4765                 if (rc < 0)
4766                         goto free_drop;
4767
4768                 if (rc == LNET_CREDIT_OK) {
4769                         lnet_ni_recv(ni, msg->msg_private, msg, 0,
4770                                      0, payload_length, payload_length);
4771                 }
4772                 return 0;
4773         }
4774
4775         lnet_net_unlock(cpt);
4776
4777         rc = lnet_parse_local(ni, msg);
4778         if (rc != 0)
4779                 goto free_drop;
4780         return 0;
4781
4782  free_drop:
4783         LASSERT(msg->msg_md == NULL);
4784         lnet_finalize(msg, rc);
4785
4786  drop:
4787         lnet_drop_message(ni, cpt, private, payload_length, type);
4788         return 0;
4789 }
4790 EXPORT_SYMBOL(lnet_parse);
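
     /*
      * A minimal sketch (not part of this file's code) of how an LND receive
      * path is expected to hand an incoming header to lnet_parse().  The LND
      * name and its rx handler below are hypothetical; only the lnet_parse()
      * call itself reflects the function above.
      *
      *    static int
      *    examplelnd_handle_rx(struct lnet_ni *ni, struct lnet_hdr *hdr,
      *                         lnet_nid_t sender_nid, void *rx_private)
      *    {
      *            int rc;
      *
      *            // rdma_req == 0: not an optimized (RDMA) GET
      *            rc = lnet_parse(ni, hdr, sender_nid, rx_private, 0);
      *            if (rc < 0)
      *                    CERROR("examplelnd: lnet_parse() failed: %d\n", rc);
      *            return rc;
      *    }
      *
      * When lnet_parse() returns 0 it has taken ownership of the message and
      * will eventually call back into the LND's lnd_recv() (possibly with a
      * NULL msg to discard the payload), so the receive buffer must remain
      * valid until then.  A negative return means the header was rejected
      * before that point and lnd_recv() will not be called for it.
      */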
4791
4792 void
4793 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
4794 {
4795         while (!list_empty(head)) {
4796                 struct lnet_process_id id = {0};
4797                 struct lnet_msg *msg;
4798
4799                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4800                 list_del(&msg->msg_list);
4801
4802                 id.nid = msg->msg_hdr.src_nid;
4803                 id.pid = msg->msg_hdr.src_pid;
4804
4805                 LASSERT(msg->msg_md == NULL);
4806                 LASSERT(msg->msg_rx_delayed);
4807                 LASSERT(msg->msg_rxpeer != NULL);
4808                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4809
4810                 CWARN("Dropping delayed PUT from %s portal %d match %llu"
4811                       " offset %d length %d: %s\n",
4812                       libcfs_id2str(id),
4813                       msg->msg_hdr.msg.put.ptl_index,
4814                       msg->msg_hdr.msg.put.match_bits,
4815                       msg->msg_hdr.msg.put.offset,
4816                       msg->msg_hdr.payload_length, reason);
4817
4818                 /* NB I can't drop msg's ref on msg_rxpeer until after I've
4819                  * called lnet_drop_message(), so I just hang onto msg as well
4820                  * until that's done */
4821
4822                 lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
4823                                   msg->msg_private, msg->msg_len,
4824                                   msg->msg_type);
4825
4826                 msg->msg_no_resend = true;
4827                 /*
4828                  * NB: the message will not generate an event because it has
4829                  * no attached MD, but we should still pass an error code so
4830                  * lnet_msg_decommit() can skip counter operations and other checks.
4831                  */
4832                 lnet_finalize(msg, -ENOENT);
4833         }
4834 }
4835
4836 void
4837 lnet_recv_delayed_msg_list(struct list_head *head)
4838 {
4839         while (!list_empty(head)) {
4840                 struct lnet_msg *msg;
4841                 struct lnet_process_id id;
4842
4843                 msg = list_entry(head->next, struct lnet_msg, msg_list);
4844                 list_del(&msg->msg_list);
4845
4846                 /* md won't disappear under me, since each msg
4847                  * holds a ref on it */
4848
4849                 id.nid = msg->msg_hdr.src_nid;
4850                 id.pid = msg->msg_hdr.src_pid;
4851
4852                 LASSERT(msg->msg_rx_delayed);
4853                 LASSERT(msg->msg_md != NULL);
4854                 LASSERT(msg->msg_rxpeer != NULL);
4855                 LASSERT(msg->msg_rxni != NULL);
4856                 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
4857
4858                 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
4859                        "match %llu offset %d length %d.\n",
4860                         libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
4861                         msg->msg_hdr.msg.put.match_bits,
4862                         msg->msg_hdr.msg.put.offset,
4863                         msg->msg_hdr.payload_length);
4864
4865                 lnet_recv_put(msg->msg_rxni, msg);
4866         }
4867 }
4868
4869 static void
4870 lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt,
4871                         struct lnet_libmd *md, struct lnet_handle_md mdh)
4872 {
4873         s64 timeout_ns;
4874         struct lnet_rsp_tracker *local_rspt;
4875
4876         /*
4877          * The MD has a refcount taken by the message, so it's not going away.
4878          * The MD, however, can still be looked up, so we need to secure access
4879          * to md_rspt_ptr by taking the res_lock.
4880          * The rspt can be accessed without protection until it gets
4881          * added to the list.
4882          */
4883
4884         lnet_res_lock(cpt);
4885         local_rspt = md->md_rspt_ptr;
4886         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
4887         if (local_rspt != NULL) {
4888                 /*
4889                  * we already have an rspt attached to the md, so we'll
4890                  * update the deadline on that one.
4891                  */
4892                 lnet_rspt_free(rspt, cpt);
4893         } else {
4894                 /* new md */
4895                 rspt->rspt_mdh = mdh;
4896                 rspt->rspt_cpt = cpt;
4897                 /* store the rspt so we can access it when we get the REPLY */
4898                 md->md_rspt_ptr = rspt;
4899                 local_rspt = rspt;
4900         }
4901         local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
4902
4903         /*
4904          * Add to the list of tracked responses. It's added to the tail of
4905          * the list so that older entries expire first.
4906          */
4907         lnet_net_lock(cpt);
4908         list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
4909         lnet_net_unlock(cpt);
4910         lnet_res_unlock(cpt);
4911 }
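
     /*
      * For illustration only: the deadline recorded above is an absolute
      * ktime, so whichever thread scans ln_mt_rstq can treat an entry as
      * expired with a simple comparison, e.g.:
      *
      *    bool expired = ktime_compare(ktime_get(),
      *                                 rspt->rspt_deadline) > 0;
      *
      * The actual expiry handling lives elsewhere in this file; the snippet
      * only shows the deadline arithmetic implied by lnet_transaction_timeout.
      */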
4912
4913 /**
4914  * Initiate an asynchronous PUT operation.
4915  *
4916  * There are several events associated with a PUT: completion of the send on
4917  * the initiator node (LNET_EVENT_SEND), and when the send completes
4918  * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
4919  * that the operation was accepted by the target. The event LNET_EVENT_PUT is
4920  * used at the target node to indicate the completion of incoming data
4921  * delivery.
4922  *
4923  * The local events will be logged in the EQ associated with the MD pointed to
4924  * by the \a mdh handle. Using an MD without an associated EQ results in these
4925  * events being discarded. In this case, the caller must have another
4926  * mechanism (e.g., a higher level protocol) for determining when it is safe
4927  * to modify the memory region associated with the MD.
4928  *
4929  * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
4930  * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
4931  *
4932  * \param self Indicates the NID of a local interface through which to send
4933  * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
4934  * \param mdh A handle for the MD that describes the memory to be sent. The MD
4935  * must be "free floating" (See LNetMDBind()).
4936  * \param ack Controls whether an acknowledgment is requested.
4937  * Acknowledgments are only sent when they are requested by the initiating
4938  * process and the target MD enables them.
4939  * \param target A process identifier for the target process.
4940  * \param portal The index in the \a target's portal table.
4941  * \param match_bits The match bits to use for MD selection at the target
4942  * process.
4943  * \param offset The offset into the target MD (only used when the target
4944  * MD has the LNET_MD_MANAGE_REMOTE option set).
4945  * \param hdr_data 64 bits of user data that can be included in the message
4946  * header. This data is written to an event queue entry at the target if an
4947  * EQ is present on the matching MD.
4948  *
4949  * \retval  0      Success, and only in this case events will be generated
4950  * and logged to EQ (if it exists).
4951  * \retval -EIO    Simulated failure.
4952  * \retval -ENOMEM Memory allocation failure.
4953  * \retval -ENOENT Invalid MD object.
4954  *
4955  * \see struct lnet_event::hdr_data and lnet_event_kind_t.
4956  */
4957 int
4958 LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
4959         struct lnet_process_id target, unsigned int portal,
4960         __u64 match_bits, unsigned int offset,
4961         __u64 hdr_data)
4962 {
4963         struct lnet_msg *msg;
4964         struct lnet_libmd *md;
4965         int cpt;
4966         int rc;
4967         struct lnet_rsp_tracker *rspt = NULL;
4968
4969         LASSERT(the_lnet.ln_refcount > 0);
4970
4971         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
4972             fail_peer(target.nid, 1)) {                 /* shall we now? */
4973                 CERROR("Dropping PUT to %s: simulated failure\n",
4974                        libcfs_id2str(target));
4975                 return -EIO;
4976         }
4977
4978         msg = lnet_msg_alloc();
4979         if (msg == NULL) {
4980                 CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
4981                        libcfs_id2str(target));
4982                 return -ENOMEM;
4983         }
4984         msg->msg_vmflush = !!(current->flags & PF_MEMALLOC);
4985
4986         cpt = lnet_cpt_of_cookie(mdh.cookie);
4987
4988         if (ack == LNET_ACK_REQ) {
4989                 rspt = lnet_rspt_alloc(cpt);
4990                 if (!rspt) {
4991                         CERROR("Dropping PUT to %s: ENOMEM on response tracker\n",
4992                                 libcfs_id2str(target));
                             lnet_msg_free(msg);
4993                         return -ENOMEM;
4994                 }
4995                 INIT_LIST_HEAD(&rspt->rspt_on_list);
4996         }
4997
4998         lnet_res_lock(cpt);
4999
5000         md = lnet_handle2md(&mdh);
5001         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5002                 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
5003                        match_bits, portal, libcfs_id2str(target),
5004                        md == NULL ? -1 : md->md_threshold);
5005                 if (md != NULL && md->md_me != NULL)
5006                         CERROR("Source MD also attached to portal %d\n",
5007                                md->md_me->me_portal);
5008                 lnet_res_unlock(cpt);
5009
5010                 if (rspt)
5011                         lnet_rspt_free(rspt, cpt);
5012
5013                 lnet_msg_free(msg);
5014                 return -ENOENT;
5015         }
5016
5017         CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
5018
5019         lnet_msg_attach_md(msg, md, 0, 0);
5020
5021         lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
5022
5023         msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
5024         msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
5025         msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
5026         msg->msg_hdr.msg.put.hdr_data = hdr_data;
5027
5028         /* NB handles only looked up by creator (no flips) */
5029         if (ack == LNET_ACK_REQ) {
5030                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5031                         the_lnet.ln_interface_cookie;
5032                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5033                         md->md_lh.lh_cookie;
5034         } else {
5035                 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
5036                         LNET_WIRE_HANDLE_COOKIE_NONE;
5037                 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
5038                         LNET_WIRE_HANDLE_COOKIE_NONE;
5039         }
5040
5041         lnet_res_unlock(cpt);
5042
5043         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5044
5045         if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT,
5046                                                    md->md_options))
5047                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5048         else if (rspt)
5049                 lnet_rspt_free(rspt, cpt);
5050
5051         if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2,
5052                                  CFS_FAIL_ONCE))
5053                 rc = -EIO;
5054         else
5055                 rc = lnet_send(self, msg, LNET_NID_ANY);
5056
5057         if (rc != 0) {
5058                 CNETERR("Error sending PUT to %s: %d\n",
5059                         libcfs_id2str(target), rc);
5060                 msg->msg_no_resend = true;
5061                 lnet_finalize(msg, rc);
5062         }
5063
5064         /* completion will be signalled by an event */
5065         return 0;
5066 }
5067 EXPORT_SYMBOL(LNetPut);
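
     /*
      * A minimal usage sketch for LNetPut(), assuming the caller has already
      * registered an event handler and bound a free-floating MD with
      * LNetMDBind().  "md_handle", "dest_nid", MY_PORTAL and MY_MATCH_BITS
      * are illustrative names, not part of LNet:
      *
      *    struct lnet_process_id dest = {
      *            .nid = dest_nid,
      *            .pid = LNET_PID_LUSTRE,
      *    };
      *    int rc;
      *
      *    rc = LNetPut(LNET_NID_ANY, md_handle, LNET_ACK_REQ, dest,
      *                 MY_PORTAL, MY_MATCH_BITS, 0, 0);
      *    if (rc != 0)
      *            CERROR("LNetPut to %s failed: %d\n",
      *                   libcfs_id2str(dest), rc);
      *
      * A return of 0 only means the send was initiated; completion and any
      * requested ACK are reported via LNET_EVENT_SEND / LNET_EVENT_ACK on
      * the MD's handler.
      */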
5068
5069 /*
5070  * The LND can DMA directly to the GET md (i.e. no REPLY msg).  This
5071  * returns a msg for the LND to pass to lnet_finalize() when the sink
5072  * data has been received.
5073  *
5074  * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
5075  * lnet_finalize() is called on it, so the LND must call this first
5076  */
5077 struct lnet_msg *
5078 lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
5079 {
5080         struct lnet_msg *msg = lnet_msg_alloc();
5081         struct lnet_libmd *getmd = getmsg->msg_md;
5082         struct lnet_process_id peer_id = getmsg->msg_target;
5083         int cpt;
5084
5085         LASSERT(!getmsg->msg_target_is_router);
5086         LASSERT(!getmsg->msg_routing);
5087
5088         if (msg == NULL) {
5089                 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
5090                        libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id));
5091                 goto drop;
5092         }
5093
5094         cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
5095         lnet_res_lock(cpt);
5096
5097         LASSERT(getmd->md_refcount > 0);
5098
5099         if (getmd->md_threshold == 0) {
5100                 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
5101                         libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id),
5102                         getmd);
5103                 lnet_res_unlock(cpt);
5104                 goto drop;
5105         }
5106
5107         LASSERT(getmd->md_offset == 0);
5108
5109         CDEBUG(D_NET, "%s: Reply from %s md %p\n",
5110                libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id), getmd);
5111
5112         /* setup information for lnet_build_msg_event */
5113         msg->msg_initiator =
5114                 lnet_nid_to_nid4(&getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid);
5115         msg->msg_from = peer_id.nid;
5116         msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
5117         msg->msg_hdr.src_nid = peer_id.nid;
5118         msg->msg_hdr.payload_length = getmd->md_length;
5119         msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
5120
5121         lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
5122         lnet_res_unlock(cpt);
5123
5124         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5125
5126         lnet_net_lock(cpt);
5127         lnet_msg_commit(msg, cpt);
5128         lnet_net_unlock(cpt);
5129
5130         lnet_build_msg_event(msg, LNET_EVENT_REPLY);
5131
5132         return msg;
5133
5134  drop:
5135         cpt = lnet_cpt_of_nid(peer_id.nid, ni);
5136
5137         lnet_net_lock(cpt);
5138         lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP);
5139         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_count++;
5140         the_lnet.ln_counters[cpt]->lct_common.lcc_drop_length +=
5141                 getmd->md_length;
5142         lnet_net_unlock(cpt);
5143
5144         if (msg != NULL)
5145                 lnet_msg_free(msg);
5146
5147         return NULL;
5148 }
5149 EXPORT_SYMBOL(lnet_create_reply_msg);
5150
5151 void
5152 lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
5153                        unsigned int len)
5154 {
5155         /* Set the REPLY length, now that the RDMA that elides the REPLY
5156          * message has completed and I know how much data arrived. */
5157         LASSERT(reply != NULL);
5158         LASSERT(reply->msg_type == LNET_MSG_GET);
5159         LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
5160
5161         /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
5162          * the end of my buffer, I might as well be dead. */
5163         LASSERT(len <= reply->msg_ev.mlength);
5164
5165         reply->msg_ev.mlength = len;
5166 }
5167 EXPORT_SYMBOL(lnet_set_reply_msg_len);
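
     /*
      * Sketch of the "optimized GET" flow that lnet_create_reply_msg() and
      * lnet_set_reply_msg_len() support, as seen from a hypothetical LND that
      * RDMAs the sink data itself and therefore never sends an explicit REPLY
      * message.  Only the lnet_* calls are real; the surrounding names are
      * illustrative:
      *
      *    // before finalizing the original GET, create the stand-in REPLY
      *    struct lnet_msg *reply = lnet_create_reply_msg(ni, getmsg);
      *    if (reply == NULL)
      *            return -ENOMEM;     // LND fails the GET instead
      *
      *    // ... later, once the RDMA transfer of 'nob' bytes has completed
      *    lnet_set_reply_msg_len(ni, reply, nob);
      *    lnet_finalize(reply, 0);    // delivers LNET_EVENT_REPLY
      *
      * The original getmsg is still finalized separately by the LND when its
      * own send completes, exactly as for a GET that expects a wire REPLY.
      */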
5168
5169 /**
5170  * Initiate an asynchronous GET operation.
5171  *
5172  * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
5173  * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
5174  * the target node in the REPLY has been written to local MD.
5175  *
5176  * On the target node, an LNET_EVENT_GET is logged when the GET request
5177  * arrives and is accepted into a MD.
5178  *
5179  * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
5180  * \param mdh A handle for the MD that describes the memory into which the
5181  * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
5182  *
5183  * \retval  0      Success, and only in this case events will be generated
5184  * and logged to EQ (if it exists) of the MD.
5185  * \retval -EIO    Simulated failure.
5186  * \retval -ENOMEM Memory allocation failure.
5187  * \retval -ENOENT Invalid MD object.
5188  */
5189 int
5190 LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
5191         struct lnet_process_id target, unsigned int portal,
5192         __u64 match_bits, unsigned int offset, bool recovery)
5193 {
5194         struct lnet_msg *msg;
5195         struct lnet_libmd *md;
5196         struct lnet_rsp_tracker *rspt;
5197         int cpt;
5198         int rc;
5199
5200         LASSERT(the_lnet.ln_refcount > 0);
5201
5202         if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
5203             fail_peer(target.nid, 1))                   /* shall we now? */
5204         {
5205                 CERROR("Dropping GET to %s: simulated failure\n",
5206                        libcfs_id2str(target));
5207                 return -EIO;
5208         }
5209
5210         msg = lnet_msg_alloc();
5211         if (!msg) {
5212                 CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
5213                        libcfs_id2str(target));
5214                 return -ENOMEM;
5215         }
5216
5217         cpt = lnet_cpt_of_cookie(mdh.cookie);
5218
5219         rspt = lnet_rspt_alloc(cpt);
5220         if (!rspt) {
5221                 CERROR("Dropping GET to %s: ENOMEM on response tracker\n",
5222                        libcfs_id2str(target));
                     lnet_msg_free(msg);
5223                 return -ENOMEM;
5224         }
5225         INIT_LIST_HEAD(&rspt->rspt_on_list);
5226
5227         msg->msg_recovery = recovery;
5228
5229         lnet_res_lock(cpt);
5230
5231         md = lnet_handle2md(&mdh);
5232         if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
5233                 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
5234                        match_bits, portal, libcfs_id2str(target),
5235                        md == NULL ? -1 : md->md_threshold);
5236                 if (md != NULL && md->md_me != NULL)
5237                         CERROR("REPLY MD also attached to portal %d\n",
5238                                md->md_me->me_portal);
5239
5240                 lnet_res_unlock(cpt);
5241
5242                 lnet_msg_free(msg);
5243                 lnet_rspt_free(rspt, cpt);
5244                 return -ENOENT;
5245         }
5246
5247         CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
5248
5249         lnet_msg_attach_md(msg, md, 0, 0);
5250
5251         lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
5252
5253         msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
5254         msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
5255         msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
5256         msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
5257
5258         /* NB handles only looked up by creator (no flips) */
5259         msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
5260                 the_lnet.ln_interface_cookie;
5261         msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
5262                 md->md_lh.lh_cookie;
5263
5264         lnet_res_unlock(cpt);
5265
5266         lnet_build_msg_event(msg, LNET_EVENT_SEND);
5267
5268         if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options))
5269                 lnet_attach_rsp_tracker(rspt, cpt, md, mdh);
5270         else
5271                 lnet_rspt_free(rspt, cpt);
5272
5273         rc = lnet_send(self, msg, LNET_NID_ANY);
5274         if (rc < 0) {
5275                 CNETERR("Error sending GET to %s: %d\n",
5276                         libcfs_id2str(target), rc);
5277                 msg->msg_no_resend = true;
5278                 lnet_finalize(msg, rc);
5279         }
5280
5281         /* completion will be signalled by an event */
5282         return 0;
5283 }
5284 EXPORT_SYMBOL(LNetGet);
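
     /*
      * A minimal usage sketch for LNetGet(), mirroring the LNetPut() example
      * above and reusing its "dest", "rc" and "md_handle" declarations; the
      * MD is assumed to be bound with LNetMDBind() and to have a handler:
      *
      *    rc = LNetGet(LNET_NID_ANY, md_handle, dest,
      *                 MY_PORTAL, MY_MATCH_BITS, 0, false);
      *    if (rc != 0)
      *            CERROR("LNetGet from %s failed: %d\n",
      *                   libcfs_id2str(dest), rc);
      *
      * The requested data arrives through an LNET_EVENT_REPLY event on the
      * MD; passing false for the recovery argument marks this as a regular
      * GET rather than a recovery ping.
      */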
5285
5286 /**
5287  * Calculate distance to node at \a dstnid.
5288  *
5289  * \param dstnid Target NID.
5290  * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
5291  * is saved here.
5292  * \param orderp If not NULL, order of the route to reach \a dstnid is saved
5293  * here.
5294  *
5295  * \retval 0 If \a dstnid belongs to a local interface and the reserved option
5296  * local_nid_dist_zero is set (the default).
5297  * \retval positive Distance to the target NID, i.e. the number of hops plus one.
5298  * \retval -EHOSTUNREACH If \a dstnid is not reachable.
5299  */
5300 int
5301 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
5302 {
5303         struct list_head *e;
5304         struct lnet_ni *ni = NULL;
5305         struct lnet_remotenet *rnet;
5306         __u32 dstnet = LNET_NIDNET(dstnid);
5307         int hops;
5308         int cpt;
5309         __u32 order = 2;
5310         struct list_head *rn_list;
5311         bool matched_dstnet = false;
5312
5313         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
5314          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
5315          * keep order 0 free for 0@lo and order 1 free for a local NID
5316          * match */
5317
5318         LASSERT(the_lnet.ln_refcount > 0);
5319
5320         cpt = lnet_net_lock_current();
5321
5322         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
5323                 /* FIXME support large-addr nid */
5324                 if (lnet_nid_to_nid4(&ni->ni_nid) == dstnid) {
5325                         if (srcnidp != NULL)
5326                                 *srcnidp = dstnid;
5327                         if (orderp != NULL) {
5328                                 if (dstnid == LNET_NID_LO_0)
5329                                         *orderp = 0;
5330                                 else
5331                                         *orderp = 1;
5332                         }
5333                         lnet_net_unlock(cpt);
5334
5335                         return local_nid_dist_zero ? 0 : 1;
5336                 }
5337
5338                 if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
5339                         matched_dstnet = true;
5340                         /* We matched the destination net, but we may have
5341                          * additional local NIs to inspect.
5342                          *
5343                          * We record the nid and order as appropriate, but
5344                          * they may be overwritten if a later NI matches dstnid exactly.
5345                          */
5346                         if (srcnidp)
5347                                 /* FIXME support large-addr nids */
5348                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5349
5350                         if (orderp) {
5351                                 /* Check if this NI was originally created
5352                                  * in the current net namespace.
5353                                  * If not, assign an order above 0xffff0000
5354                                  * so that this NI is deprioritized.
5355                                  */
5356                                 if (current->nsproxy &&
5357                                     !net_eq(ni->ni_net_ns,
5358                                             current->nsproxy->net_ns))
5359                                         *orderp = order + 0xffff0000;
5360                                 else
5361                                         *orderp = order;
5362                         }
5363                 }
5364
5365                 order++;
5366         }
5367
5368         if (matched_dstnet) {
5369                 lnet_net_unlock(cpt);
5370                 return 1;
5371         }
5372
5373         rn_list = lnet_net2rnethash(dstnet);
5374         list_for_each(e, rn_list) {
5375                 rnet = list_entry(e, struct lnet_remotenet, lrn_list);
5376
5377                 if (rnet->lrn_net == dstnet) {
5378                         struct lnet_route *route;
5379                         struct lnet_route *shortest = NULL;
5380                         __u32 shortest_hops = LNET_UNDEFINED_HOPS;
5381                         __u32 route_hops;
5382
5383                         LASSERT(!list_empty(&rnet->lrn_routes));
5384
5385                         list_for_each_entry(route, &rnet->lrn_routes,
5386                                             lr_list) {
5387                                 route_hops = route->lr_hops;
5388                                 if (route_hops == LNET_UNDEFINED_HOPS)
5389                                         route_hops = 1;
5390                                 if (shortest == NULL ||
5391                                     route_hops < shortest_hops) {
5392                                         shortest = route;
5393                                         shortest_hops = route_hops;
5394                                 }
5395                         }
5396
5397                         LASSERT(shortest != NULL);
5398                         hops = shortest_hops;
5399                         if (srcnidp != NULL) {
5400                                 struct lnet_net *net;
5401                                 net = lnet_get_net_locked(shortest->lr_lnet);
5402                                 LASSERT(net);
5403                                 ni = lnet_get_next_ni_locked(net, NULL);
5404                                 /* FIXME support large-addr nids */
5405                                 *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
5406                         }
5407                         if (orderp != NULL)
5408                                 *orderp = order;
5409                         lnet_net_unlock(cpt);
5410                         return hops + 1;
5411                 }
5412                 order++;
5413         }
5414
5415         lnet_net_unlock(cpt);
5416         return -EHOSTUNREACH;
5417 }
5418 EXPORT_SYMBOL(LNetDist);
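
     /*
      * A small sketch of how a caller might interpret LNetDist() results
      * (the "peer_nid" below is illustrative):
      *
      *    lnet_nid_t src_nid;
      *    __u32 order;
      *    int dist;
      *
      *    dist = LNetDist(peer_nid, &src_nid, &order);
      *    if (dist < 0)
      *            CERROR("%s unreachable: %d\n",
      *                   libcfs_nid2str(peer_nid), dist);
      *    else
      *            CDEBUG(D_NET, "distance to %s is %d via %s (order %u)\n",
      *                   libcfs_nid2str(peer_nid), dist,
      *                   libcfs_nid2str(src_nid), order);
      *
      * A distance of 0 means peer_nid is one of this node's own NIDs (with
      * local_nid_dist_zero set), 1 generally means the destination network is
      * directly connected, and larger values are the number of router hops
      * plus one.
      */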