/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
        lnet_test_peer_t *tp;
        struct list_head *el;
        struct list_head *next;
        struct list_head  cull;

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                lnet_net_lock(0);
                list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
                lnet_net_unlock(0);
                return 0;
        }

        /* removing entries */
        INIT_LIST_HEAD(&cull);

        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        list_del(&tp->tp_list);
                        list_add(&tp->tp_list, &cull);
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, lnet_test_peer_t, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return 0;
}

static int
fail_peer(lnet_nid_t nid, int outgoing)
{
        lnet_test_peer_t *tp;
        struct list_head *el;
        struct list_head *next;
        struct list_head  cull;
        int               fail = 0;

        INIT_LIST_HEAD(&cull);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
        lnet_net_lock(0);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                list_del(&tp->tp_list);
                                list_add(&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        list_del(&tp->tp_list);
                                        list_add(&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        lnet_net_unlock(0);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
        }

        return fail;
}

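/*
 * Illustrative sketch (not part of the original file): how the test
 * hooks above are typically driven.  lnet_fail_nid(nid, n) arranges for
 * the next 'n' messages to or from 'nid' to be dropped (passing
 * LNET_MD_THRESH_INF makes the failure persistent), and
 * lnet_fail_nid(LNET_NID_ANY, 0) culls every entry again.  The function
 * name below is hypothetical.
 */
static int
example_simulate_peer_failure(lnet_nid_t nid)
{
        int rc;

        /* drop the next 3 messages exchanged with 'nid' */
        rc = lnet_fail_nid(nid, 3);
        if (rc != 0)
                return rc;

        /* ... run the test workload here ... */

        /* remove all failure-simulation entries again */
        return lnet_fail_nid(LNET_NID_ANY, 0);
}
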
unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);
        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
                  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
                  unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT(ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT(nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = MIN(diov->iov_len - doffset,
                               siov->iov_len - soffset);
                this_nob = MIN(this_nob, nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

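/*
 * Illustrative sketch (not part of the original file): gathering a
 * scattered source kvec into one contiguous buffer using the two
 * helpers above.  'example_gather' is a hypothetical name.
 */
static unsigned int
example_gather(char *buf, unsigned int buflen,
               unsigned int nsiov, struct kvec *siov)
{
        struct kvec dst = { .iov_base = buf, .iov_len = buflen };
        /* never copy more than the bytes actually described by 'siov' */
        unsigned int nob = MIN(buflen, lnet_iov_nob(nsiov, siov));

        lnet_copy_iov2iov(1, &dst, 0, nsiov, siov, 0, nob);
        return nob;
}
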
int
lnet_extract_iov(int dst_niov, struct kvec *dst,
                 int src_niov, struct kvec *src,
                 unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->iov_len) {        /* skip initial frags */
                offset -= src->iov_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->iov_len - offset;
                dst->iov_base = ((char *)src->iov_base) + offset;

                if (len <= frag_len) {
                        dst->iov_len = len;
                        return (niov);
                }

                dst->iov_len = frag_len;

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_iov);

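/*
 * Illustrative sketch (not part of the original file): an LND-style
 * caller can use lnet_extract_iov() to describe just the byte range
 * [offset, offset + len) of a payload without copying it.  The names
 * below are hypothetical.
 */
static int
example_map_payload(struct kvec *frags, int nfrags,
                    int src_niov, struct kvec *src,
                    unsigned int offset, unsigned int len)
{
        /* returns the number of 'frags' entries actually used */
        return lnet_extract_iov(nfrags, frags, src_niov, src, offset, len);
}
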
unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
        unsigned int nob = 0;

        LASSERT(niov == 0 || kiov != NULL);
        while (niov-- > 0)
                nob += (kiov++)->kiov_len;

        return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
                    unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;
        char        *daddr = NULL;
        char        *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT(ndiov > 0);
        while (doffset >= diov->kiov_len) {
                doffset -= diov->kiov_len;
                diov++;
                ndiov--;
                LASSERT(ndiov > 0);
        }

        LASSERT(nsiov > 0);
        while (soffset >= siov->kiov_len) {
                soffset -= siov->kiov_len;
                siov++;
                nsiov--;
                LASSERT(nsiov > 0);
        }

        do {
                LASSERT(ndiov > 0);
                LASSERT(nsiov > 0);
                this_nob = MIN(diov->kiov_len - doffset,
                               siov->kiov_len - soffset);
                this_nob = MIN(this_nob, nob);

                if (daddr == NULL)
                        daddr = ((char *)kmap(diov->kiov_page)) +
                                diov->kiov_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)kmap(siov->kiov_page)) +
                                siov->kiov_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->kiov_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        daddr = NULL;
                        kunmap(diov->kiov_page);
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->kiov_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        saddr = NULL;
                        kunmap(siov->kiov_page);
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                kunmap(diov->kiov_page);
        if (saddr != NULL)
                kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                    unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int this_nob;
        char        *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        do {
                LASSERT(niov > 0);
                LASSERT(nkiov > 0);
                this_nob = MIN(iov->iov_len - iovoffset,
                               kiov->kiov_len - kiovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

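/*
 * Illustrative sketch (not part of the original file): draining the
 * first bytes of a page-based payload into a small linear buffer,
 * e.g. to peek at an embedded protocol header.  Names are hypothetical.
 */
static void
example_peek_header(void *hdrbuf, unsigned int hdrlen,
                    unsigned int nkiov, lnet_kiov_t *kiov)
{
        struct kvec iov = { .iov_base = hdrbuf, .iov_len = hdrlen };

        /* kmap()s each source page as needed; must not be called at
         * interrupt priority, as the LASSERT in lnet_copy_kiov2iov()
         * enforces */
        lnet_copy_kiov2iov(1, &iov, 0, nkiov, kiov, 0, hdrlen);
}
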
void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                   unsigned int niov, struct kvec *iov, unsigned int iovoffset,
                   unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int this_nob;
        char        *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!in_interrupt ());

        LASSERT(nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT(nkiov > 0);
        }

        LASSERT(niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT(niov > 0);
        }

        do {
                LASSERT(nkiov > 0);
                LASSERT(niov > 0);
                this_nob = MIN(kiov->kiov_len - kiovoffset,
                               iov->iov_len - iovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
                  int src_niov, lnet_kiov_t *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->kiov_len) {       /* skip initial frags */
                offset -= src->kiov_len;
                src_niov--;
                src++;
                LASSERT(src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->kiov_len - offset;
                dst->kiov_page = src->kiov_page;
                dst->kiov_offset = src->kiov_offset + offset;

                if (len <= frag_len) {
                        dst->kiov_len = len;
                        LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
                        return niov;
                }

                dst->kiov_len = frag_len;
                LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
EXPORT_SYMBOL(lnet_extract_kiov);

void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
{
        unsigned int niov = 0;
        struct kvec *iov = NULL;
        lnet_kiov_t *kiov = NULL;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        iov  = msg->msg_iov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed,
                                             niov, iov, kiov, offset, mlen,
                                             rlen);
        if (rc < 0)
                lnet_finalize(ni, msg, rc);
}

static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
        lnet_libmd_t *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        if ((md->md_options & LNET_MD_KIOV) != 0)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type = cpu_to_le32(type);
        /* dest_nid will be overwritten by lnet_select_pathway() */
        msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}

static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
        void *priv = msg->msg_private;
        int rc;

        LASSERT (!in_interrupt ());
        LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                 (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0)
                lnet_finalize(ni, msg, rc);
}

static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
        int rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_net->net_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_net->net_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                   &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}

/*
 * This function can be called from two paths:
 *      1. when sending a message
 *      2. when decommitting a message (lnet_msg_decommit_tx())
 * In both these cases the peer_ni should have its reference count
 * acquired by the caller and therefore it is safe to drop the spin
 * lock before calling lnd_query()
 */
static void
lnet_ni_query_locked(lnet_ni_t *ni, struct lnet_peer_ni *lp)
{
        cfs_time_t last_alive = 0;
        int cpt = lnet_cpt_of_nid_locked(lp->lpni_nid, ni);

        LASSERT(lnet_peer_aliveness_enabled(lp));
        LASSERT(ni->ni_net->net_lnd->lnd_query != NULL);

        lnet_net_unlock(cpt);
        (ni->ni_net->net_lnd->lnd_query)(ni, lp->lpni_nid, &last_alive);
        lnet_net_lock(cpt);

        lp->lpni_last_query = cfs_time_current();

        if (last_alive != 0) /* NI has updated timestamp */
                lp->lpni_last_alive = last_alive;
}

/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive (struct lnet_peer_ni *lp, cfs_time_t now)
{
        int alive;
        cfs_time_t deadline;

        LASSERT (lnet_peer_aliveness_enabled(lp));

        /*
         * Trust lnet_notify() if it has more recent aliveness news, but
         * ignore the initial assumed death (see lnet_peers_start_down()).
         */
        spin_lock(&lp->lpni_lock);
        if (!lp->lpni_alive && lp->lpni_alive_count > 0 &&
            cfs_time_aftereq(lp->lpni_timestamp, lp->lpni_last_alive)) {
                spin_unlock(&lp->lpni_lock);
                return 0;
        }

        deadline = cfs_time_add(lp->lpni_last_alive,
                                cfs_time_seconds(lp->lpni_net->net_tunables.
                                                 lct_peer_timeout));
        alive = cfs_time_after(deadline, now);

        /*
         * Update obsolete lp_alive except for routers assumed to be dead
         * initially, because router checker would update aliveness in this
         * case, and moreover lpni_last_alive at peer creation is assumed.
         */
        if (alive && !lp->lpni_alive &&
            !(lnet_isrouter(lp) && lp->lpni_alive_count == 0)) {
                spin_unlock(&lp->lpni_lock);
                lnet_notify_locked(lp, 0, 1, lp->lpni_last_alive);
        } else {
                spin_unlock(&lp->lpni_lock);
        }

        return alive;
}

/* NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the lnet_net_lock */
static int
lnet_peer_alive_locked (struct lnet_ni *ni, struct lnet_peer_ni *lp)
{
        cfs_time_t now = cfs_time_current();

        if (!lnet_peer_aliveness_enabled(lp))
                return -ENODEV;

        if (lnet_peer_is_alive(lp, now))
                return 1;

        /*
         * Peer appears dead, but we should avoid frequent NI queries (at
         * most once per lnet_queryinterval seconds).
         */
        if (lp->lpni_last_query != 0) {
                static const int lnet_queryinterval = 1;

                cfs_time_t next_query =
                        cfs_time_add(lp->lpni_last_query,
                                     cfs_time_seconds(lnet_queryinterval));

                if (cfs_time_before(now, next_query)) {
                        if (lp->lpni_alive)
                                CWARN("Unexpected aliveness of peer %s: "
                                      "%d < %d (%d/%d)\n",
                                      libcfs_nid2str(lp->lpni_nid),
                                      (int)now, (int)next_query,
                                      lnet_queryinterval,
                                      lp->lpni_net->net_tunables.lct_peer_timeout);
                        return 0;
                }
        }

        /* query NI for latest aliveness news */
        lnet_ni_query_locked(ni, lp);

        if (lnet_peer_is_alive(lp, now))
                return 1;

        lnet_notify_locked(lp, 0, 0, lp->lpni_last_alive);
        return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock() immediately after this,
 *        so it sets \a do_send FALSE and skips the unlock/send/lock
 *        sequence here.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
        struct lnet_peer_ni  *lp = msg->msg_txpeer;
        struct lnet_ni       *ni = msg->msg_txni;
        int                   cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(ni, lp) == 0) {
                the_lnet.ln_counters[cpt]->drop_count++;
                the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
                lnet_net_unlock(cpt);
                if (msg->msg_txpeer)
                        atomic_inc(&msg->msg_txpeer->lpni_stats.drop_count);
                if (msg->msg_txni)
                        atomic_inc(&msg->msg_txni->ni_stats.drop_count);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send)
                        lnet_finalize(ni, msg, -EHOSTUNREACH);

                lnet_net_lock(cpt);
                return -EHOSTUNREACH;
        }

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send)
                        lnet_finalize(ni, msg, -ECANCELED);

                lnet_net_lock(cpt);
                return -ECANCELED;
        }

        if (!msg->msg_peertxcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_txcredits < 0) ==
                        !list_empty(&lp->lpni_txq));

                msg->msg_peertxcredit = 1;
                lp->lpni_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
                lp->lpni_txcredits--;

                if (lp->lpni_txcredits < lp->lpni_mintxcredits)
                        lp->lpni_mintxcredits = lp->lpni_txcredits;

                if (lp->lpni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_txq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;
                tq->tq_credits--;
                atomic_dec(&ni->ni_tx_credits);

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;
                }
        }

        if (do_send) {
                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

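/*
 * Illustrative sketch (not part of the original file): the credit
 * scheme above in miniature.  A message consumes one peer tx credit
 * and one NI tx credit; when either counter goes negative the message
 * parks on the corresponding queue and lnet_return_tx_credits_locked()
 * later resubmits it with do_send == 1.  The helper name is
 * hypothetical.
 */
static int
example_take_credit(int *credits, struct list_head *waitq,
                    struct lnet_msg *msg)
{
        if (--(*credits) < 0) {
                /* queue it; a credit holder will send it on release */
                list_add_tail(&msg->msg_list, waitq);
                return LNET_CREDIT_WAIT;
        }
        return LNET_CREDIT_OK;
}
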
static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
        lnet_rtrbufpool_t *rbp;
        int                cpt;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }

        return rbp;
}

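/*
 * Illustrative sketch (not part of the original file):
 * lnet_msg2bufpool() walks the per-CPT pools in increasing buffer size
 * until one is big enough.  For instance, with 4KB pages a pool of
 * 1-page buffers covers messages up to 4096 bytes, so a 12000-byte
 * message skips it and lands in the next pool whose
 * rbp_npages * PAGE_SIZE is at least 12000.  The helper below repeats
 * the same walk on an explicit pool array and is hypothetical.
 */
static lnet_rtrbufpool_t *
example_pick_pool(lnet_rtrbufpool_t *pools, unsigned int nob)
{
        lnet_rtrbufpool_t *rbp = &pools[0];

        /* same selection loop as lnet_msg2bufpool() */
        while (nob > (unsigned int)rbp->rbp_npages * PAGE_SIZE)
                rbp++;
        return rbp;
}
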
static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
        /* lnet_parse is going to lnet_net_unlock immediately after this, so
         * it sets do_recv FALSE and skips the unlock/recv/lock sequence.
         * Returns LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        struct lnet_peer_ni *lp = msg->msg_rxpeer;
        lnet_rtrbufpool_t *rbp;
        lnet_rtrbuf_t *rb;

        LASSERT (msg->msg_iov == NULL);
        LASSERT (msg->msg_kiov == NULL);
        LASSERT (msg->msg_niov == 0);
        LASSERT (msg->msg_routing);
        LASSERT (msg->msg_receiving);
        LASSERT (!msg->msg_sending);

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                spin_lock(&lp->lpni_lock);
                LASSERT((lp->lpni_rtrcredits < 0) ==
                        !list_empty(&lp->lpni_rtrq));

                msg->msg_peerrtrcredit = 1;
                lp->lpni_rtrcredits--;
                if (lp->lpni_rtrcredits < lp->lpni_minrtrcredits)
                        lp->lpni_minrtrcredits = lp->lpni_rtrcredits;

                if (lp->lpni_rtrcredits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lpni_rtrq);
                        spin_unlock(&lp->lpni_lock);
                        return LNET_CREDIT_WAIT;
                }
                spin_unlock(&lp->lpni_lock);
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;
                }
        }

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        if (do_recv) {
                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                lnet_net_lock(cpt);
        }
        return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
        struct lnet_peer_ni *txpeer = msg->msg_txpeer;
        struct lnet_ni      *txni = msg->msg_txni;
        lnet_msg_t          *msg2;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = msg->msg_txni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                atomic_inc(&ni->ni_tx_credits);
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          lnet_msg_t, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txni == ni);
                        LASSERT(msg2->msg_tx_delayed);
                        LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                spin_lock(&txpeer->lpni_lock);
                LASSERT((txpeer->lpni_txcredits < 0) ==
                        !list_empty(&txpeer->lpni_txq));

                txpeer->lpni_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
                LASSERT(txpeer->lpni_txqnob >= 0);

                txpeer->lpni_txcredits++;
                if (txpeer->lpni_txcredits <= 0) {
                        msg2 = list_entry(txpeer->lpni_txq.next,
                                          lnet_msg_t, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&txpeer->lpni_lock);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        if (msg2->msg_tx_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg->msg_tx_cpt);
                                lnet_net_lock(msg2->msg_tx_cpt);
                        }
                        (void) lnet_post_send_locked(msg2, 1);
                        if (msg2->msg_tx_cpt != msg->msg_tx_cpt) {
                                lnet_net_unlock(msg2->msg_tx_cpt);
                                lnet_net_lock(msg->msg_tx_cpt);
                        }
                } else {
                        spin_unlock(&txpeer->lpni_lock);
                }
        }

        if (txni != NULL) {
                msg->msg_txni = NULL;
                lnet_ni_decref_locked(txni, msg->msg_tx_cpt);
        }

        if (txpeer != NULL) {
                /*
                 * TODO: once the health patch lands we need to set the
                 * health of the peer_ni to bad when we fail to send a
                 * message, e.g.:
                 *      int status = msg->msg_ev.status;
                 *      if (status != 0)
                 *              lnet_set_peer_ni_health_locked(txpeer, false)
                 */
                msg->msg_txpeer = NULL;
                lnet_peer_ni_decref_locked(txpeer);
        }
}

void
lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
{
        lnet_msg_t *msg;

        if (list_empty(&rbp->rbp_msgs))
                return;
        msg = list_entry(rbp->rbp_msgs.next,
                         lnet_msg_t, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
        lnet_msg_t *msg;
        lnet_msg_t *tmp;

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, list, msg_list) {
                lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                lnet_finalize(NULL, msg, -ECANCELED);
        }

        lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
        struct lnet_peer_ni *rxpeer = msg->msg_rxpeer;
        struct lnet_ni      *rxni = msg->msg_rxni;
        lnet_msg_t          *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                lnet_rtrbuf_t *rb;
                lnet_rtrbufpool_t *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
                rbp = rb->rb_pool;

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits. */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        goto routing_off;
                }

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
                        /* Discard this buffer so we don't have too
                         * many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
                        rbp->rbp_nbuffers--;
                } else {
                        list_add(&rb->rb_list, &rbp->rbp_bufs);
                        rbp->rbp_credits++;
                        if (rbp->rbp_credits <= 0)
                                lnet_schedule_blocked_locked(rbp);
                }
        }

routing_off:
        if (msg->msg_peerrtrcredit) {
                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                spin_lock(&rxpeer->lpni_lock);
                LASSERT((rxpeer->lpni_rtrcredits < 0) ==
                        !list_empty(&rxpeer->lpni_rtrq));

                rxpeer->lpni_rtrcredits++;

                /* drop all messages which are queued to be routed on that
                 * peer. */
                if (!the_lnet.ln_routing) {
                        struct list_head drop;
                        INIT_LIST_HEAD(&drop);
                        list_splice_init(&rxpeer->lpni_rtrq, &drop);
                        spin_unlock(&rxpeer->lpni_lock);
                        lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
                } else if (rxpeer->lpni_rtrcredits <= 0) {
                        msg2 = list_entry(rxpeer->lpni_rtrq.next,
                                          lnet_msg_t, msg_list);
                        list_del(&msg2->msg_list);
                        spin_unlock(&rxpeer->lpni_lock);
                        (void) lnet_post_routed_recv_locked(msg2, 1);
                } else {
                        spin_unlock(&rxpeer->lpni_lock);
                }
        }
        if (rxni != NULL) {
                msg->msg_rxni = NULL;
                lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
        }
        if (rxpeer != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_ni_decref_locked(rxpeer);
        }
}

static int
lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
{
        if (p1->lpni_txqnob < p2->lpni_txqnob)
                return 1;

        if (p1->lpni_txqnob > p2->lpni_txqnob)
                return -1;

        if (p1->lpni_txcredits > p2->lpni_txcredits)
                return 1;

        if (p1->lpni_txcredits < p2->lpni_txcredits)
                return -1;

        return 0;
}

static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
        struct lnet_peer_ni *p1 = r1->lr_gateway;
        struct lnet_peer_ni *p2 = r2->lr_gateway;
        int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
        int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
        int rc;

        if (r1->lr_priority < r2->lr_priority)
                return 1;

        if (r1->lr_priority > r2->lr_priority)
                return -1;

        if (r1_hops < r2_hops)
                return 1;

        if (r1_hops > r2_hops)
                return -1;

        rc = lnet_compare_peers(p1, p2);
        if (rc)
                return rc;

        if (r1->lr_seq - r2->lr_seq <= 0)
                return 1;

        return -1;
}

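/*
 * Illustrative sketch (not part of the original file): the selection
 * order implemented above.  Given two routes to the same remote net,
 * the winner is decided by (1) the lower priority value, (2) fewer
 * hops, (3) less queued data and then more tx credits on the gateway,
 * and finally (4) the smaller round-robin sequence number.  The helper
 * name is hypothetical.
 */
static lnet_route_t *
example_better_route(lnet_route_t *a, lnet_route_t *b)
{
        /* lnet_compare_routes() returns > 0 when 'a' should be preferred */
        return lnet_compare_routes(a, b) > 0 ? a : b;
}
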
static struct lnet_peer_ni *
lnet_find_route_locked(struct lnet_net *net, lnet_nid_t target,
                       lnet_nid_t rtr_nid)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        lnet_route_t        *best_route;
        lnet_route_t        *last_route;
        struct lnet_peer_ni *lpni_best;
        struct lnet_peer_ni *lp;
        int                  rc;

        /* If @rtr_nid is not LNET_NID_ANY, return the gateway with
         * rtr_nid nid, otherwise find the best gateway I can use */

        rnet = lnet_find_rnet_locked(LNET_NIDNET(target));
        if (rnet == NULL)
                return NULL;

        lpni_best = NULL;
        best_route = last_route = NULL;
        list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
                lp = route->lr_gateway;

                if (!lnet_is_route_alive(route))
                        continue;

                if (net != NULL && lp->lpni_net != net)
                        continue;

                if (lp->lpni_nid == rtr_nid) /* it's pre-determined router */
                        return lp;

                if (lpni_best == NULL) {
                        best_route = last_route = route;
                        lpni_best = lp;
                        continue;
                }

                /* no protection on below fields, but it's harmless */
                if (last_route->lr_seq - route->lr_seq < 0)
                        last_route = route;

                rc = lnet_compare_routes(route, best_route);
                if (rc < 0)
                        continue;

                best_route = route;
                lpni_best = lp;
        }

        /* set sequence number on the best router to the latest sequence + 1
         * so we can round-robin all routers; it's racy and inaccurate but
         * harmless and functional */
        if (best_route != NULL)
                best_route->lr_seq = last_route->lr_seq + 1;
        return lpni_best;
}

static struct lnet_ni *
lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *cur_ni,
                 int md_cpt)
{
        struct lnet_ni *ni = NULL, *best_ni = cur_ni;
        unsigned int shortest_distance;
        int best_credits;

        if (best_ni == NULL) {
                shortest_distance = UINT_MAX;
                best_credits = INT_MIN;
        } else {
                shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt,
                                                     best_ni->ni_dev_cpt);
                best_credits = atomic_read(&best_ni->ni_tx_credits);
        }

        while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
                unsigned int distance;
                int ni_credits;

                if (!lnet_is_ni_healthy_locked(ni))
                        continue;

                ni_credits = atomic_read(&ni->ni_tx_credits);

                /*
                 * calculate the distance from the CPT on which
                 * the message memory is allocated to the CPT of
                 * the NI's physical device
                 */
                distance = cfs_cpt_distance(lnet_cpt_table(),
                                            md_cpt,
                                            ni->ni_dev_cpt);

                /*
                 * All distances smaller than the NUMA range
                 * are treated equally.
                 */
                if (distance < lnet_numa_range)
                        distance = lnet_numa_range;

                /*
                 * Select on shorter distance, then available
                 * credits, then round-robin.
                 */
                if (distance > shortest_distance) {
                        continue;
                } else if (distance < shortest_distance) {
                        shortest_distance = distance;
                } else if (ni_credits < best_credits) {
                        continue;
                } else if (ni_credits == best_credits) {
                        if (best_ni && best_ni->ni_seq <= ni->ni_seq)
                                continue;
                }
                best_ni = ni;
                best_credits = ni_credits;
        }

        return best_ni;
}

/*
 * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery,
 * because such traffic is required to perform discovery. We therefore
 * exclude all GET and PUT on that portal. We also exclude all ACK and
 * REPLY traffic, but that is because the portal is not tracked in the
 * message structure for these message types. We could restrict this
 * further by also checking for LNET_PROTO_PING_MATCHBITS.
 */
static bool
lnet_msg_discovery(struct lnet_msg *msg)
{
        if (msg->msg_type == LNET_MSG_PUT) {
                if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL)
                        return true;
        } else if (msg->msg_type == LNET_MSG_GET) {
                if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL)
                        return true;
        }
        return false;
}

static int
lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
                    struct lnet_msg *msg, lnet_nid_t rtr_nid)
{
        struct lnet_ni       *best_ni;
        struct lnet_peer_ni  *best_lpni;
        struct lnet_peer_ni  *best_gw;
        struct lnet_peer_ni  *lpni;
        struct lnet_peer_ni  *final_dst;
        struct lnet_peer     *peer;
        struct lnet_peer_net *peer_net;
        struct lnet_net      *local_net;
        int                   cpt, cpt2, rc;
        bool                  routing;
        bool                  routing2;
        bool                  ni_is_pref;
        bool                  preferred;
        bool                  local_found;
        int                   best_lpni_credits;
        int                   md_cpt;

        /*
         * get an initial CPT to use for locking. The idea here is not to
         * serialize the calls to select_pathway, so that as many
         * operations can run concurrently as possible. To do that we use
         * the CPT where this call is being executed. Later on when we
         * determine the CPT to use in lnet_message_commit, we switch the
         * lock and check if there was any configuration change. If none,
         * then we proceed, if there is, then we restart the operation.
         */
        cpt = lnet_net_lock_current();

        md_cpt = lnet_cpt_of_md(msg->msg_md);
        if (md_cpt == CFS_CPT_ANY)
                md_cpt = cpt;

again:
        best_ni = NULL;
        best_lpni = NULL;
        best_gw = NULL;
        final_dst = NULL;
        local_net = NULL;
        routing = false;
        routing2 = false;
        local_found = false;

        /*
         * lnet_nid2peerni_locked() is the path that will find an
         * existing peer_ni, or create one and mark it as having been
         * created due to network traffic.
         */
        lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni)) {
                lnet_net_unlock(cpt);
                return PTR_ERR(lpni);
        }

        /*
         * Now that we have a peer_ni, check if we want to discover
         * the peer. Traffic to the LNET_RESERVED_PORTAL should not
         * trigger discovery.
         */
        peer = lpni->lpni_peer_net->lpn_peer;
        if (lnet_msg_discovery(msg) && !lnet_peer_is_uptodate(peer)) {
                rc = lnet_discover_peer_locked(lpni, cpt);
                if (rc) {
                        lnet_peer_ni_decref_locked(lpni);
                        lnet_net_unlock(cpt);
                        return rc;
                }
                /* The peer may have changed. */
                peer = lpni->lpni_peer_net->lpn_peer;
        }
        lnet_peer_ni_decref_locked(lpni);

        /* If peer is not healthy then can not send anything to it */
        if (!lnet_is_peer_healthy_locked(peer)) {
                lnet_net_unlock(cpt);
                return -EHOSTUNREACH;
        }

        /*
         * STEP 1: first jab at determining best_ni
         * if src_nid is explicitly specified, then best_ni is already
         * pre-determined for us. Otherwise we need to select the best
         * one to use later on
         */
        if (src_nid != LNET_NID_ANY) {
                best_ni = lnet_nid2ni_locked(src_nid, cpt);
                if (!best_ni) {
                        lnet_net_unlock(cpt);
                        LCONSOLE_WARN("Can't send to %s: src %s is not a "
                                      "local nid\n", libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));
                        return -EINVAL;
                }
        }

        if (msg->msg_type == LNET_MSG_REPLY ||
            msg->msg_type == LNET_MSG_ACK ||
            !lnet_peer_is_multi_rail(peer) ||
            best_ni) {
                /*
                 * for replies we want to respond on the same peer_ni we
                 * received the message on if possible. If not, then pick
                 * a peer_ni to send to
                 *
                 * if the peer is non-multi-rail then you want to send to
                 * the dst_nid provided as well.
                 *
                 * If the best_ni has already been determined, i.e. the
                 * src_nid has been specified, then use the
                 * destination_nid provided as well, since we're
                 * continuing a series of related messages for the same
                 * RPC.
                 *
                 * It is expected to find the lpni using dst_nid, since we
                 * created it earlier.
                 */
                best_lpni = lnet_find_peer_ni_locked(dst_nid);
                if (best_lpni)
                        lnet_peer_ni_decref_locked(best_lpni);

                if (best_lpni && !lnet_get_net_locked(LNET_NIDNET(dst_nid))) {
                        /*
                         * this lpni is not on a local network so we need
                         * to route this reply.
                         */
                        best_gw = lnet_find_route_locked(NULL,
                                                         best_lpni->lpni_nid,
                                                         rtr_nid);
                        if (best_gw) {
                                /*
                                 * RULE: Each node considers only the next-hop
                                 *
                                 * We're going to route the message, so
                                 * change the peer to the router.
                                 */
                                LASSERT(best_gw->lpni_peer_net);
                                LASSERT(best_gw->lpni_peer_net->lpn_peer);
                                peer = best_gw->lpni_peer_net->lpn_peer;

                                /*
                                 * if the router is not multi-rail then use
                                 * the best_gw found to send the message to
                                 */
                                if (!lnet_peer_is_multi_rail(peer))
                                        best_lpni = best_gw;
                                else
                                        best_lpni = NULL;

                                routing = true;
                        } else {
                                best_lpni = NULL;
                        }
                } else if (!best_lpni) {
                        lnet_net_unlock(cpt);
                        CERROR("unable to send msg_type %d to "
                               "originating %s. Destination NID not in DB\n",
                               msg->msg_type, libcfs_nid2str(dst_nid));
                        return -EINVAL;
                }
        }

        /*
         * We must use a consistent source address when sending to a
         * non-MR peer. However, a non-MR peer can have multiple NIDs
         * on multiple networks, and we may even need to talk to this
         * peer on multiple networks -- certain types of
         * load-balancing configuration do this.
         *
         * So we need to pick the NI the peer prefers for this
         * particular network.
         */
        if (!lnet_peer_is_multi_rail(peer)) {
                if (!best_lpni) {
                        lnet_net_unlock(cpt);
                        CERROR("no route to %s\n",
                               libcfs_nid2str(dst_nid));
                        return -EHOSTUNREACH;
                }
                /* best ni is already set if src_nid was provided */
                if (!best_ni) {
                        /* Get the target peer_ni */
                        peer_net = lnet_peer_get_net_locked(peer,
                                                            LNET_NIDNET(dst_nid));
                        LASSERT(peer_net != NULL);
                        list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
                                            lpni_on_peer_net_list) {
                                if (lpni->lpni_pref_nnids == 0)
                                        continue;
                                LASSERT(lpni->lpni_pref_nnids == 1);
                                best_ni = lnet_nid2ni_locked(
                                                lpni->lpni_pref.nid, cpt);
                                break;
                        }
                }
                /* if best_ni is still not set just pick one */
                if (!best_ni) {
                        best_ni = lnet_net2ni_locked(
                                best_lpni->lpni_net->net_id, cpt);
                        /* If there is no best_ni we don't have a route */
                        if (!best_ni) {
                                lnet_net_unlock(cpt);
                                CERROR("no path to %s from net %s\n",
                                       libcfs_nid2str(best_lpni->lpni_nid),
                                       libcfs_net2str(best_lpni->lpni_net->net_id));
                                return -EHOSTUNREACH;
                        }
                }

                lpni = list_entry(peer_net->lpn_peer_nis.next,
                                  struct lnet_peer_ni,
                                  lpni_on_peer_net_list);

                /* Set preferred NI if necessary. */
                if (lpni->lpni_pref_nnids == 0)
                        lnet_peer_ni_set_non_mr_pref_nid(lpni, best_ni->ni_nid);
        }

        /*
         * if we already found a best_ni because src_nid is specified and
         * best_lpni because we are replying to a message then just send
         * the message
         */
        if (best_ni && best_lpni)
                goto send;

        /*
         * If we already found a best_ni because src_nid is specified then
         * pick the peer then send the message
         */
        if (best_ni)
                goto pick_peer;

        /*
         * pick the best_ni by going through all the possible networks of
         * that peer and see which local NI is best suited to talk to that
         * peer.
         *
         * Locally connected networks will always be preferred over
         * a routed network. If there are only routed paths to the peer,
         * then the best route is chosen. If all routes are equal then
         * they are used in round robin.
         */
        list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
                if (!lnet_is_peer_net_healthy_locked(peer_net))
                        continue;

                local_net = lnet_get_net_locked(peer_net->lpn_net_id);
                if (!local_net && !routing && !local_found) {
                        struct lnet_peer_ni *net_gw;

                        lpni = list_entry(peer_net->lpn_peer_nis.next,
                                          struct lnet_peer_ni,
                                          lpni_on_peer_net_list);

                        net_gw = lnet_find_route_locked(NULL,
                                                        lpni->lpni_nid,
                                                        rtr_nid);
                        if (!net_gw)
                                continue;

                        if (best_gw) {
                                /*
                                 * The lnet_find_route_locked() call
                                 * will return the best_gw on the
                                 * lpni->lpni_nid network.
                                 * However, best_gw and net_gw can
                                 * be on different networks.
                                 * Therefore need to compare them
                                 * to pick the better of either.
                                 */
                                if (lnet_compare_peers(best_gw, net_gw) > 0)
                                        continue;
                                if (best_gw->lpni_gw_seq <= net_gw->lpni_gw_seq)
                                        continue;
                        }
                        best_gw = net_gw;
                        final_dst = lpni;

                        routing2 = true;
                } else {
                        best_gw = NULL;
                        final_dst = NULL;
                        routing2 = false;
                        local_found = true;
                }

                /*
                 * a gw on this network is found, but there could be
                 * other better gateways on other networks. So don't pick
                 * the best_ni until we determine the best_gw.
                 */
                if (best_gw)
                        continue;

                /* if no local_net found continue */
                if (!local_net)
                        continue;

                /*
                 * Iterate through the NIs in this local Net and select
                 * the NI to send from. The selection is determined by
                 * these 3 criteria in the following priority:
                 *      1. NUMA
                 *      2. NI available credits
                 *      3. Round Robin
                 */
                best_ni = lnet_get_best_ni(local_net, best_ni, md_cpt);
        }

        if (!best_ni && !best_gw) {
                lnet_net_unlock(cpt);
                LCONSOLE_WARN("No local ni found to send from to %s\n",
                              libcfs_nid2str(dst_nid));
                return -EINVAL;
        }

        if (!best_ni) {
                best_ni = lnet_get_best_ni(best_gw->lpni_net, best_ni, md_cpt);
                LASSERT(best_gw && best_ni);

                /*
                 * We're going to route the message, so change the peer to
                 * the router.
                 */
                LASSERT(best_gw->lpni_peer_net);
                LASSERT(best_gw->lpni_peer_net->lpn_peer);
                best_gw->lpni_gw_seq++;
                peer = best_gw->lpni_peer_net->lpn_peer;
        }

        /*
         * Now that we selected the NI to use increment its sequence
         * number so the Round Robin algorithm will detect that it has
         * been used and pick the next NI.
         */
        best_ni->ni_seq++;

        /*
         * At this point the best_ni is on a local network on which
         * the peer has a peer_ni as well
         */
        peer_net = lnet_peer_get_net_locked(peer,
                                            best_ni->ni_net->net_id);
        /*
         * peer_net is not available or the src_nid is explicitly defined
         * and the peer_net for that src_nid is unhealthy. find a route to
         * the destination nid.
         */
        if (!peer_net ||
            (src_nid != LNET_NID_ANY &&
             !lnet_is_peer_net_healthy_locked(peer_net))) {
                best_gw = lnet_find_route_locked(best_ni->ni_net,
                                                 dst_nid,
                                                 rtr_nid);
                /*
                 * if no route is found for that network then
                 * move onto the next peer_ni in the peer
                 */
                if (!best_gw) {
                        lnet_net_unlock(cpt);
                        LCONSOLE_WARN("No route to peer from %s\n",
                                      libcfs_nid2str(best_ni->ni_nid));
                        return -EHOSTUNREACH;
                }
                CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
                       libcfs_nid2str(dst_nid),
                       libcfs_nid2str(best_gw->lpni_nid),
                       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

                routing2 = true;
                /*
                 * RULE: Each node considers only the next-hop
                 *
                 * We're going to route the message, so change the peer to
                 * the router.
                 */
                LASSERT(best_gw->lpni_peer_net);
                LASSERT(best_gw->lpni_peer_net->lpn_peer);
                peer = best_gw->lpni_peer_net->lpn_peer;
        } else if (!lnet_is_peer_net_healthy_locked(peer_net)) {
                /*
                 * this peer_net is unhealthy but we still have an opportunity
                 * to find another peer_net that we can use
                 */
                __u32 net_id = peer_net->lpn_net_id;
                LCONSOLE_WARN("peer net %s unhealthy\n",
                              libcfs_net2str(net_id));
                goto again;
        }

pick_peer:
        /*
         * Look at the peer NIs for the destination peer that connect
         * to the chosen net. If a peer_ni is preferred when using the
         * best_ni to communicate, we use that one. If there is no
         * preferred peer_ni, or there are multiple preferred peer_ni,
         * the available transmit credits are used. If the transmit
         * credits are equal, we round-robin over the peer_ni.
         */
        lpni = NULL;
        best_lpni_credits = INT_MIN;
        preferred = false;

        while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
                /*
                 * if this peer ni is not healthy just skip it, no point in
                 * examining it further
                 */
                if (!lnet_is_peer_ni_healthy_locked(lpni))
                        continue;
                ni_is_pref = lnet_peer_is_pref_nid_locked(lpni,
                                                          best_ni->ni_nid);

                /* if this is a preferred peer use it */
                if (!preferred && ni_is_pref) {
                        preferred = true;
                } else if (preferred && !ni_is_pref) {
                        /*
                         * this is not the preferred peer so let's ignore
                         * it.
                         */
                        continue;
                } else if (lpni->lpni_txcredits < best_lpni_credits) {
                        /*
                         * We already have a peer that has more credits
                         * available than this one. No need to consider
                         * this peer further.
                         */
                        continue;
                } else if (lpni->lpni_txcredits == best_lpni_credits) {
                        /*
                         * The best peer found so far and the current peer
                         * have the same number of available credits; let's
                         * make sure to select between them using Round
                         * Robin
                         */
                        if (best_lpni) {
                                if (best_lpni->lpni_seq <= lpni->lpni_seq)
                                        continue;
                        }
                }

                best_lpni = lpni;
                best_lpni_credits = lpni->lpni_txcredits;
        }

        /* if we still can't find a peer ni then we can't reach it */
        if (!best_lpni) {
                __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
                        LNET_NIDNET(dst_nid);
                lnet_net_unlock(cpt);
                LCONSOLE_WARN("no peer_ni found on peer net %s\n",
                              libcfs_net2str(net_id));
                return -EHOSTUNREACH;
        }

send:
        /* Shortcut for loopback. */
        if (best_ni == the_lnet.ln_loni) {
                /* No send credit hassles with LOLND */
                lnet_ni_addref_locked(best_ni, cpt);
                msg->msg_hdr.dest_nid = cpu_to_le64(best_ni->ni_nid);
                if (!msg->msg_routing)
                        msg->msg_hdr.src_nid = cpu_to_le64(best_ni->ni_nid);
                msg->msg_target.nid = best_ni->ni_nid;
                lnet_msg_commit(msg, cpt);
                msg->msg_txni = best_ni;
                lnet_net_unlock(cpt);

                return LNET_CREDIT_OK;
        }

        routing = routing || routing2;

        /*
         * Increment sequence number of the peer selected so that we
         * pick the next one in Round Robin.
         */
        best_lpni->lpni_seq++;

        /*
         * grab a reference on the peer_ni so it sticks around even if
         * we need to drop and relock the lnet_net_lock below.
         */
        lnet_peer_ni_addref_locked(best_lpni);

        /*
         * Use lnet_cpt_of_nid() to determine the CPT used to commit the
         * message. This ensures that we get a CPT that is correct for
         * the NI when the NI has been restricted to a subset of all CPTs.
         * If the selected CPT differs from the one currently locked, we
         * must unlock and relock the lnet_net_lock(), and then check whether
         * the configuration has changed. We don't have a hold on the best_ni
         * yet, and it may have vanished.
         */
        cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
        if (cpt != cpt2) {
                __u32 seq = lnet_get_dlc_seq_locked();
                lnet_net_unlock(cpt);
                cpt = cpt2;
                lnet_net_lock(cpt);
                if (seq != lnet_get_dlc_seq_locked()) {
                        lnet_peer_ni_decref_locked(best_lpni);
                        goto again;
                }
        }

        /*
         * store the best_lpni in the message right away to avoid having
         * to do the same operation under different conditions
         */
        msg->msg_txpeer = best_lpni;
        msg->msg_txni = best_ni;

        /*
         * grab a reference for the best_ni since now it's in use in this
         * send. The reference will be dropped when the message is
         * finished in lnet_finalize()
         */
        lnet_ni_addref_locked(msg->msg_txni, cpt);

        /*
         * Always set the target.nid to the best peer picked. Either the
         * nid will be one of the preconfigured NIDs, or the same NID as
         * what was originally set in the target, or it will be the NID of
         * a router if this message should be routed
         */
        msg->msg_target.nid = msg->msg_txpeer->lpni_nid;

        /*
         * lnet_msg_commit assigns the correct cpt to the message, which
         * is used to decrement the correct refcount on the ni when it's
         * time to return the credits
         */
        lnet_msg_commit(msg, cpt);

        /*
         * If we are routing the message then we don't need to overwrite
         * the src_nid since it would've been set at the origin. Otherwise
         * we are the originator so we need to set it.
         */
        if (!msg->msg_routing)
                msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);

        if (routing) {
                msg->msg_target_is_router = 1;
                msg->msg_target.pid = LNET_PID_LUSTRE;
                /*
                 * since we're routing we want to ensure that the
                 * msg_hdr.dest_nid is set to the final destination. When
                 * the router receives this message it knows how to route
                 * it.
                 */
                msg->msg_hdr.dest_nid =
                        cpu_to_le64(final_dst ? final_dst->lpni_nid : dst_nid);
        } else {
                /*
                 * if we're not routing set the dest_nid to the best peer
                 * ni that we picked earlier in the algorithm.
                 */
                msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid);
        }

        rc = lnet_post_send_locked(msg, 0);

        lnet_net_unlock(cpt);

        return rc;
}

int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
{
        lnet_nid_t dst_nid = msg->msg_target.nid;
        int        rc;

        /*
         * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
         * but we might want to use pre-determined router for ACK/REPLY
         * in the future
         */
        /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
        LASSERT (msg->msg_txpeer == NULL);
        LASSERT (!msg->msg_sending);
        LASSERT (!msg->msg_target_is_router);
        LASSERT (!msg->msg_receiving);

        msg->msg_sending = 1;

        LASSERT(!msg->msg_tx_committed);

        rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
        if (rc < 0)
                return rc;

        if (rc == LNET_CREDIT_OK)
                lnet_ni_send(msg->msg_txni, msg);

        /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
        return 0;
}

void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
        lnet_net_lock(cpt);
        the_lnet.ln_counters[cpt]->drop_count++;
        the_lnet.ln_counters[cpt]->drop_length += nob;
        lnet_net_unlock(cpt);

        lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}

static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t *hdr = &msg->msg_hdr;

        if (msg->msg_wanted != 0)
                lnet_setpayloadbuffer(msg);

        lnet_build_msg_event(msg, LNET_EVENT_PUT);

        /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
         * it back into the ACK during lnet_finalize() */
        msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
                        (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);

        lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
                     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}

static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t             *hdr = &msg->msg_hdr;
        struct lnet_match_info  info;
        int                     rc;
        bool                    ready_delay;

        /* Convert put fields to host byte order */
        hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
        hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
        hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

        /* Primary peer NID. */
        info.mi_id.nid  = msg->msg_initiator;
        info.mi_id.pid  = hdr->src_pid;
        info.mi_opc     = LNET_MD_OP_PUT;
        info.mi_portal  = hdr->msg.put.ptl_index;
        info.mi_rlength = hdr->payload_length;
        info.mi_roffset = hdr->msg.put.offset;
        info.mi_mbits   = hdr->msg.put.match_bits;
        info.mi_cpt     = lnet_cpt_of_nid(msg->msg_rxpeer->lpni_nid, ni);

        msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
        ready_delay = msg->msg_rx_ready_delay;

 again:
        rc = lnet_ptl_match_md(&info, msg);

        switch (rc) {
        default:
                LBUG();

        case LNET_MATCHMD_OK:
                lnet_recv_put(ni, msg);
                return 0;

        case LNET_MATCHMD_NONE:
                if (ready_delay)
                        /* no eager_recv or has already called it, should
                         * have been attached on delayed list */
                        return 0;

                rc = lnet_ni_eager_recv(ni, msg);
                if (rc == 0) {
                        ready_delay = true;
                        goto again;
                }
                /* fall through */

        case LNET_MATCHMD_DROP:
                CNETERR("Dropping PUT from %s portal %d match %llu"
                        " offset %d length %d: %d\n",
                        libcfs_id2str(info.mi_id), info.mi_portal,
                        info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

                return -ENOENT; /* -ve: OK but no match */
        }
}

static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
        struct lnet_match_info  info;
        lnet_hdr_t             *hdr = &msg->msg_hdr;
        lnet_process_id_t       source_id;
        struct lnet_handle_wire reply_wmd;
        int                     rc;

        /* Convert get fields to host byte order */
        hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
        hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
        hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
        hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);

        source_id.nid = hdr->src_nid;
        source_id.pid = hdr->src_pid;
        /* Primary peer NID */
        info.mi_id.nid  = msg->msg_initiator;
        info.mi_id.pid  = hdr->src_pid;
        info.mi_opc     = LNET_MD_OP_GET;
        info.mi_portal  = hdr->msg.get.ptl_index;
        info.mi_rlength = hdr->msg.get.sink_length;
        info.mi_roffset = hdr->msg.get.src_offset;
        info.mi_mbits   = hdr->msg.get.match_bits;
        info.mi_cpt     = lnet_cpt_of_nid(msg->msg_rxpeer->lpni_nid, ni);

        rc = lnet_ptl_match_md(&info, msg);
        if (rc == LNET_MATCHMD_DROP) {
                CNETERR("Dropping GET from %s portal %d match %llu"
                        " offset %d length %d\n",
                        libcfs_id2str(info.mi_id), info.mi_portal,
                        info.mi_mbits, info.mi_roffset, info.mi_rlength);
                return -ENOENT; /* -ve: OK but no match */
        }

        LASSERT(rc == LNET_MATCHMD_OK);

        lnet_build_msg_event(msg, LNET_EVENT_GET);

        reply_wmd = hdr->msg.get.return_wmd;

        lnet_prep_send(msg, LNET_MSG_REPLY, source_id,
                       msg->msg_offset, msg->msg_wanted);

        msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

        if (rdma_get) {
                /* The LND completes the REPLY from its recv procedure */
                lnet_ni_recv(ni, msg->msg_private, msg, 0,
                             msg->msg_offset, msg->msg_len, msg->msg_len);
                return 0;
        }

        lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
        msg->msg_receiving = 0;

        rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
        if (rc < 0) {
                /* didn't get as far as lnet_ni_send() */
                CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
                       libcfs_nid2str(ni->ni_nid),
                       libcfs_id2str(info.mi_id), rc);

                lnet_finalize(ni, msg, rc);
        }

        return 0;
}

static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
        void             *private = msg->msg_private;
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;
        int               rlength;
        int               mlength;
        int               cpt;

        cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
        lnet_res_lock(cpt);

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CNETERR("%s: Dropping REPLY from %s for %s "
                        "MD %#llx.%#llx\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        (md == NULL) ? "invalid" : "inactive",
                        hdr->msg.reply.dst_wmd.wh_interface_cookie,
                        hdr->msg.reply.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);
                return -ENOENT; /* -ve: OK but no match */
        }

        LASSERT(md->md_offset == 0);

        rlength = hdr->payload_length;
        mlength = MIN(rlength, (int)md->md_length);

        if (mlength < rlength &&
            (md->md_options & LNET_MD_TRUNCATE) == 0) {
                CNETERR("%s: Dropping REPLY from %s length %d "
                        "for MD %#llx would overflow (%d)\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
                        (int)md->md_length);
                lnet_res_unlock(cpt);
                return -ENOENT; /* -ve: OK but no match */
        }

        CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

        lnet_msg_attach_md(msg, md, 0, mlength);

        if (mlength != 0)
                lnet_setpayloadbuffer(msg);

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_REPLY);

        lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
        return 0;
}

static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;
        int               cpt;

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* Convert ack fields to host byte order */
        hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
        hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

        cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
        lnet_res_lock(cpt);

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                /* Don't moan; this is expected */
                CDEBUG(D_NET,
                       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                       (md == NULL) ? "invalid" : "inactive",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);
                return -ENOENT; /* -ve! */
        }

        CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               hdr->msg.ack.dst_wmd.wh_object_cookie);

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_ACK);

        lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
        return 0;
}

/**
 * \retval LNET_CREDIT_OK       If \a msg is forwarded
 * \retval LNET_CREDIT_WAIT     If \a msg is blocked because no buffer is
 *                              available
 * \retval -ve                  error code
 */
int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
        int rc = 0;

        if (!the_lnet.ln_routing)
                return -ECANCELED;

        if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
            lnet_msg2bufpool(msg)->rbp_credits <= 0) {
                if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
                        msg->msg_rx_ready_delay = 1;
                } else {
                        lnet_net_unlock(msg->msg_rx_cpt);
                        rc = lnet_ni_eager_recv(ni, msg);
                        lnet_net_lock(msg->msg_rx_cpt);
                }
        }

        if (rc == 0)
                rc = lnet_post_routed_recv_locked(msg, 0);
        return rc;
}

static int
lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)
{
        int rc;

        switch (msg->msg_type) {
        case LNET_MSG_ACK:
                rc = lnet_parse_ack(ni, msg);
                break;
        case LNET_MSG_PUT:
                rc = lnet_parse_put(ni, msg);
                break;
        case LNET_MSG_GET:
                rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
                break;
        case LNET_MSG_REPLY:
                rc = lnet_parse_reply(ni, msg);
                break;
        default: /* prevent an unused label if !kernel */
                LASSERT(0);
                return -EPROTO;
        }

        LASSERT(rc == 0 || rc == -ENOENT);
        return rc;
}

char *
lnet_msgtyp2str(int type)
{
        switch (type) {
        case LNET_MSG_ACK:
                return ("ACK");
        case LNET_MSG_PUT:
                return ("PUT");
        case LNET_MSG_GET:
                return ("GET");
        case LNET_MSG_REPLY:
                return ("REPLY");
        case LNET_MSG_HELLO:
                return ("HELLO");
        default:
                return ("<UNKNOWN>");
        }
}

void
lnet_print_hdr(lnet_hdr_t *hdr)
{
        lnet_process_id_t src = {0};
        lnet_process_id_t dst = {0};
        char *type_str = lnet_msgtyp2str(hdr->type);

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        dst.nid = hdr->dest_nid;
        dst.pid = hdr->dest_pid;

        CWARN("P3 Header at %p of type %s\n", hdr, type_str);
        CWARN("    From %s\n", libcfs_id2str(src));
        CWARN("    To   %s\n", libcfs_id2str(dst));

        switch (hdr->type) {
        default:
                break;

        case LNET_MSG_PUT:
                CWARN("    Ptl index %d, ack md %#llx.%#llx, "
                      "match bits %llu\n",
                      hdr->msg.put.ptl_index,
                      hdr->msg.put.ack_wmd.wh_interface_cookie,
                      hdr->msg.put.ack_wmd.wh_object_cookie,
                      hdr->msg.put.match_bits);
                CWARN("    Length %d, offset %d, hdr data %#llx\n",
                      hdr->payload_length, hdr->msg.put.offset,
                      hdr->msg.put.hdr_data);
                break;

        case LNET_MSG_GET:
                CWARN("    Ptl index %d, return md %#llx.%#llx, "
                      "match bits %llu\n", hdr->msg.get.ptl_index,
                      hdr->msg.get.return_wmd.wh_interface_cookie,
                      hdr->msg.get.return_wmd.wh_object_cookie,
                      hdr->msg.get.match_bits);
                CWARN("    Length %d, src offset %d\n",
                      hdr->msg.get.sink_length,
                      hdr->msg.get.src_offset);
                break;

        case LNET_MSG_ACK:
                CWARN("    dst md %#llx.%#llx, "
                      "manipulated length %d\n",
                      hdr->msg.ack.dst_wmd.wh_interface_cookie,
                      hdr->msg.ack.dst_wmd.wh_object_cookie,
                      hdr->msg.ack.mlength);
                break;

        case LNET_MSG_REPLY:
                CWARN("    dst md %#llx.%#llx, "
                      "length %d\n",
                      hdr->msg.reply.dst_wmd.wh_interface_cookie,
                      hdr->msg.reply.dst_wmd.wh_object_cookie,
                      hdr->payload_length);
        }
}

2417 lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
2418 void *private, int rdma_req)
2423 struct lnet_msg *msg;
2424 lnet_pid_t dest_pid;
2425 lnet_nid_t dest_nid;
2427 struct lnet_peer_ni *lpni;
2428 __u32 payload_length;
2431 LASSERT (!in_interrupt ());
2433 type = le32_to_cpu(hdr->type);
2434 src_nid = le64_to_cpu(hdr->src_nid);
2435 dest_nid = le64_to_cpu(hdr->dest_nid);
2436 dest_pid = le32_to_cpu(hdr->dest_pid);
2437 payload_length = le32_to_cpu(hdr->payload_length);
2439 for_me = (ni->ni_nid == dest_nid);
2440 cpt = lnet_cpt_of_nid(from_nid, ni);
2445 if (payload_length > 0) {
2446 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
2447 libcfs_nid2str(from_nid),
2448 libcfs_nid2str(src_nid),
2449 lnet_msgtyp2str(type), payload_length);
2455 case LNET_MSG_REPLY:
2456 if (payload_length >
2457 (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
2458 CERROR("%s, src %s: bad %s payload %d "
2459 "(%d max expected)\n",
2460 libcfs_nid2str(from_nid),
2461 libcfs_nid2str(src_nid),
2462 lnet_msgtyp2str(type),
2464 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
2470 CERROR("%s, src %s: Bad message type 0x%x\n",
2471 libcfs_nid2str(from_nid),
2472 libcfs_nid2str(src_nid), type);
2476 if (the_lnet.ln_routing &&
2477 ni->ni_last_alive != ktime_get_real_seconds()) {
2478 /* NB: so far here is the only place to set NI status to "up */
2480 ni->ni_last_alive = ktime_get_real_seconds();
2481 if (ni->ni_status != NULL &&
2482 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
2483 ni->ni_status->ns_status = LNET_NI_STATUS_UP;

        /* Regard a bad destination NID as a protocol error.  Senders should
         * know what they're doing; if they don't they're misconfigured, buggy
         * or malicious so we chop them off at the knees :) */

        if (!for_me) {
                if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
                        /* should have gone direct */
                        CERROR("%s, src %s: Bad dest nid %s "
                               "(should have been sent direct)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (lnet_islocalnid(dest_nid)) {
                        /* dest is another local NI; sender should have used
                         * this node's NID on its own network */
                        CERROR("%s, src %s: Bad dest nid %s "
                               "(it's my nid but on a different network)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (rdma_req && type == LNET_MSG_GET) {
                        CERROR("%s, src %s: Bad optimized GET for %s "
                               "(final destination must be me)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (!the_lnet.ln_routing) {
                        CERROR("%s, src %s: Dropping message for %s "
                               "(routing not enabled)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));
                        goto drop;
                }
        }

        /* Message looks OK; we're not going to return an error, so we MUST
         * call back lnd_recv() come what may... */

        if (!list_empty(&the_lnet.ln_test_peers) &&  /* normally we don't */
            fail_peer(src_nid, 0)) {                 /* shall we now? */
                CERROR("%s, src %s: Dropping %s to simulate failure\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));
                goto drop;
        }

        if (!list_empty(&the_lnet.ln_drop_rules) &&
            lnet_drop_rule_match(hdr)) {
                CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate "
                       "silent message loss\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
                goto drop;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("%s, src %s: Dropping %s (out of memory)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));
                goto drop;
        }

        /* msg zeroed in lnet_msg_alloc(); i.e. flags all clear,
         * pointers NULL etc. */
        msg->msg_type = type;
        msg->msg_private = private;
        msg->msg_receiving = 1;
        msg->msg_rdma_get = rdma_req;
        msg->msg_len = msg->msg_wanted = payload_length;
        msg->msg_offset = 0;
        msg->msg_hdr = *hdr;
        /* for building message event */
        msg->msg_from = from_nid;
        if (!for_me) {
                msg->msg_target.pid = dest_pid;
                msg->msg_target.nid = dest_nid;
                msg->msg_routing = 1;
        } else {
                /* convert common msg->hdr fields to host byteorder */
                msg->msg_hdr.type = type;
                msg->msg_hdr.src_nid = src_nid;
                msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
                msg->msg_hdr.dest_nid = dest_nid;
                msg->msg_hdr.dest_pid = dest_pid;
                msg->msg_hdr.payload_length = payload_length;
        }

        /* Multi-Rail: Primary NID of source. */
        msg->msg_initiator = lnet_peer_primary_nid(src_nid);

        lnet_net_lock(cpt);
        lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
        if (IS_ERR(lpni)) {
                lnet_net_unlock(cpt);
                CERROR("%s, src %s: Dropping %s "
                       "(error %ld looking up sender)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type), PTR_ERR(lpni));
                lnet_msg_free(msg);
                if (PTR_ERR(lpni) == -ESHUTDOWN)
                        /* We are shutting down.  Don't do anything more */
                        return 0;
                goto drop;
        }
        msg->msg_rxpeer = lpni;
        msg->msg_rxni = ni;
        lnet_ni_addref_locked(ni, cpt);

        if (lnet_isrouter(msg->msg_rxpeer)) {
                lnet_peer_set_alive(msg->msg_rxpeer);
                if (avoid_asym_router_failure &&
                    LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
                        /* received a remote message from router, update
                         * remote NI status on this router.
                         * NB: multi-hop routed message will be ignored.
                         */
                        lnet_router_ni_update_locked(msg->msg_rxpeer,
                                                     LNET_NIDNET(src_nid));
                }
        }

        lnet_msg_commit(msg, cpt);

        /* message delay simulation */
        if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
                     lnet_delay_rule_match_locked(hdr, msg))) {
                lnet_net_unlock(cpt);
                return 0;
        }

        if (!for_me) {
                rc = lnet_parse_forward_locked(ni, msg);
                lnet_net_unlock(cpt);

                if (rc < 0)
                        goto free_drop;

                if (rc == LNET_CREDIT_OK) {
                        lnet_ni_recv(ni, msg->msg_private, msg, 0,
                                     0, payload_length, payload_length);
                }
                return 0;
        }

        lnet_net_unlock(cpt);

        rc = lnet_parse_local(ni, msg);
        if (rc != 0)
                goto free_drop;
        return 0;

 free_drop:
        LASSERT(msg->msg_md == NULL);
        lnet_finalize(ni, msg, rc);

 drop:
        lnet_drop_message(ni, cpt, private, payload_length);
        return 0;
}
EXPORT_SYMBOL(lnet_parse);
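
/*
 * Usage sketch (editor's addition, not part of the original source): an
 * LND's receive-completion path typically validates the wire header and
 * hands it to lnet_parse().  A negative return means the header was
 * rejected as a protocol error; on 0, LNet has taken ownership and will
 * call back lnd_recv() to consume the payload, passing 'private' through.
 * The LND and all "mylnd_*" names below are hypothetical.
 *
 *      static void
 *      mylnd_recv_done(lnet_ni_t *ni, struct mylnd_rx *rx)
 *      {
 *              int rc;
 *
 *              // rdma_req == 0: no RDMA GET was pre-arranged for this rx
 *              rc = lnet_parse(ni, &rx->rx_hdr, rx->rx_from_nid, rx, 0);
 *              if (rc < 0)
 *                      mylnd_rx_free(rx);      // header rejected: -EPROTO
 *              // on rc == 0, lnd_recv() will be invoked with 'rx' as
 *              // the 'private' cookie to deliver the payload
 *      }
 */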

void
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
        while (!list_empty(head)) {
                lnet_process_id_t id = {0};
                lnet_msg_t *msg;

                msg = list_entry(head->next, lnet_msg_t, msg_list);
                list_del(&msg->msg_list);

                id.nid = msg->msg_hdr.src_nid;
                id.pid = msg->msg_hdr.src_pid;

                LASSERT(msg->msg_md == NULL);
                LASSERT(msg->msg_rx_delayed);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

                CWARN("Dropping delayed PUT from %s portal %d match %llu"
                      " offset %d length %d: %s\n",
                      libcfs_id2str(id),
                      msg->msg_hdr.msg.put.ptl_index,
                      msg->msg_hdr.msg.put.match_bits,
                      msg->msg_hdr.msg.put.offset,
                      msg->msg_hdr.payload_length, reason);

                /* NB I can't drop msg's ref on msg_rxpeer until after I've
                 * called lnet_drop_message(), so I just hang onto msg as well
                 * until that's done */

                lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
                                  msg->msg_private, msg->msg_len);

                /*
                 * NB: the message will not generate an event because there is
                 * no attached MD, but we still give an error code so
                 * lnet_msg_decommit() can skip counter operations and other
                 * checks.
                 */
                lnet_finalize(msg->msg_rxni, msg, -ENOENT);
        }
}

void
lnet_recv_delayed_msg_list(struct list_head *head)
{
        while (!list_empty(head)) {
                lnet_msg_t *msg;
                lnet_process_id_t id;

                msg = list_entry(head->next, lnet_msg_t, msg_list);
                list_del(&msg->msg_list);

                /* md won't disappear under me, since each msg
                 * holds a ref on it */

                id.nid = msg->msg_hdr.src_nid;
                id.pid = msg->msg_hdr.src_pid;

                LASSERT(msg->msg_rx_delayed);
                LASSERT(msg->msg_md != NULL);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_rxni != NULL);
                LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

                CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
                       "match %llu offset %d length %d.\n",
                       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
                       msg->msg_hdr.msg.put.match_bits,
                       msg->msg_hdr.msg.put.offset,
                       msg->msg_hdr.payload_length);

                lnet_recv_put(msg->msg_rxni, msg);
        }
}
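
/*
 * Illustrative sketch (editor's addition): the two helpers above are the
 * two possible fates of a PUT that arrived before a matching MD existed.
 * A caller that resolves the blockage (e.g. the portal match code after a
 * new MD is attached) collects the affected messages on private lists
 * under the matching lock, then delivers or drops them outside the lock.
 * The list handling below is hypothetical glue:
 *
 *      LIST_HEAD(matched);
 *      LIST_HEAD(dropped);
 *
 *      // ... move entries from the portal's delayed queue onto
 *      // 'matched'/'dropped' while holding the matching lock ...
 *
 *      lnet_recv_delayed_msg_list(&matched);
 *      lnet_drop_delayed_msg_list(&dropped, "MD match failed");
 */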

/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed to
 * by \a mdh handle. Using a MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if it exists).
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset,
        __u64 hdr_data)
{
        struct lnet_msg *msg;
        struct lnet_libmd *md;
        int cpt;
        int rc;

        LASSERT(the_lnet.ln_refcount > 0);

        if (!list_empty(&the_lnet.ln_test_peers) &&  /* normally we don't */
            fail_peer(target.nid, 1)) {              /* shall we now? */
                CERROR("Dropping PUT to %s: simulated failure\n",
                       libcfs_id2str(target));
                return -EIO;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));
                return -ENOMEM;
        }
        msg->msg_vmflush = !!memory_pressure_get();

        cpt = lnet_cpt_of_cookie(mdh.cookie);
        lnet_res_lock(cpt);

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);
                lnet_res_unlock(cpt);

                lnet_msg_free(msg);
                return -ENOENT;
        }

        CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

        msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.put.hdr_data = hdr_data;

        /* NB handles only looked up by creator (no flips) */
        if (ack == LNET_ACK_REQ) {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        the_lnet.ln_interface_cookie;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        md->md_lh.lh_cookie;
        } else {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
        }

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

        rc = lnet_send(self, msg, LNET_NID_ANY);
        if (rc != 0) {
                CNETERR("Error sending PUT to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize(NULL, msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}
EXPORT_SYMBOL(LNetPut);
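
/*
 * Usage sketch (editor's addition, not part of the original source): a
 * minimal PUT under stated assumptions -- an EQ already allocated with
 * LNetEQAlloc(), a flat kernel buffer, and an agreed-on portal/match-bits
 * pair.  "EXAMPLE_PORTAL" and "EXAMPLE_MATCHBITS" are hypothetical.
 *
 *      static int
 *      example_put(void *buf, unsigned int len, lnet_process_id_t peer,
 *                  lnet_handle_eq_t eqh)
 *      {
 *              lnet_md_t md = {
 *                      .start     = buf,
 *                      .length    = len,
 *                      .threshold = 2,         // one SEND + one ACK
 *                      .options   = 0,
 *                      .eq_handle = eqh,
 *              };
 *              lnet_handle_md_t mdh;
 *              int rc;
 *
 *              rc = LNetMDBind(md, LNET_UNLINK, &mdh); // "free floating" MD
 *              if (rc != 0)
 *                      return rc;
 *
 *              return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, peer,
 *                             EXAMPLE_PORTAL, EXAMPLE_MATCHBITS, 0, 0);
 *      }
 */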

lnet_msg_t *
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
        /* The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
         * returns a msg for the LND to pass to lnet_finalize() when the sink
         * data has been received.
         *
         * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
         * lnet_finalize() is called on it, so the LND must call this first */

        struct lnet_msg *msg = lnet_msg_alloc();
        struct lnet_libmd *getmd = getmsg->msg_md;
        lnet_process_id_t peer_id = getmsg->msg_target;
        int cpt;

        LASSERT(!getmsg->msg_target_is_router);
        LASSERT(!getmsg->msg_routing);

        if (msg == NULL) {
                CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
                goto drop;
        }

        cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
        lnet_res_lock(cpt);

        LASSERT(getmd->md_refcount > 0);

        if (getmd->md_threshold == 0) {
                CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
                       getmd);
                lnet_res_unlock(cpt);
                goto drop;
        }

        LASSERT(getmd->md_offset == 0);

        CDEBUG(D_NET, "%s: Reply from %s md %p\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

        /* setup information for lnet_build_msg_event */
        msg->msg_initiator = lnet_peer_primary_nid(peer_id.nid);
        /* Cheaper: msg->msg_initiator = getmsg->msg_txpeer->lp_nid; */
        msg->msg_from = peer_id.nid;
        msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
        msg->msg_hdr.src_nid = peer_id.nid;
        msg->msg_hdr.payload_length = getmd->md_length;
        msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

        lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
        lnet_res_unlock(cpt);

        cpt = lnet_cpt_of_nid(peer_id.nid, ni);
        lnet_net_lock(cpt);
        lnet_msg_commit(msg, cpt);
        lnet_net_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_REPLY);

        return msg;

 drop:
        cpt = lnet_cpt_of_nid(peer_id.nid, ni);

        lnet_net_lock(cpt);
        the_lnet.ln_counters[cpt]->drop_count++;
        the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
        lnet_net_unlock(cpt);

        if (msg != NULL)
                lnet_msg_free(msg);

        return NULL;
}
EXPORT_SYMBOL(lnet_create_reply_msg);

void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
        /* Set the REPLY length, now that the RDMA that elides the REPLY
         * message has completed and I know it. */
        LASSERT(reply != NULL);
        LASSERT(reply->msg_type == LNET_MSG_GET);
        LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);

        /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
         * the end of my buffer, I might as well be dead. */
        LASSERT(len <= reply->msg_ev.mlength);

        reply->msg_ev.mlength = len;
}
EXPORT_SYMBOL(lnet_set_reply_msg_len);
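
/*
 * Illustrative sketch (editor's addition): how an LND uses the two helpers
 * above when it services a GET by RDMA-ing straight into the sink buffer,
 * eliding the REPLY message.  The completion handler and its name
 * ("mylnd_get_rdma_done") are hypothetical.
 *
 *      // At send time, before lnet_finalize() can free getmsg:
 *      lnet_msg_t *reply = lnet_create_reply_msg(ni, getmsg);
 *      if (reply == NULL)
 *              return -ENOMEM;         // REPLY accounted as dropped
 *
 *      // ... later, when the RDMA completes having written 'nob' bytes:
 *      static void
 *      mylnd_get_rdma_done(lnet_ni_t *ni, lnet_msg_t *reply,
 *                          unsigned int nob, int status)
 *      {
 *              if (status == 0)
 *                      lnet_set_reply_msg_len(ni, reply, nob);
 *              lnet_finalize(ni, reply, status);
 *      }
 */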

/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into a MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating"
 * (See LNetMDBind()).
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if it exists) of the MD.
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset)
{
        struct lnet_msg *msg;
        struct lnet_libmd *md;
        int cpt;
        int rc;

        LASSERT(the_lnet.ln_refcount > 0);

        if (!list_empty(&the_lnet.ln_test_peers) &&  /* normally we don't */
            fail_peer(target.nid, 1)) {              /* shall we now? */
                CERROR("Dropping GET to %s: simulated failure\n",
                       libcfs_id2str(target));
                return -EIO;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));
                return -ENOMEM;
        }

        cpt = lnet_cpt_of_cookie(mdh.cookie);
        lnet_res_lock(cpt);

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);

                lnet_msg_free(msg);
                return -ENOENT;
        }

        CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

        msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

        /* NB handles only looked up by creator (no flips) */
        msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
                the_lnet.ln_interface_cookie;
        msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
                md->md_lh.lh_cookie;

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

        rc = lnet_send(self, msg, LNET_NID_ANY);
        if (rc < 0) {
                CNETERR("Error sending GET to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize(NULL, msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}
EXPORT_SYMBOL(LNetGet);
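
/*
 * Usage sketch (editor's addition): issuing a GET into a locally bound
 * sink buffer.  Same assumptions as the LNetPut() sketch above; the REPLY
 * data lands in 'buf' and completion is reported by an LNET_EVENT_REPLY
 * on the EQ.  "EXAMPLE_PORTAL" and "EXAMPLE_MATCHBITS" are hypothetical.
 *
 *      static int
 *      example_get(void *buf, unsigned int len, lnet_process_id_t peer,
 *                  lnet_handle_eq_t eqh)
 *      {
 *              lnet_md_t md = {
 *                      .start     = buf,
 *                      .length    = len,
 *                      .threshold = 2,         // one SEND + one REPLY
 *                      .options   = 0,
 *                      .eq_handle = eqh,
 *              };
 *              lnet_handle_md_t mdh;
 *              int rc;
 *
 *              rc = LNetMDBind(md, LNET_UNLINK, &mdh);
 *              if (rc != 0)
 *                      return rc;
 *
 *              return LNetGet(LNET_NID_ANY, mdh, peer,
 *                             EXAMPLE_PORTAL, EXAMPLE_MATCHBITS, 0);
 *      }
 */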

/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface and the reserved
 * option local_nid_dist_zero is set (the default).
 * \retval positives Distance to the target NID, i.e. the number of hops
 * plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
        struct list_head *e;
        struct lnet_ni *ni = NULL;
        lnet_remotenet_t *rnet;
        __u32 dstnet = LNET_NIDNET(dstnid);
        int hops;
        int cpt;
        __u32 order = 2;
        struct list_head *rn_list;

        /* if !local_nid_dist_zero, I don't return a distance of 0 ever
         * (when lustre sees a distance of 0, it substitutes 0@lo), so I
         * keep order 0 free for 0@lo and order 1 free for a local NID
         * network */

        LASSERT(the_lnet.ln_refcount > 0);

        cpt = lnet_net_lock_current();

        while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
                if (ni->ni_nid == dstnid) {
                        if (srcnidp != NULL)
                                *srcnidp = dstnid;
                        if (orderp != NULL) {
                                if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
                                        *orderp = 0;
                                else
                                        *orderp = 1;
                        }
                        lnet_net_unlock(cpt);

                        return local_nid_dist_zero ? 0 : 1;
                }

                if (LNET_NIDNET(ni->ni_nid) == dstnet) {
                        /* Check if ni was originally created in
                         * current net namespace.
                         * If not, assign order above 0xffff0000,
                         * to make this ni not a priority. */
                        if (!net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
                                order += 0xffff0000;

                        if (srcnidp != NULL)
                                *srcnidp = ni->ni_nid;
                        if (orderp != NULL)
                                *orderp = order;
                        lnet_net_unlock(cpt);
                        return 1;
                }

                order++;
        }

        rn_list = lnet_net2rnethash(dstnet);
        list_for_each(e, rn_list) {
                rnet = list_entry(e, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == dstnet) {
                        lnet_route_t *route;
                        lnet_route_t *shortest = NULL;
                        __u32 shortest_hops = LNET_UNDEFINED_HOPS;
                        __u32 route_hops;

                        LASSERT(!list_empty(&rnet->lrn_routes));

                        list_for_each_entry(route, &rnet->lrn_routes,
                                            lr_list) {
                                route_hops = route->lr_hops;
                                if (route_hops == LNET_UNDEFINED_HOPS)
                                        route_hops = 1;
                                if (shortest == NULL ||
                                    route_hops < shortest_hops) {
                                        shortest = route;
                                        shortest_hops = route_hops;
                                }
                        }

                        LASSERT(shortest != NULL);
                        hops = shortest_hops;
                        if (srcnidp != NULL) {
                                ni = lnet_get_next_ni_locked(
                                        shortest->lr_gateway->lpni_net,
                                        NULL);
                                *srcnidp = ni->ni_nid;
                        }
                        if (orderp != NULL)
                                *orderp = order;
                        lnet_net_unlock(cpt);
                        return hops + 1;
                }
                order++;
        }

        lnet_net_unlock(cpt);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);
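
/*
 * Usage sketch (editor's addition): interpreting LNetDist() results the
 * way its callers do -- 0 means the NID is local (modulo the
 * local_nid_dist_zero tunable), 1 means it is on a directly attached
 * network, and larger values count router hops plus one.  "peer_nid" is
 * hypothetical.
 *
 *      lnet_nid_t src;
 *      __u32 order;
 *      int dist = LNetDist(peer_nid, &src, &order);
 *
 *      if (dist < 0)
 *              CDEBUG(D_NET, "%s unreachable\n", libcfs_nid2str(peer_nid));
 *      else
 *              CDEBUG(D_NET, "%s at distance %d via %s (order %u)\n",
 *                     libcfs_nid2str(peer_nid), dist,
 *                     libcfs_nid2str(src), order);
 */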