/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

static int local_nid_dist_zero = 1;
CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
                "Reserved");
int
lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
{
        lnet_test_peer_t  *tp;
        cfs_list_t        *el;
        cfs_list_t        *next;
        cfs_list_t         cull;

        LASSERT (the_lnet.ln_init);

        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                LNET_LOCK();
                cfs_list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
                LNET_UNLOCK();
                return 0;
        }

        /* removing entries */
        CFS_INIT_LIST_HEAD (&cull);

        LNET_LOCK();

        cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
                tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        cfs_list_del (&tp->tp_list);
                        cfs_list_add (&tp->tp_list, &cull);
                }
        }

        LNET_UNLOCK();

        while (!cfs_list_empty (&cull)) {
                tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);

                cfs_list_del (&tp->tp_list);
                LIBCFS_FREE(tp, sizeof (*tp));
        }
        return 0;
}
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
        lnet_test_peer_t *tp;
        cfs_list_t       *el;
        cfs_list_t       *next;
        cfs_list_t        cull;
        int               fail = 0;

        CFS_INIT_LIST_HEAD (&cull);

        LNET_LOCK();

        cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
                tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                cfs_list_del (&tp->tp_list);
                                cfs_list_add (&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
                    nid == tp->tp_nid) {          /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        cfs_list_del (&tp->tp_list);
                                        cfs_list_add (&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        LNET_UNLOCK();

        while (!cfs_list_empty (&cull)) {
                tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
                cfs_list_del (&tp->tp_list);

                LIBCFS_FREE(tp, sizeof (*tp));
        }

        return (fail);
}
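
/*
 * Illustrative note (not in the original source): lnet_fail_nid() loads the
 * fault-injection table that fail_peer() consults on each send/receive.  A
 * hypothetical caller, e.g. reached via the LNetCtl() ioctl path, would use:
 *
 *      lnet_fail_nid(nid, 3);                   - drop next 3 messages for nid
 *      lnet_fail_nid(nid, LNET_MD_THRESH_INF);  - drop messages for nid forever
 *      lnet_fail_nid(LNET_NID_ANY, 0);          - remove all entries
 *
 * fail_peer() then decrements the matching entry's threshold and returns
 * non-zero when the message should be dropped to simulate failure.
 */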
unsigned int
lnet_iov_nob (unsigned int niov, struct iovec *iov)
{
        unsigned int nob = 0;

        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}
void
lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
                   unsigned int nsiov, struct iovec *siov, unsigned int soffset,
                   unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int  this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT (ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT (ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT (nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT (nsiov > 0);
        }

        do {
                LASSERT (ndiov > 0);
                LASSERT (nsiov > 0);
                this_nob = MIN(diov->iov_len - doffset,
                               siov->iov_len - soffset);
                this_nob = MIN(this_nob, nob);

                memcpy ((char *)diov->iov_base + doffset,
                        (char *)siov->iov_base + soffset, this_nob);

                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}
int
lnet_extract_iov (int dst_niov, struct iovec *dst,
                  int src_niov, struct iovec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT (src_niov > 0);
        while (offset >= src->iov_len) {        /* skip initial frags */
                offset -= src->iov_len;
                src_niov--;
                src++;
                LASSERT (src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT (src_niov > 0);
                LASSERT ((int)niov <= dst_niov);

                frag_len = src->iov_len - offset;
                dst->iov_base = ((char *)src->iov_base) + offset;

                if (len <= frag_len) {
                        dst->iov_len = len;
                        return (niov);
                }

                dst->iov_len = frag_len;

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
#ifndef __KERNEL__
unsigned int
lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
{
        LASSERT (0);
        return (0);
}

void
lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov, unsigned int doffset,
                     unsigned int nskiov, lnet_kiov_t *skiov, unsigned int soffset,
                     unsigned int nob)
{
        LASSERT (0);
}

void
lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int nob)
{
        LASSERT (0);
}

void
lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nob)
{
        LASSERT (0);
}

int
lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                   int src_niov, lnet_kiov_t *src,
                   unsigned int offset, unsigned int len)
{
        LASSERT (0);
        return (0);
}

#else /* __KERNEL__ */
unsigned int
lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
{
        unsigned int  nob = 0;

        while (niov-- > 0)
                nob += (kiov++)->kiov_len;

        return (nob);
}
void
lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                     unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
                     unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int    this_nob;
        char           *daddr = NULL;
        char           *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!cfs_in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->kiov_len) {
                doffset -= diov->kiov_len;
                diov++;
                ndiov--;
                LASSERT (ndiov > 0);
        }

        LASSERT (nsiov > 0);
        while (soffset >= siov->kiov_len) {
                soffset -= siov->kiov_len;
                siov++;
                nsiov--;
                LASSERT (nsiov > 0);
        }

        do {
                LASSERT (ndiov > 0);
                LASSERT (nsiov > 0);
                this_nob = MIN(diov->kiov_len - doffset,
                               siov->kiov_len - soffset);
                this_nob = MIN(this_nob, nob);

                if (daddr == NULL)
                        daddr = ((char *)cfs_kmap(diov->kiov_page)) +
                                diov->kiov_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)cfs_kmap(siov->kiov_page)) +
                                siov->kiov_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->kiov_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        cfs_kunmap(diov->kiov_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->kiov_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        cfs_kunmap(siov->kiov_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                cfs_kunmap(diov->kiov_page);
        if (saddr != NULL)
                cfs_kunmap(siov->kiov_page);
}
void
lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!cfs_in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT (nkiov > 0);
        }

        do {
                LASSERT (niov > 0);
                LASSERT (nkiov > 0);
                this_nob = MIN(iov->iov_len - iovoffset,
                               kiov->kiov_len - kiovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)cfs_kmap(kiov->kiov_page)) +
                               kiov->kiov_offset + kiovoffset;

                memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        cfs_kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                cfs_kunmap(kiov->kiov_page);
}
void
lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int    this_nob;
        char           *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!cfs_in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT (nkiov > 0);
        }

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                LASSERT (nkiov > 0);
                LASSERT (niov > 0);
                this_nob = MIN(kiov->kiov_len - kiovoffset,
                               iov->iov_len - iovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)cfs_kmap(kiov->kiov_page)) +
                               kiov->kiov_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        cfs_kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                cfs_kunmap(kiov->kiov_page);
}
int
lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                   int src_niov, lnet_kiov_t *src,
                   unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int    frag_len;
        unsigned int    niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT (src_niov > 0);
        while (offset >= src->kiov_len) {       /* skip initial frags */
                offset -= src->kiov_len;
                src_niov--;
                src++;
                LASSERT (src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT (src_niov > 0);
                LASSERT ((int)niov <= dst_niov);

                frag_len = src->kiov_len - offset;
                dst->kiov_page = src->kiov_page;
                dst->kiov_offset = src->kiov_offset + offset;

                if (len <= frag_len) {
                        dst->kiov_len = len;
                        LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
                        return (niov);
                }

                dst->kiov_len = frag_len;
                LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
#endif /* __KERNEL__ */
void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
{
        unsigned int  niov = 0;
        struct iovec *iov = NULL;
        lnet_kiov_t  *kiov = NULL;
        int           rc;

        LASSERT (!cfs_in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        iov  = msg->msg_iov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
                                    niov, iov, kiov, offset, mlen, rlen);
        if (rc < 0)
                lnet_finalize(ni, msg, rc);
}
void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
        lnet_libmd_t *md = msg->msg_md;

        LASSERT (msg->msg_len > 0);
        LASSERT (!msg->msg_routing);
        LASSERT (md != NULL);
        LASSERT (msg->msg_niov == 0);
        LASSERT (msg->msg_iov == NULL);
        LASSERT (msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        if ((md->md_options & LNET_MD_KIOV) != 0)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
}
void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}
void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
        void   *priv = msg->msg_private;
        int     rc;

        LASSERT (!cfs_in_interrupt ());
        LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                 (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0)
                lnet_finalize(ni, msg, rc);
}
int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
        int     rc;

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                          &msg->msg_private);
        if (rc != 0) {
                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
        }

        return rc;
}
/* NB: caller shall hold a ref on 'lp' as I'd drop LNET_LOCK */
void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
        cfs_time_t last_alive = 0;

        LASSERT (lnet_peer_aliveness_enabled(lp));
        LASSERT (ni->ni_lnd->lnd_query != NULL);
        LASSERT (the_lnet.ln_routing == 1);

        LNET_UNLOCK();
        (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
        LNET_LOCK();

        lp->lp_last_query = cfs_time_current();

        if (last_alive != 0) /* NI has updated timestamp */
                lp->lp_last_alive = last_alive;
}
/* NB: always called with LNET_LOCK held */
static inline int
lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
{
        int        alive = 0;
        cfs_time_t deadline;

        LASSERT (lnet_peer_aliveness_enabled(lp));
        LASSERT (the_lnet.ln_routing == 1);

        /* Trust lnet_notify() if it has more recent aliveness news, but
         * ignore the initial assumed death (see lnet_peers_start_down()).
         */
        if (!lp->lp_alive && lp->lp_alive_count > 0 &&
            cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
                return 0;

        deadline = cfs_time_add(lp->lp_last_alive,
                                cfs_time_seconds(lp->lp_ni->ni_peertimeout));
        alive = cfs_time_after(deadline, now);

        /* Update obsolete lp_alive except for routers assumed to be dead
         * initially, because router checker would update aliveness in this
         * case, and moreover lp_last_alive at peer creation is assumed.
         */
        if (alive && !lp->lp_alive &&
            !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
                lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

        return alive;
}
/* NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the LNET_LOCK */
int
lnet_peer_alive_locked (lnet_peer_t *lp)
{
        cfs_time_t now = cfs_time_current();

        /* LU-630: only router checks peer health. */
        if (the_lnet.ln_routing == 0)
                return 1;

        if (!lnet_peer_aliveness_enabled(lp))
                return -ENODEV;

        if (lnet_peer_is_alive(lp, now))
                return 1;

        /* Peer appears dead, but we should avoid frequent NI queries (at
         * most once per lnet_queryinterval seconds). */
        if (lp->lp_last_query != 0) {
                static const int lnet_queryinterval = 1;

                cfs_time_t next_query =
                           cfs_time_add(lp->lp_last_query,
                                        cfs_time_seconds(lnet_queryinterval));

                if (cfs_time_before(now, next_query)) {
                        if (lp->lp_alive)
                                CWARN("Unexpected aliveness of peer %s: "
                                      "%d < %d (%d/%d)\n",
                                      libcfs_nid2str(lp->lp_nid),
                                      (int)now, (int)next_query,
                                      lnet_queryinterval,
                                      lp->lp_ni->ni_peertimeout);
                        return 0;
                }
        }

        /* query NI for latest aliveness news */
        lnet_ni_query_locked(lp->lp_ni, lp);

        if (lnet_peer_is_alive(lp, now))
                return 1;

        lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
        return 0;
}
int
lnet_post_send_locked (lnet_msg_t *msg, int do_send)
{
        /* lnet_send is going to LNET_UNLOCK immediately after this, so it
         * sets do_send FALSE and I don't do the unlock/send/lock bit.  I
         * return EAGAIN if msg blocked, EHOSTUNREACH if msg_txpeer appears
         * dead, and 0 if sent or OK to send */
        lnet_peer_t *lp = msg->msg_txpeer;
        lnet_ni_t   *ni = lp->lp_ni;

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(lp) == 0) {
                the_lnet.ln_counters.drop_count++;
                the_lnet.ln_counters.drop_length += msg->msg_len;
                LNET_UNLOCK();

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send)
                        lnet_finalize(ni, msg, -EHOSTUNREACH);

                LNET_LOCK();
                return EHOSTUNREACH;
        }

        if (!msg->msg_peertxcredit) {
                LASSERT ((lp->lp_txcredits < 0) ==
                         !cfs_list_empty(&lp->lp_txq));

                msg->msg_peertxcredit = 1;
                lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
                lp->lp_txcredits--;

                if (lp->lp_txcredits < lp->lp_mintxcredits)
                        lp->lp_mintxcredits = lp->lp_txcredits;

                if (lp->lp_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        cfs_list_add_tail(&msg->msg_list, &lp->lp_txq);
                        return EAGAIN;
                }
        }

        if (!msg->msg_txcredit) {
                LASSERT ((ni->ni_txcredits < 0) ==
                         !cfs_list_empty(&ni->ni_txq));

                msg->msg_txcredit = 1;
                ni->ni_txcredits--;

                if (ni->ni_txcredits < ni->ni_mintxcredits)
                        ni->ni_mintxcredits = ni->ni_txcredits;

                if (ni->ni_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        cfs_list_add_tail(&msg->msg_list, &ni->ni_txq);
                        return EAGAIN;
                }
        }

        if (do_send) {
                LNET_UNLOCK();
                lnet_ni_send(ni, msg);
                LNET_LOCK();
        }
        return 0;
}
lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
        lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];

        LASSERT (msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
                rbp++;
                LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
        }

        return rbp;
}
int
lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
{
        /* lnet_parse is going to LNET_UNLOCK immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.  I
         * return EAGAIN if msg blocked and 0 if received or OK to receive */
        lnet_peer_t         *lp = msg->msg_rxpeer;
        lnet_rtrbufpool_t   *rbp;
        lnet_rtrbuf_t       *rb;

        LASSERT (msg->msg_iov == NULL);
        LASSERT (msg->msg_kiov == NULL);
        LASSERT (msg->msg_niov == 0);
        LASSERT (msg->msg_routing);
        LASSERT (msg->msg_receiving);
        LASSERT (!msg->msg_sending);

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                LASSERT ((lp->lp_rtrcredits < 0) ==
                         !cfs_list_empty(&lp->lp_rtrq));

                msg->msg_peerrtrcredit = 1;
                lp->lp_rtrcredits--;
                if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
                        lp->lp_minrtrcredits = lp->lp_rtrcredits;

                if (lp->lp_rtrcredits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        cfs_list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        return EAGAIN;
                }
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                LASSERT ((rbp->rbp_credits < 0) ==
                         !cfs_list_empty(&rbp->rbp_msgs));

                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        cfs_list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return EAGAIN;
                }
        }

        LASSERT (!cfs_list_empty(&rbp->rbp_bufs));
        rb = cfs_list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
        cfs_list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        if (do_recv) {
                LNET_UNLOCK();
                lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                LNET_LOCK();
        }
        return 0;
}
void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
        lnet_peer_t *txpeer = msg->msg_txpeer;
        lnet_msg_t  *msg2;
        lnet_ni_t   *ni;

        if (msg->msg_txcredit) {
                /* give back NI txcredits */
                msg->msg_txcredit = 0;
                ni = txpeer->lp_ni;

                LASSERT((ni->ni_txcredits < 0) == !cfs_list_empty(&ni->ni_txq));

                ni->ni_txcredits++;
                if (ni->ni_txcredits <= 0) {
                        msg2 = cfs_list_entry(ni->ni_txq.next, lnet_msg_t,
                                              msg_list);
                        cfs_list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txpeer->lp_ni == ni);
                        LASSERT(msg2->msg_tx_delayed);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                LASSERT((txpeer->lp_txcredits < 0) ==
                        !cfs_list_empty(&txpeer->lp_txq));

                txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
                LASSERT (txpeer->lp_txqnob >= 0);

                txpeer->lp_txcredits++;
                if (txpeer->lp_txcredits <= 0) {
                        msg2 = cfs_list_entry(txpeer->lp_txq.next,
                                              lnet_msg_t, msg_list);
                        cfs_list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_decref_locked(txpeer);
        }
}
void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
        lnet_peer_t *rxpeer = msg->msg_rxpeer;
        lnet_msg_t  *msg2;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                lnet_rtrbuf_t     *rb;
                lnet_rtrbufpool_t *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT (msg->msg_kiov != NULL);

                rb = cfs_list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
                rbp = rb->rb_pool;
                LASSERT (rbp == lnet_msg2bufpool(msg));

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT((rbp->rbp_credits < 0) ==
                        !cfs_list_empty(&rbp->rbp_msgs));
                LASSERT((rbp->rbp_credits > 0) ==
                        !cfs_list_empty(&rbp->rbp_bufs));

                cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
                rbp->rbp_credits++;
                if (rbp->rbp_credits <= 0) {
                        msg2 = cfs_list_entry(rbp->rbp_msgs.next,
                                              lnet_msg_t, msg_list);
                        cfs_list_del(&msg2->msg_list);

                        (void) lnet_post_routed_recv_locked(msg2, 1);
                }
        }

        if (msg->msg_peerrtrcredit) {
                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                LASSERT((rxpeer->lp_rtrcredits < 0) ==
                        !cfs_list_empty(&rxpeer->lp_rtrq));

                rxpeer->lp_rtrcredits++;
                if (rxpeer->lp_rtrcredits <= 0) {
                        msg2 = cfs_list_entry(rxpeer->lp_rtrq.next,
                                              lnet_msg_t, msg_list);
                        cfs_list_del(&msg2->msg_list);

                        (void) lnet_post_routed_recv_locked(msg2, 1);
                }
        }

        LASSERT (!msg->msg_rtrcredit);
        LASSERT (!msg->msg_peerrtrcredit);

        if (rxpeer != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_decref_locked(rxpeer);
        }
}
static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
        lnet_peer_t *p1 = r1->lr_gateway;
        lnet_peer_t *p2 = r2->lr_gateway;

        if (r1->lr_hops < r2->lr_hops)
                return 1;

        if (r1->lr_hops > r2->lr_hops)
                return -1;

        if (p1->lp_txqnob < p2->lp_txqnob)
                return 1;

        if (p1->lp_txqnob > p2->lp_txqnob)
                return -1;

        if (p1->lp_txcredits > p2->lp_txcredits)
                return 1;

        if (p1->lp_txcredits < p2->lp_txcredits)
                return -1;

        return 0;
}
static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target)
{
        lnet_remotenet_t *rnet;
        lnet_route_t     *rtr;
        lnet_route_t     *rtr_best;
        lnet_route_t     *rtr_last;
        struct lnet_peer *lp_best;
        struct lnet_peer *lp;
        int               rc;

        rnet = lnet_find_net_locked(LNET_NIDNET(target));
        if (rnet == NULL)
                return NULL;

        lp_best = NULL;
        rtr_best = rtr_last = NULL;
        cfs_list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
                lp = rtr->lr_gateway;

                if (!lp->lp_alive || /* gateway is down */
                    (lp->lp_ping_version == LNET_PROTO_PING_VERSION &&
                     rtr->lr_downis != 0)) /* NI to target is down */
                        continue;

                if (ni != NULL && lp->lp_ni != ni)
                        continue;

                if (lp_best == NULL) {
                        rtr_best = rtr_last = rtr;
                        lp_best = lp;
                        continue;
                }

                rc = lnet_compare_routes(rtr, rtr_best);
                if (rc < 0)
                        continue;

                rtr_best = rtr;
                lp_best = lp;
        }

        if (rtr_best != NULL) {
                /* Place selected route at the end of the route list to ensure
                 * fairness; everything else being equal... */
                cfs_list_del(&rtr_best->lr_list);
                cfs_list_add_tail(&rtr_best->lr_list, &rnet->lrn_routes);
        }

        return lp_best;
}
int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
{
        lnet_nid_t        dst_nid = msg->msg_target.nid;
        lnet_ni_t        *src_ni;
        lnet_ni_t        *local_ni;
        lnet_peer_t      *lp;
        int               rc;

        LASSERT (msg->msg_txpeer == NULL);
        LASSERT (!msg->msg_sending);
        LASSERT (!msg->msg_target_is_router);
        LASSERT (!msg->msg_receiving);

        msg->msg_sending = 1;

        /* NB! ni != NULL == interface pre-determined (ACK/REPLY) */

        LNET_LOCK();

        if (the_lnet.ln_shutdown) {
                LNET_UNLOCK();
                return -ESHUTDOWN;
        }

        if (src_nid == LNET_NID_ANY) {
                src_ni = NULL;
        } else {
                src_ni = lnet_nid2ni_locked(src_nid);
                if (src_ni == NULL) {
                        LNET_UNLOCK();
                        LCONSOLE_WARN("Can't send to %s: src %s is not a "
                                      "local nid\n", libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));
                        return -EINVAL;
                }
                LASSERT (!msg->msg_routing);
        }

        lnet_msg_commit(msg, 1);
        /* Is this for someone on a local network? */
        local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid));

        if (local_ni != NULL) {
                if (src_ni == NULL) {
                        src_ni = local_ni;
                        src_nid = src_ni->ni_nid;
                } else if (src_ni == local_ni) {
                        lnet_ni_decref_locked(local_ni);
                } else {
                        lnet_ni_decref_locked(local_ni);
                        lnet_ni_decref_locked(src_ni);
                        LNET_UNLOCK();
                        LCONSOLE_WARN("No route to %s from %s\n",
                                      libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));
                        return -EINVAL;
                }

                LASSERT (src_nid != LNET_NID_ANY);

                if (!msg->msg_routing)
                        msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

                if (src_ni == the_lnet.ln_loni) {
                        /* No send credit hassles with LOLND */
                        LNET_UNLOCK();
                        lnet_ni_send(src_ni, msg);
                        lnet_ni_decref(src_ni);
                        return 0;
                }

                rc = lnet_nid2peer_locked(&lp, dst_nid);
                lnet_ni_decref_locked(src_ni); /* lp has ref on src_ni; lose mine */
                if (rc != 0) {
                        LNET_UNLOCK();
                        LCONSOLE_WARN("Error %d finding peer %s\n", rc,
                                      libcfs_nid2str(dst_nid));
                        /* ENOMEM or shutting down */
                        return rc;
                }
                LASSERT (lp->lp_ni == src_ni);
        } else {
#ifndef __KERNEL__
                LNET_UNLOCK();

                /* NB
                 * - once application finishes computation, check here to
                 *   update router states before it waits for pending IO in
                 *   LNetEQPoll
                 * - recursion breaker: router checker sends no message
                 *   to remote networks */
                if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING)
                        lnet_router_checker();

                LNET_LOCK();
#endif
                /* sending to a remote network */
                lp = lnet_find_route_locked(src_ni, dst_nid);
                if (lp == NULL) {
                        if (src_ni != NULL)
                                lnet_ni_decref_locked(src_ni);
                        LNET_UNLOCK();

                        LCONSOLE_WARN("No route to %s via %s "
                                      "(all routers down)\n",
                                      libcfs_id2str(msg->msg_target),
                                      libcfs_nid2str(src_nid));
                        return -EHOSTUNREACH;
                }

                CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
                       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
                       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

                if (src_ni == NULL) {
                        src_ni = lp->lp_ni;
                        src_nid = src_ni->ni_nid;
                } else {
                        LASSERT (src_ni == lp->lp_ni);
                        lnet_ni_decref_locked(src_ni);
                }

                lnet_peer_addref_locked(lp);

                LASSERT (src_nid != LNET_NID_ANY);

                if (!msg->msg_routing) {
                        /* I'm the source and now I know which NI to send on */
                        msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
                }

                msg->msg_target_is_router = 1;
                msg->msg_target.nid = lp->lp_nid;
                msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
        }

        /* 'lp' is our best choice of peer */

        LASSERT (!msg->msg_peertxcredit);
        LASSERT (!msg->msg_txcredit);
        LASSERT (msg->msg_txpeer == NULL);

        msg->msg_txpeer = lp; /* msg takes my ref on lp */

        rc = lnet_post_send_locked(msg, 0);
        LNET_UNLOCK();

        if (rc == EHOSTUNREACH)
                return -EHOSTUNREACH;

        if (rc == 0)
                lnet_ni_send(src_ni, msg);

        return 0;
}
static void
lnet_drop_message (lnet_ni_t *ni, void *private, unsigned int nob)
{
        LNET_LOCK();
        the_lnet.ln_counters.drop_count++;
        the_lnet.ln_counters.drop_length += nob;
        LNET_UNLOCK();

        lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}
static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t *hdr = &msg->msg_hdr;

        if (msg->msg_wanted != 0)
                lnet_setpayloadbuffer(msg);

        lnet_build_msg_event(msg, LNET_EVENT_PUT);

        /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
         * it back into the ACK during lnet_finalize() */
        msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
                        (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);

        lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
                     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}
static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t             *hdr = &msg->msg_hdr;
        struct lnet_match_info  info;
        int                     rc;

        /* Convert put fields to host byte order */
        hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
        hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
        hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

        info.mi_id.nid  = hdr->src_nid;
        info.mi_id.pid  = hdr->src_pid;
        info.mi_opc     = LNET_MD_OP_PUT;
        info.mi_portal  = hdr->msg.put.ptl_index;
        info.mi_rlength = hdr->payload_length;
        info.mi_roffset = hdr->msg.put.offset;
        info.mi_mbits   = hdr->msg.put.match_bits;

        msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;

 again:
        rc = lnet_ptl_match_md(&info, msg);

        switch (rc) {
        default:
                LBUG();

        case LNET_MATCHMD_OK:
                lnet_recv_put(ni, msg);
                return 0;

        case LNET_MATCHMD_NONE:
                if (msg->msg_rx_delayed) /* attached on delayed list */
                        return 0;

                rc = lnet_ni_eager_recv(ni, msg);
                if (rc == 0)
                        goto again;
                /* fall through */

        case LNET_MATCHMD_DROP:
                CNETERR("Dropping PUT from %s portal %d match "LPU64
                        " offset %d length %d: %d\n",
                        libcfs_id2str(info.mi_id), info.mi_portal,
                        info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

                return ENOENT; /* +ve: OK but no match */
        }
}
static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
        struct lnet_match_info  info;
        lnet_hdr_t             *hdr = &msg->msg_hdr;
        lnet_handle_wire_t      reply_wmd;
        int                     rc;

        /* Convert get fields to host byte order */
        hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
        hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
        hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
        hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);

        info.mi_id.nid  = hdr->src_nid;
        info.mi_id.pid  = hdr->src_pid;
        info.mi_opc     = LNET_MD_OP_GET;
        info.mi_portal  = hdr->msg.get.ptl_index;
        info.mi_rlength = hdr->msg.get.sink_length;
        info.mi_roffset = hdr->msg.get.src_offset;
        info.mi_mbits   = hdr->msg.get.match_bits;

        rc = lnet_ptl_match_md(&info, msg);
        if (rc == LNET_MATCHMD_DROP) {
                CNETERR("Dropping GET from %s portal %d match "LPU64
                        " offset %d length %d\n",
                        libcfs_id2str(info.mi_id), info.mi_portal,
                        info.mi_mbits, info.mi_roffset, info.mi_rlength);
                return ENOENT; /* +ve: OK but no match */
        }

        LASSERT(rc == LNET_MATCHMD_OK);

        lnet_build_msg_event(msg, LNET_EVENT_GET);

        reply_wmd = hdr->msg.get.return_wmd;

        lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
                       msg->msg_offset, msg->msg_wanted);

        msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

        if (rdma_get) {
                /* The LND completes the REPLY from her recv procedure */
                lnet_ni_recv(ni, msg->msg_private, msg, 0,
                             msg->msg_offset, msg->msg_len, msg->msg_len);
                return 0;
        }

        lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
        msg->msg_receiving = 0;

        rc = lnet_send(ni->ni_nid, msg);
        if (rc < 0) {
                /* didn't get as far as lnet_ni_send() */
                CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
                       libcfs_nid2str(ni->ni_nid),
                       libcfs_id2str(info.mi_id), rc);

                lnet_finalize(ni, msg, rc);
        }

        return 0;
}
static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
        void             *private = msg->msg_private;
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;
        int               rlength;
        int               mlength;

        LNET_LOCK();

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CNETERR("%s: Dropping REPLY from %s for %s "
                        "MD "LPX64"."LPX64"\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        (md == NULL) ? "invalid" : "inactive",
                        hdr->msg.reply.dst_wmd.wh_interface_cookie,
                        hdr->msg.reply.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                LNET_UNLOCK();
                return ENOENT; /* +ve: OK but no match */
        }

        LASSERT (md->md_offset == 0);

        rlength = hdr->payload_length;
        mlength = MIN(rlength, (int)md->md_length);

        if (mlength < rlength &&
            (md->md_options & LNET_MD_TRUNCATE) == 0) {
                CNETERR("%s: Dropping REPLY from %s length %d "
                        "for MD "LPX64" would overflow (%d)\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
                        (int)md->md_length);
                LNET_UNLOCK();
                return ENOENT; /* +ve: OK but no match */
        }

        CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md "LPX64"\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

        lnet_msg_attach_md(msg, md, 0, mlength);

        if (mlength != 0)
                lnet_setpayloadbuffer(msg);

        LNET_UNLOCK();

        lnet_build_msg_event(msg, LNET_EVENT_REPLY);

        lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
        return 0;
}
static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* Convert ack fields to host byte order */
        hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
        hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

        LNET_LOCK();

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                /* Don't moan; this is expected */
                CDEBUG(D_NET,
                       "%s: Dropping ACK from %s to %s MD "LPX64"."LPX64"\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                       (md == NULL) ? "invalid" : "inactive",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);

                LNET_UNLOCK();
                return ENOENT; /* +ve! */
        }

        CDEBUG(D_NET, "%s: ACK from %s into md "LPX64"\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               hdr->msg.ack.dst_wmd.wh_object_cookie);

        lnet_msg_attach_md(msg, md, 0, 0);

        LNET_UNLOCK();

        lnet_build_msg_event(msg, LNET_EVENT_ACK);

        lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
        return 0;
}
static int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
        int     rc = 0;

        if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
            lnet_msg2bufpool(msg)->rbp_credits <= 0) {
                if (ni->ni_lnd->lnd_eager_recv == NULL) {
                        msg->msg_rx_ready_delay = 1;
                } else {
                        LNET_UNLOCK();
                        rc = lnet_ni_eager_recv(ni, msg);
                        LNET_LOCK();
                }
        }

        if (rc == 0)
                rc = lnet_post_routed_recv_locked(msg, 0);
        return rc;
}
char *
lnet_msgtyp2str (int type)
{
        switch (type) {
        case LNET_MSG_ACK:
                return ("ACK");
        case LNET_MSG_PUT:
                return ("PUT");
        case LNET_MSG_GET:
                return ("GET");
        case LNET_MSG_REPLY:
                return ("REPLY");
        case LNET_MSG_HELLO:
                return ("HELLO");
        default:
                return ("<UNKNOWN>");
        }
}
void
lnet_print_hdr(lnet_hdr_t * hdr)
{
        lnet_process_id_t src = {0};
        lnet_process_id_t dst = {0};
        char *type_str = lnet_msgtyp2str (hdr->type);

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        dst.nid = hdr->dest_nid;
        dst.pid = hdr->dest_pid;

        CWARN("P3 Header at %p of type %s\n", hdr, type_str);
        CWARN("    From %s\n", libcfs_id2str(src));
        CWARN("    To   %s\n", libcfs_id2str(dst));

        switch (hdr->type) {
        default:
                break;

        case LNET_MSG_PUT:
                CWARN("    Ptl index %d, ack md "LPX64"."LPX64", "
                      "match bits "LPU64"\n",
                      hdr->msg.put.ptl_index,
                      hdr->msg.put.ack_wmd.wh_interface_cookie,
                      hdr->msg.put.ack_wmd.wh_object_cookie,
                      hdr->msg.put.match_bits);
                CWARN("    Length %d, offset %d, hdr data "LPX64"\n",
                      hdr->payload_length, hdr->msg.put.offset,
                      hdr->msg.put.hdr_data);
                break;

        case LNET_MSG_GET:
                CWARN("    Ptl index %d, return md "LPX64"."LPX64", "
                      "match bits "LPU64"\n", hdr->msg.get.ptl_index,
                      hdr->msg.get.return_wmd.wh_interface_cookie,
                      hdr->msg.get.return_wmd.wh_object_cookie,
                      hdr->msg.get.match_bits);
                CWARN("    Length %d, src offset %d\n",
                      hdr->msg.get.sink_length,
                      hdr->msg.get.src_offset);
                break;

        case LNET_MSG_ACK:
                CWARN("    dst md "LPX64"."LPX64", "
                      "manipulated length %d\n",
                      hdr->msg.ack.dst_wmd.wh_interface_cookie,
                      hdr->msg.ack.dst_wmd.wh_object_cookie,
                      hdr->msg.ack.mlength);
                break;

        case LNET_MSG_REPLY:
                CWARN("    dst md "LPX64"."LPX64", "
                      "length %d\n",
                      hdr->msg.reply.dst_wmd.wh_interface_cookie,
                      hdr->msg.reply.dst_wmd.wh_object_cookie,
                      hdr->payload_length);
        }
}
int
lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
           void *private, int rdma_req)
{
        int            rc = 0;
        int            for_me;
        lnet_msg_t    *msg;
        lnet_pid_t     dest_pid;
        lnet_nid_t     dest_nid;
        lnet_nid_t     src_nid;
        __u32          payload_length;
        __u32          type;

        LASSERT (!cfs_in_interrupt ());

        type = le32_to_cpu(hdr->type);
        src_nid = le64_to_cpu(hdr->src_nid);
        dest_nid = le64_to_cpu(hdr->dest_nid);
        dest_pid = le32_to_cpu(hdr->dest_pid);
        payload_length = le32_to_cpu(hdr->payload_length);

        for_me = (ni->ni_nid == dest_nid);

        switch (type) {
        case LNET_MSG_ACK:
        case LNET_MSG_GET:
                if (payload_length > 0) {
                        CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               lnet_msgtyp2str(type), payload_length);
                        return -EPROTO;
                }
                break;

        case LNET_MSG_PUT:
        case LNET_MSG_REPLY:
                if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
                        CERROR("%s, src %s: bad %s payload %d "
                               "(%d max expected)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               lnet_msgtyp2str(type),
                               payload_length,
                               for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
                        return -EPROTO;
                }
                break;

        default:
                CERROR("%s, src %s: Bad message type 0x%x\n",
                       libcfs_nid2str(from_nid),
                       libcfs_nid2str(src_nid), type);
                return -EPROTO;
        }

        if (the_lnet.ln_routing &&
            ni->ni_last_alive != cfs_time_current_sec()) {
                LNET_LOCK();

                /* NB: so far here is the only place to set NI status to "up" */
                ni->ni_last_alive = cfs_time_current_sec();
                if (ni->ni_status != NULL &&
                    ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
                        ni->ni_status->ns_status = LNET_NI_STATUS_UP;
                LNET_UNLOCK();
        }

        /* Regard a bad destination NID as a protocol error.  Senders should
         * know what they're doing; if they don't they're misconfigured, buggy
         * or malicious so we chop them off at the knees :) */

        if (!for_me) {
                if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
                        /* should have gone direct */
                        CERROR ("%s, src %s: Bad dest nid %s "
                                "(should have been sent direct)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (lnet_islocalnid(dest_nid)) {
                        /* dest is another local NI; sender should have used
                         * this node's NID on its own network */
                        CERROR ("%s, src %s: Bad dest nid %s "
                                "(it's my nid but on a different network)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (rdma_req && type == LNET_MSG_GET) {
                        CERROR ("%s, src %s: Bad optimized GET for %s "
                                "(final destination must be me)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (!the_lnet.ln_routing) {
                        CERROR ("%s, src %s: Dropping message for %s "
                                "(routing not enabled)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        goto drop;
                }
        }

        /* Message looks OK; we're not going to return an error, so we MUST
         * call back lnd_recv() come what may... */

        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
            fail_peer (src_nid, 0))                      /* shall we now? */
        {
                CERROR("%s, src %s: Dropping %s to simulate failure\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));
                goto drop;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("%s, src %s: Dropping %s (out of memory)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));
                goto drop;
        }

        /* msg zeroed in lnet_msg_alloc;
         * i.e. flags all clear, pointers NULL etc */

        msg->msg_type = type;
        msg->msg_private = private;
        msg->msg_receiving = 1;
        msg->msg_len = msg->msg_wanted = payload_length;
        msg->msg_offset = 0;
        msg->msg_hdr = *hdr;
        /* for building message event */
        msg->msg_from = from_nid;
        if (!for_me) {
                msg->msg_target.pid = dest_pid;
                msg->msg_target.nid = dest_nid;
                msg->msg_routing = 1;
        } else {
                /* convert common msg->hdr fields to host byteorder */
                msg->msg_hdr.type = type;
                msg->msg_hdr.src_nid = src_nid;
                msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
                msg->msg_hdr.dest_nid = dest_nid;
                msg->msg_hdr.dest_pid = dest_pid;
                msg->msg_hdr.payload_length = payload_length;
        }

        LNET_LOCK();
        rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid);
        if (rc != 0) {
                LNET_UNLOCK();
                CERROR("%s, src %s: Dropping %s "
                       "(error %d looking up sender)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type), rc);
                lnet_msg_free(msg);
                goto drop;
        }

        lnet_msg_commit(msg, 0);

        if (!for_me) {
                rc = lnet_parse_forward_locked(ni, msg);
                LNET_UNLOCK();

                if (rc < 0)
                        goto free_drop;
                if (rc == 0) {
                        lnet_ni_recv(ni, msg->msg_private, msg, 0,
                                     0, payload_length, payload_length);
                }
                return 0;
        }

        LNET_UNLOCK();

        switch (type) {
        case LNET_MSG_ACK:
                rc = lnet_parse_ack(ni, msg);
                break;
        case LNET_MSG_PUT:
                rc = lnet_parse_put(ni, msg);
                break;
        case LNET_MSG_GET:
                rc = lnet_parse_get(ni, msg, rdma_req);
                break;
        case LNET_MSG_REPLY:
                rc = lnet_parse_reply(ni, msg);
                break;
        default:
                LASSERT(0);
                goto free_drop;  /* prevent an unused label if !kernel */
        }

        if (rc == 0)
                return 0;

        LASSERT (rc == ENOENT);

 free_drop:
        LASSERT(msg->msg_md == NULL);
        lnet_finalize(ni, msg, rc);

 drop:
        lnet_drop_message(ni, private, payload_length);
        return 0;
}
void
lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason)
{
        while (!cfs_list_empty(head)) {
                lnet_process_id_t id = {0};
                lnet_msg_t       *msg;

                msg = cfs_list_entry(head->next, lnet_msg_t, msg_list);
                cfs_list_del(&msg->msg_list);

                id.nid = msg->msg_hdr.src_nid;
                id.pid = msg->msg_hdr.src_pid;

                LASSERT(msg->msg_md == NULL);
                LASSERT(msg->msg_rx_delayed);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

                CWARN("Dropping delayed PUT from %s portal %d match "LPU64
                      " offset %d length %d: %s\n",
                      libcfs_id2str(id),
                      msg->msg_hdr.msg.put.ptl_index,
                      msg->msg_hdr.msg.put.match_bits,
                      msg->msg_hdr.msg.put.offset,
                      msg->msg_hdr.payload_length, reason);

                /* NB I can't drop msg's ref on msg_rxpeer until after I've
                 * called lnet_drop_message(), so I just hang onto msg as well
                 * until that's done */

                lnet_drop_message(msg->msg_rxpeer->lp_ni,
                                  msg->msg_private, msg->msg_len);

                LNET_LOCK();
                lnet_peer_decref_locked(msg->msg_rxpeer);
                LNET_UNLOCK();

                lnet_msg_free(msg);
        }
}
void
lnet_recv_delayed_msg_list(cfs_list_t *head)
{
        while (!cfs_list_empty(head)) {
                lnet_msg_t       *msg;
                lnet_process_id_t id;

                msg = cfs_list_entry(head->next, lnet_msg_t, msg_list);
                cfs_list_del(&msg->msg_list);

                /* md won't disappear under me, since each msg
                 * holds a ref on it */

                id.nid = msg->msg_hdr.src_nid;
                id.pid = msg->msg_hdr.src_pid;

                LASSERT(msg->msg_rx_delayed);
                LASSERT(msg->msg_md != NULL);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

                CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
                       "match "LPU64" offset %d length %d.\n",
                       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
                       msg->msg_hdr.msg.put.match_bits,
                       msg->msg_hdr.msg.put.offset,
                       msg->msg_hdr.payload_length);

                lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
        }
}
/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed to
 * by \a mdh handle. Using a MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to EQ (if it exists).
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset,
        __u64 hdr_data)
{
        lnet_msg_t   *msg;
        lnet_libmd_t *md;
        int           rc;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
            fail_peer (target.nid, 1))                   /* shall we now? */
        {
                CERROR("Dropping PUT to %s: simulated failure\n",
                       libcfs_id2str(target));
                return -EIO;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));
                return -ENOMEM;
        }
        msg->msg_vmflush = !!cfs_memory_pressure_get();

        LNET_LOCK();

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping PUT ("LPU64":%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_msg_free(msg);
                LNET_UNLOCK();
                return -ENOENT;
        }

        CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

        msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.put.hdr_data = hdr_data;

        /* NB handles only looked up by creator (no flips) */
        if (ack == LNET_ACK_REQ) {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        the_lnet.ln_interface_cookie;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        md->md_lh.lh_cookie;
        } else {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
        }

        LNET_UNLOCK();

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

        rc = lnet_send(self, msg);
        if (rc != 0) {
                CNETERR("Error sending PUT to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize (NULL, msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}
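
/*
 * Example (illustrative sketch, not part of the original source): a typical
 * caller binds a free-floating MD with LNetMDBind() and then issues the PUT;
 * 'mdh', 'peer', 'PORTAL' and 'MBITS' below are hypothetical names.
 *
 *      lnet_process_id_t peer;
 *      int               rc;
 *
 *      peer.nid = dest_nid;
 *      peer.pid = LUSTRE_SRV_LNET_PID;
 *
 *      rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, peer,
 *                   PORTAL, MBITS, 0, 0);
 *      if (rc != 0)
 *              CERROR("LNetPut to %s failed: %d\n",
 *                     libcfs_id2str(peer), rc);
 *
 * On success the caller waits for LNET_EVENT_SEND (and LNET_EVENT_ACK, since
 * LNET_ACK_REQ was passed) on the EQ attached to the MD; note the two events
 * may arrive in either order.
 */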
lnet_msg_t *
lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
{
        /* The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
         * returns a msg for the LND to pass to lnet_finalize() when the sink
         * data has been received.
         *
         * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
         * lnet_finalize() is called on it, so the LND must call this first */

        lnet_msg_t        *msg = lnet_msg_alloc();
        lnet_libmd_t      *getmd = getmsg->msg_md;
        lnet_process_id_t  peer_id = getmsg->msg_target;

        LASSERT (!getmsg->msg_target_is_router);
        LASSERT (!getmsg->msg_routing);

        LNET_LOCK();

        LASSERT (getmd->md_refcount > 0);

        if (msg == NULL) {
                CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
                goto drop;
        }

        if (getmd->md_threshold == 0) {
                CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
                        getmd);
                goto drop_msg;
        }

        LASSERT (getmd->md_offset == 0);

        CDEBUG(D_NET, "%s: Reply from %s md %p\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

        /* setup information for lnet_build_msg_event */
        msg->msg_from = peer_id.nid;
        msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
        msg->msg_hdr.src_nid = peer_id.nid;
        msg->msg_hdr.payload_length = getmd->md_length;
        msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

        lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);

        lnet_msg_commit(msg, 0);

        LNET_UNLOCK();

        lnet_build_msg_event(msg, LNET_EVENT_REPLY);

        return msg;

 drop_msg:
        lnet_msg_free(msg);
 drop:
        the_lnet.ln_counters.drop_count++;
        the_lnet.ln_counters.drop_length += getmd->md_length;

        LNET_UNLOCK();

        return NULL;
}
void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
        /* Set the REPLY length, now the RDMA that elides the REPLY message
         * has completed and I know it. */
        LASSERT (reply != NULL);
        LASSERT (reply->msg_type == LNET_MSG_GET);
        LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);

        /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
         * the end of my buffer, I might as well be dead. */
        LASSERT (len <= reply->msg_ev.mlength);

        reply->msg_ev.mlength = len;
}
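
/*
 * Illustrative note (not in the original source): for an "optimized" GET an
 * LND uses the two helpers above in this order; 'reply', 'nob' and 'status'
 * are hypothetical names.
 *
 *      reply = lnet_create_reply_msg(ni, getmsg);  - before finalizing GET
 *      ... RDMA the sink data directly into the GET MD ...
 *      lnet_set_reply_msg_len(ni, reply, nob);     - actual bytes written
 *      lnet_finalize(ni, reply, status);           - signals LNET_EVENT_REPLY
 *
 * The original 'getmsg' is finalized separately; it must not be touched once
 * lnet_finalize() has been called on it, which is why lnet_create_reply_msg()
 * has to run first.
 */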
/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into a MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in
 * LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating"
 * (See LNetMDBind()).
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to EQ (if it exists) of the MD.
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset)
{
        lnet_msg_t   *msg;
        lnet_libmd_t *md;
        int           rc;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
            fail_peer (target.nid, 1))                   /* shall we now? */
        {
                CERROR("Dropping GET to %s: simulated failure\n",
                       libcfs_id2str(target));
                return -EIO;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));
                return -ENOMEM;
        }

        LNET_LOCK();

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping GET ("LPU64":%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_msg_free(msg);
                LNET_UNLOCK();
                return -ENOENT;
        }

        CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

        msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

        /* NB handles only looked up by creator (no flips) */
        msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
                the_lnet.ln_interface_cookie;
        msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
                md->md_lh.lh_cookie;

        LNET_UNLOCK();

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

        rc = lnet_send(self, msg);
        if (rc < 0) {
                CNETERR("Error sending GET to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize (NULL, msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}
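
/*
 * Example (illustrative sketch, not part of the original source): issuing a
 * GET into a previously bound MD; 'mdh', 'peer', 'PORTAL' and 'MBITS' are
 * hypothetical names.
 *
 *      rc = LNetGet(LNET_NID_ANY, mdh, peer, PORTAL, MBITS, 0);
 *      if (rc != 0)
 *              CERROR("LNetGet from %s failed: %d\n",
 *                     libcfs_id2str(peer), rc);
 *
 * On success the caller waits for LNET_EVENT_SEND followed by
 * LNET_EVENT_REPLY (carrying the sink data) on the MD's EQ.
 */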
/**
 * Calculate distance to node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface, and reserved option
 * local_nid_dist_zero is set, which is the default.
 * \retval positives Distance to target NID, i.e. number of hops plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
        cfs_list_t       *e;
        lnet_ni_t        *ni;
        lnet_remotenet_t *rnet;
        __u32             dstnet = LNET_NIDNET(dstnid);
        int               hops;
        __u32             order = 2;

        /* if !local_nid_dist_zero, I don't return a distance of 0 ever
         * (when lustre sees a distance of 0, it substitutes 0@lo), so I
         * keep order 0 free for 0@lo and order 1 free for a local NID
         * match */

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        LNET_LOCK();

        cfs_list_for_each (e, &the_lnet.ln_nis) {
                ni = cfs_list_entry(e, lnet_ni_t, ni_list);

                if (ni->ni_nid == dstnid) {
                        if (srcnidp != NULL)
                                *srcnidp = dstnid;
                        if (orderp != NULL) {
                                if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
                                        *orderp = 0;
                                else
                                        *orderp = 1;
                        }
                        LNET_UNLOCK();

                        return local_nid_dist_zero ? 0 : 1;
                }

                if (LNET_NIDNET(ni->ni_nid) == dstnet) {
                        if (srcnidp != NULL)
                                *srcnidp = ni->ni_nid;
                        if (orderp != NULL)
                                *orderp = order;
                        LNET_UNLOCK();
                        return 1;
                }

                order++;
        }

        cfs_list_for_each (e, &the_lnet.ln_remote_nets) {
                rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == dstnet) {
                        lnet_route_t *route;
                        lnet_route_t *shortest = NULL;

                        LASSERT (!cfs_list_empty(&rnet->lrn_routes));

                        cfs_list_for_each_entry(route, &rnet->lrn_routes,
                                                lr_list) {
                                if (shortest == NULL ||
                                    route->lr_hops < shortest->lr_hops)
                                        shortest = route;
                        }

                        LASSERT (shortest != NULL);
                        hops = shortest->lr_hops;
                        if (srcnidp != NULL)
                                *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
                        if (orderp != NULL)
                                *orderp = order;
                        LNET_UNLOCK();
                        return hops + 1;
                }
                order++;
        }

        LNET_UNLOCK();
        return -EHOSTUNREACH;
}
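
/*
 * Example (illustrative sketch, not part of the original source): probing
 * reachability of a peer before use; 'dest_nid' is a hypothetical name.
 *
 *      lnet_nid_t src_nid;
 *      __u32      order;
 *      int        dist;
 *
 *      dist = LNetDist(dest_nid, &src_nid, &order);
 *      if (dist < 0)
 *              CERROR("%s unreachable: %d\n",
 *                     libcfs_nid2str(dest_nid), dist);
 *      else
 *              CDEBUG(D_NET, "%s is %d hop(s) away via %s (order %u)\n",
 *                     libcfs_nid2str(dest_nid), dist,
 *                     libcfs_nid2str(src_nid), order);
 *
 * A distance of 0 means dest_nid is a local NID and local_nid_dist_zero is
 * set; positive values count hops plus one, per the doc comment above.
 */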
/**
 * Set the number of asynchronous messages expected from a target process.
 *
 * This function is only meaningful for userspace callers. It's a no-op when
 * called from kernel.
 *
 * Asynchronous messages are those that can come from a target when the
 * userspace process is not waiting for IO to complete; e.g., AST callbacks
 * from Lustre servers. Specifying the expected number of such messages
 * allows them to be eagerly received when user process is not running in
 * LNet; otherwise network errors may occur.
 *
 * \param id Process ID of the target process.
 * \param nasync Number of asynchronous messages expected from the target.
 *
 * \return 0 on success, and an error code otherwise.
 */
int
LNetSetAsync(lnet_process_id_t id, int nasync)
{
#ifdef __KERNEL__
        return 0;
#else
        lnet_ni_t        *ni;
        lnet_remotenet_t *rnet;
        cfs_list_t       *tmp;
        lnet_route_t     *route;
        lnet_nid_t       *nids;
        int               nnids;
        int               maxnids = 256;
        int               rc = 0;
        int               rc2;

        /* Target on a local network? */
        ni = lnet_net2ni(LNET_NIDNET(id.nid));
        if (ni != NULL) {
                if (ni->ni_lnd->lnd_setasync != NULL)
                        rc = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
                lnet_ni_decref(ni);
                return rc;
        }

        /* Target on a remote network: apply to routers */
 again:
        LIBCFS_ALLOC(nids, maxnids * sizeof(*nids));
        if (nids == NULL)
                return -ENOMEM;
        nnids = 0;

        /* Snapshot all the router NIDs */
        LNET_LOCK();
        rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
        if (rnet != NULL) {
                cfs_list_for_each(tmp, &rnet->lrn_routes) {
                        if (nnids == maxnids) {
                                LNET_UNLOCK();
                                LIBCFS_FREE(nids, maxnids * sizeof(*nids));
                                maxnids *= 2;
                                goto again;
                        }

                        route = cfs_list_entry(tmp, lnet_route_t, lr_list);
                        nids[nnids++] = route->lr_gateway->lp_nid;
                }
        }
        LNET_UNLOCK();

        /* set async on all the routers */
        while (nnids-- > 0) {
                id.pid = LUSTRE_SRV_LNET_PID;
                id.nid = nids[nnids];

                ni = lnet_net2ni(LNET_NIDNET(id.nid));
                if (ni == NULL)
                        continue;

                if (ni->ni_lnd->lnd_setasync != NULL) {
                        rc2 = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
                        if (rc2 != 0)
                                rc = rc2;
                }
                lnet_ni_decref(ni);
        }

        LIBCFS_FREE(nids, maxnids * sizeof(*nids));
        return rc;
#endif
}