/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
static int local_nid_dist_zero = 1;
CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
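
/*
 * Failure simulation: lnet_fail_nid() populates the_lnet.ln_test_peers
 * with NIDs (and drop thresholds) that should be treated as failing,
 * and fail_peer() consults that table to decide whether a given message
 * should be dropped to simulate the failure.
 */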
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)

        struct list_head *next;
        struct list_head  cull;

        LASSERT(the_lnet.ln_init);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */

        /* Adding a new entry */
        LIBCFS_ALLOC(tp, sizeof(*tp));

        tp->tp_threshold = threshold;

        list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);

        /* removing entries */
        INIT_LIST_HEAD(&cull);

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid) {        /* matched this one */
                        list_del(&tp->tp_list);
                        list_add(&tp->tp_list, &cull);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, lnet_test_peer_t, tp_list);

                list_del(&tp->tp_list);
                LIBCFS_FREE(tp, sizeof(*tp));
fail_peer(lnet_nid_t nid, int outgoing)

        lnet_test_peer_t *tp;
        struct list_head *el;
        struct list_head *next;
        struct list_head  cull;

        INIT_LIST_HEAD(&cull);

        /* NB: use lnet_net_lock(0) to serialize operations on test peers */

        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0) {
                        /* only cull zombies on outgoing tests,
                         * since we may be at interrupt priority on
                         * incoming messages. */
                        list_del(&tp->tp_list);
                        list_add(&tp->tp_list, &cull);

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        list_del(&tp->tp_list);
                                        list_add(&tp->tp_list, &cull);

        while (!list_empty(&cull)) {
                tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
                list_del(&tp->tp_list);

                LIBCFS_FREE(tp, sizeof(*tp));
lnet_iov_nob(unsigned int niov, struct iovec *iov)

        unsigned int nob = 0;

        LASSERT(niov == 0 || iov != NULL);

                nob += (iov++)->iov_len;

EXPORT_SYMBOL(lnet_iov_nob);
lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
                  unsigned int nsiov, struct iovec *siov, unsigned int soffset,

        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        /* skip complete frags before 'doffset' */

        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;

        /* skip complete frags before 'soffset' */

        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;

                this_nob = MIN(diov->iov_len - doffset,
                               siov->iov_len - soffset);
                this_nob = MIN(this_nob, nob);

                memcpy((char *)diov->iov_base + doffset,
                       (char *)siov->iov_base + soffset, this_nob);

                if (diov->iov_len > doffset + this_nob) {

                if (siov->iov_len > soffset + this_nob) {

EXPORT_SYMBOL(lnet_copy_iov2iov);
lnet_extract_iov(int dst_niov, struct iovec *dst,
                 int src_niov, struct iovec *src,
                 unsigned int offset, unsigned int len)

        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;

        if (len == 0)                   /* no data => */
                return 0;               /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->iov_len) {        /* skip initial frags */
                offset -= src->iov_len;

                LASSERT(src_niov > 0);

                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->iov_len - offset;
                dst->iov_base = ((char *)src->iov_base) + offset;

                if (len <= frag_len) {

                dst->iov_len = frag_len;

EXPORT_SYMBOL(lnet_extract_iov);
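
/*
 * Illustrative sketch (not from the original file): describe 512 payload
 * bytes starting 100 bytes into an existing vector, without copying them,
 * assuming "src_iov"/"src_niov" already describe the source buffer:
 *
 *      struct iovec frags[LNET_MAX_IOV];
 *      int          nfrags;
 *
 *      nfrags = lnet_extract_iov(LNET_MAX_IOV, frags,
 *                                src_niov, src_iov, 100, 512);
 *
 * 'frags[0..nfrags-1]' then aliases the requested byte range of the
 * source vector; lnet_extract_kiov() below works the same way for
 * page-based fragments.
 */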
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)

lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov, unsigned int doffset,
                    unsigned int nskiov, lnet_kiov_t *skiov, unsigned int soffset,

lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                   unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,

lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                   unsigned int niov, struct iovec *iov, unsigned int iovoffset,

lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
                  int src_niov, lnet_kiov_t *src,
                  unsigned int offset, unsigned int len)

#else /* __KERNEL__ */
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)

        unsigned int nob = 0;

        LASSERT(niov == 0 || kiov != NULL);

                nob += (kiov++)->kiov_len;

EXPORT_SYMBOL(lnet_kiov_nob);
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,

        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        LASSERT(!in_interrupt());

        while (doffset >= diov->kiov_len) {
                doffset -= diov->kiov_len;

        while (soffset >= siov->kiov_len) {
                soffset -= siov->kiov_len;

                this_nob = MIN(diov->kiov_len - doffset,
                               siov->kiov_len - soffset);
                this_nob = MIN(this_nob, nob);

                        daddr = ((char *)kmap(diov->kiov_page)) +
                                diov->kiov_offset + doffset;

                        saddr = ((char *)kmap(siov->kiov_page)) +
                                siov->kiov_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy(daddr, saddr, this_nob);

                if (diov->kiov_len > doffset + this_nob) {

                        kunmap(diov->kiov_page);

                if (siov->kiov_len > soffset + this_nob) {

                        kunmap(siov->kiov_page);

                kunmap(diov->kiov_page);

                kunmap(siov->kiov_page);

EXPORT_SYMBOL(lnet_copy_kiov2kiov);
lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                   unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,

        /* NB iov, kiov are READ-ONLY */
        unsigned int this_nob;

        LASSERT(!in_interrupt());

        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;

        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;

                this_nob = MIN(iov->iov_len - iovoffset,
                               kiov->kiov_len - kiovoffset);
                this_nob = MIN(this_nob, nob);

                        addr = ((char *)kmap(kiov->kiov_page)) +
                               kiov->kiov_offset + kiovoffset;

                memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        kiovoffset += this_nob;

                        kunmap(kiov->kiov_page);

                kunmap(kiov->kiov_page);

EXPORT_SYMBOL(lnet_copy_kiov2iov);
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                   unsigned int niov, struct iovec *iov, unsigned int iovoffset,

        /* NB kiov, iov are READ-ONLY */
        unsigned int this_nob;

        LASSERT(!in_interrupt());

        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;

        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;

                this_nob = MIN(kiov->kiov_len - kiovoffset,
                               iov->iov_len - iovoffset);
                this_nob = MIN(this_nob, nob);

                        addr = ((char *)kmap(kiov->kiov_page)) +
                               kiov->kiov_offset + kiovoffset;

                memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        kiovoffset += this_nob;

                        kunmap(kiov->kiov_page);

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;

                kunmap(kiov->kiov_page);

EXPORT_SYMBOL(lnet_copy_iov2kiov);
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
                  int src_niov, lnet_kiov_t *src,
                  unsigned int offset, unsigned int len)

        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;

        if (len == 0)                   /* no data => */
                return 0;               /* no frags */

        LASSERT(src_niov > 0);
        while (offset >= src->kiov_len) {       /* skip initial frags */
                offset -= src->kiov_len;

                LASSERT(src_niov > 0);

                LASSERT(src_niov > 0);
                LASSERT((int)niov <= dst_niov);

                frag_len = src->kiov_len - offset;
                dst->kiov_page = src->kiov_page;
                dst->kiov_offset = src->kiov_offset + offset;

                if (len <= frag_len) {
                        LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);

                dst->kiov_len = frag_len;
                LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);

EXPORT_SYMBOL(lnet_extract_kiov);
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
             unsigned int offset, unsigned int mlen, unsigned int rlen)

        unsigned int  niov = 0;
        struct iovec *iov = NULL;
        lnet_kiov_t  *kiov = NULL;

        LASSERT(!in_interrupt());
        LASSERT(mlen == 0 || msg != NULL);

                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);
                LASSERT(msg->msg_offset == offset);
                LASSERT(msg->msg_wanted == mlen);

                msg->msg_receiving = 0;

                        niov = msg->msg_niov;
                        kiov = msg->msg_kiov;

                        LASSERT((iov == NULL) != (kiov == NULL));

        rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
                                    niov, iov, kiov, offset, mlen, rlen);

                lnet_finalize(ni, msg, rc);
lnet_setpayloadbuffer(lnet_msg_t *msg)

        lnet_libmd_t *md = msg->msg_md;

        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md != NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        if ((md->md_options & LNET_MD_KIOV) != 0)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
               unsigned int offset, unsigned int len)

        msg->msg_type = type;
        msg->msg_target = target;

        msg->msg_offset = offset;

                lnet_setpayloadbuffer(msg);

        memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)

        void *priv = msg->msg_private;

        LASSERT(!in_interrupt());
        LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);

                lnet_finalize(ni, msg, rc);
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)

        LASSERT(!msg->msg_sending);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_rx_ready_delay);
        LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);

        msg->msg_rx_ready_delay = 1;
        rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,

                CERROR("recv from %s / send to %s aborted: "
                       "eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
                       libcfs_id2str(msg->msg_target), rc);
                LASSERT(rc < 0); /* required by my callers */
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)

        cfs_time_t last_alive = 0;

        LASSERT(lnet_peer_aliveness_enabled(lp));
        LASSERT(ni->ni_lnd->lnd_query != NULL);

        lnet_net_unlock(lp->lp_cpt);
        (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
        lnet_net_lock(lp->lp_cpt);

        lp->lp_last_query = cfs_time_current();

        if (last_alive != 0)    /* NI has updated timestamp */
                lp->lp_last_alive = last_alive;
/* NB: always called with lnet_net_lock held */
lnet_peer_is_alive(lnet_peer_t *lp, cfs_time_t now)

        /* Trust lnet_notify() if it has more recent aliveness news, but
         * ignore the initial assumed death (see lnet_peers_start_down()).
         */
        if (!lp->lp_alive && lp->lp_alive_count > 0 &&
            cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))

        deadline = cfs_time_add(lp->lp_last_alive,
                                cfs_time_seconds(lp->lp_ni->ni_peertimeout));
        alive = cfs_time_after(deadline, now);

        /* Update obsolete lp_alive except for routers assumed to be dead
         * initially, because router checker would update aliveness in this
         * case, and moreover lp_last_alive at peer creation is assumed.
         */
        if (alive && !lp->lp_alive &&
            !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
                lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
/* NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the lnet_net_lock */
lnet_peer_alive_locked(lnet_peer_t *lp)

        cfs_time_t now = cfs_time_current();

        if (!lnet_peer_aliveness_enabled(lp))

        if (lp->lp_last_query == 0) {

                /* Peer appears dead, but we should avoid frequent NI queries
                 * (at most once per ni_query_interval seconds). */
                static const int ni_query_interval = 1;
                cfs_time_t next_query;

                next_query = cfs_time_add(lp->lp_last_query,
                                          cfs_time_seconds(ni_query_interval));
                query = cfs_time_aftereq(now, next_query);

        /* query NI for latest aliveness news */
                lnet_ni_query_locked(lp->lp_ni, lp);

        if (lnet_peer_is_alive(lp, now))

        lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *        lnet_send() is going to lnet_net_unlock immediately after this, so
 *        it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK   If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH    If the next hop of the message appears dead.
 * \retval -ECANCELED       If the MD of the message has been unlinked.
 */
lnet_post_send_locked(lnet_msg_t *msg, int do_send)

        lnet_peer_t             *lp = msg->msg_txpeer;
        lnet_ni_t               *ni = lp->lp_ni;
        int                      cpt = msg->msg_tx_cpt;
        struct lnet_tx_queue    *tq = ni->ni_tx_queues[cpt];

        /* non-lnet_send() callers have checked before */
        LASSERT(!do_send || msg->msg_tx_delayed);
        LASSERT(!msg->msg_receiving);
        LASSERT(msg->msg_tx_committed);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(lp) == 0 &&
            !lnet_msg_is_rc_ping(msg)) { /* send RC ping even for dead router */
                the_lnet.ln_counters[cpt]->drop_count++;
                the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
                lnet_net_unlock(cpt);

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));

                        lnet_finalize(ni, msg, -EHOSTUNREACH);

                return -EHOSTUNREACH;

        if (msg->msg_md != NULL &&
            (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
                lnet_net_unlock(cpt);

                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
                        "called on the MD/ME.\n",
                        libcfs_id2str(msg->msg_target));

                        lnet_finalize(ni, msg, -ECANCELED);

        if (!msg->msg_peertxcredit) {
                LASSERT((lp->lp_txcredits < 0) ==
                        !list_empty(&lp->lp_txq));

                msg->msg_peertxcredit = 1;
                lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);

                if (lp->lp_txcredits < lp->lp_mintxcredits)
                        lp->lp_mintxcredits = lp->lp_txcredits;

                if (lp->lp_txcredits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lp_txq);
                        return LNET_CREDIT_WAIT;

        if (!msg->msg_txcredit) {
                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                msg->msg_txcredit = 1;

                if (tq->tq_credits < tq->tq_credits_min)
                        tq->tq_credits_min = tq->tq_credits;

                if (tq->tq_credits < 0) {
                        msg->msg_tx_delayed = 1;
                        list_add_tail(&msg->msg_list, &tq->tq_delayed);
                        return LNET_CREDIT_WAIT;

                lnet_net_unlock(cpt);
                lnet_ni_send(ni, msg);

        return LNET_CREDIT_OK;
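
/*
 * Pick the router buffer pool for 'msg': walk this CPT's pools in order
 * of increasing buffer size until one is large enough for msg_len.
 */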
lnet_msg2bufpool(lnet_msg_t *msg)

        lnet_rtrbufpool_t *rbp;

        LASSERT(msg->msg_rx_committed);

        cpt = msg->msg_rx_cpt;
        rbp = &the_lnet.ln_rtrpools[cpt][0];

        LASSERT(msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)

        /* lnet_parse is going to lnet_net_unlock immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.
         * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
         * received or OK to receive */
        lnet_peer_t       *lp = msg->msg_rxpeer;
        lnet_rtrbufpool_t *rbp;

        LASSERT(msg->msg_iov == NULL);
        LASSERT(msg->msg_kiov == NULL);
        LASSERT(msg->msg_niov == 0);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);

        /* non-lnet_parse callers only receive delayed messages */
        LASSERT(!do_recv || msg->msg_rx_delayed);

        if (!msg->msg_peerrtrcredit) {
                LASSERT((lp->lp_rtrcredits < 0) ==
                        !list_empty(&lp->lp_rtrq));

                msg->msg_peerrtrcredit = 1;

                if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
                        lp->lp_minrtrcredits = lp->lp_rtrcredits;

                if (lp->lp_rtrcredits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        return LNET_CREDIT_WAIT;

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                msg->msg_rtrcredit = 1;

                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT(msg->msg_rx_ready_delay);
                        msg->msg_rx_delayed = 1;
                        list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return LNET_CREDIT_WAIT;

        LASSERT(!list_empty(&rbp->rbp_bufs));
        rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
        list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

                int cpt = msg->msg_rx_cpt;

                lnet_net_unlock(cpt);
                lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);

        return LNET_CREDIT_OK;
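
/*
 * Give back the NI and peer tx credits that lnet_post_send_locked() took
 * for 'msg', and retry the first message that was queued waiting for
 * each credit, if any.
 */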
lnet_return_tx_credits_locked(lnet_msg_t *msg)

        lnet_peer_t *txpeer = msg->msg_txpeer;

        if (msg->msg_txcredit) {
                struct lnet_ni       *ni = txpeer->lp_ni;
                struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

                /* give back NI txcredits */
                msg->msg_txcredit = 0;

                LASSERT((tq->tq_credits < 0) ==
                        !list_empty(&tq->tq_delayed));

                tq->tq_credits++;
                if (tq->tq_credits <= 0) {
                        msg2 = list_entry(tq->tq_delayed.next,
                                          lnet_msg_t, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txpeer->lp_ni == ni);
                        LASSERT(msg2->msg_tx_delayed);

                        (void) lnet_post_send_locked(msg2, 1);

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                LASSERT((txpeer->lp_txcredits < 0) ==
                        !list_empty(&txpeer->lp_txq));

                txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
                LASSERT(txpeer->lp_txqnob >= 0);

                txpeer->lp_txcredits++;
                if (txpeer->lp_txcredits <= 0) {
                        msg2 = list_entry(txpeer->lp_txq.next,
                                          lnet_msg_t, msg_list);
                        list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txpeer == txpeer);
                        LASSERT(msg2->msg_tx_delayed);

                        (void) lnet_post_send_locked(msg2, 1);

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_decref_locked(txpeer);
lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)

        if (list_empty(&rbp->rbp_msgs))
                return;

        msg = list_entry(rbp->rbp_msgs.next,
                         lnet_msg_t, msg_list);
        list_del(&msg->msg_list);

        (void)lnet_post_routed_recv_locked(msg, 1);
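
/*
 * Drop every routed message queued on 'list'; used when routing has been
 * disabled while messages were still blocked waiting for router buffers.
 */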
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)

        struct list_head drop;

        INIT_LIST_HEAD(&drop);

        list_splice_init(list, &drop);

        lnet_net_unlock(cpt);

        list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
                lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
                             0, 0, 0, msg->msg_hdr.payload_length);
                list_del_init(&msg->msg_list);
                lnet_finalize(NULL, msg, -ECANCELED);
lnet_return_rx_credits_locked(lnet_msg_t *msg)

        lnet_peer_t *rxpeer = msg->msg_rxpeer;

        if (msg->msg_rtrcredit) {
                /* give back global router credits */

                lnet_rtrbufpool_t *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait */
                LASSERT(msg->msg_kiov != NULL);

                rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT(rbp == lnet_msg2bufpool(msg));

                LASSERT((rbp->rbp_credits > 0) ==
                        !list_empty(&rbp->rbp_bufs));

                /* If routing is now turned off, we just drop this buffer and
                 * don't bother trying to return credits. */
                if (!the_lnet.ln_routing) {
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);

                /* It is possible that a user has lowered the desired number of
                 * buffers in this pool.  Make sure we never put back
                 * more buffers than the stated number. */
                if (rbp->rbp_credits >= rbp->rbp_nbuffers) {
                        /* Discard this buffer so we don't have too many. */
                        lnet_destroy_rtrbuf(rb, rbp->rbp_npages);

                        list_add(&rb->rb_list, &rbp->rbp_bufs);

                if (rbp->rbp_credits <= 0)
                        lnet_schedule_blocked_locked(rbp);

        if (msg->msg_peerrtrcredit) {
                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                LASSERT((rxpeer->lp_rtrcredits < 0) ==
                        !list_empty(&rxpeer->lp_rtrq));

                rxpeer->lp_rtrcredits++;

                /* drop all messages which are queued to be routed on that
                 * peer */
                if (!the_lnet.ln_routing) {
                        lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
                                                     rxpeer->lp_cpt);
                } else if (rxpeer->lp_rtrcredits <= 0) {
                        msg2 = list_entry(rxpeer->lp_rtrq.next,
                                          lnet_msg_t, msg_list);
                        list_del(&msg2->msg_list);

                        (void) lnet_post_routed_recv_locked(msg2, 1);

        LASSERT(!msg->msg_rtrcredit);
        LASSERT(!msg->msg_peerrtrcredit);

        if (rxpeer != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_decref_locked(rxpeer);
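
/*
 * Order two routes to the same remote net: prefer a responsive gateway
 * over a slow one, then a lower priority value, then fewer hops, then the
 * gateway with the smaller tx queue and more tx credits; finally break
 * ties round-robin on the route sequence number.
 */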
static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)

        lnet_peer_t *p1 = r1->lr_gateway;
        lnet_peer_t *p2 = r2->lr_gateway;

        if (p1->lp_ni->ni_peertimeout > 0 &&
            p2->lp_ni->ni_peertimeout > 0) {
                /* if a router has queued bytes but no aliveness update for
                 * the last 10 seconds, it could be potentially dead or
                 * congested, so we prefer not to choose it even if its
                 * status is still alive */
                int        router_slow = cfs_time_seconds(10);

                cfs_time_t now = cfs_time_current();

                r1_slow = p1->lp_txqnob != 0 &&
                          cfs_time_aftereq(now, p1->lp_last_alive + router_slow);
                r2_slow = p2->lp_txqnob != 0 &&
                          cfs_time_aftereq(now, p2->lp_last_alive + router_slow);

                if (!r1_slow && r2_slow)

                if (r1_slow && !r2_slow)

        if (r1->lr_priority < r2->lr_priority)

        if (r1->lr_priority > r2->lr_priority)

        if (r1->lr_hops < r2->lr_hops)

        if (r1->lr_hops > r2->lr_hops)

        if (p1->lp_txqnob < p2->lp_txqnob)

        if (p1->lp_txqnob > p2->lp_txqnob)

        if (p1->lp_txcredits > p2->lp_txcredits)

        if (p1->lp_txcredits < p2->lp_txcredits)

        if (r1->lr_seq - r2->lr_seq <= 0)
static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)

        lnet_remotenet_t *rnet;
        lnet_route_t     *route;
        lnet_route_t     *best_route;
        lnet_route_t     *last_route;
        struct lnet_peer *lp_best;
        struct lnet_peer *lp;

        /* If @rtr_nid is not LNET_NID_ANY, return the gateway with
         * rtr_nid nid, otherwise find the best gateway I can use */

        rnet = lnet_find_net_locked(LNET_NIDNET(target));

        best_route = last_route = NULL;
        list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
                lp = route->lr_gateway;

                if (!lnet_is_route_alive(route))

                if (ni != NULL && lp->lp_ni != ni)

                if (lp->lp_nid == rtr_nid) /* it's a pre-determined router */

                if (lp_best == NULL) {
                        best_route = last_route = route;

                /* no protection on below fields, but it's harmless */
                if (last_route->lr_seq - route->lr_seq < 0)

                rc = lnet_compare_routes(route, best_route);

        /* set sequence number on the best router to the latest sequence + 1
         * so we can round-robin all routers; it's racy and inaccurate but
         * harmless and functional */
        if (best_route != NULL)
                best_route->lr_seq = last_route->lr_seq + 1;
int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)

        lnet_nid_t        dst_nid = msg->msg_target.nid;
        struct lnet_ni   *src_ni;
        struct lnet_ni   *local_ni;
        struct lnet_peer *lp;

        /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
         * but we might want to use a pre-determined router for ACK/REPLY
         */
        /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
        LASSERT(msg->msg_txpeer == NULL);
        LASSERT(!msg->msg_sending);
        LASSERT(!msg->msg_target_is_router);
        LASSERT(!msg->msg_receiving);

        msg->msg_sending = 1;

        LASSERT(!msg->msg_tx_committed);
        cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);

        if (the_lnet.ln_shutdown) {
                lnet_net_unlock(cpt);

        if (src_nid == LNET_NID_ANY) {
                src_ni = NULL;
        } else {
                src_ni = lnet_nid2ni_locked(src_nid, cpt);
                if (src_ni == NULL) {
                        lnet_net_unlock(cpt);
                        LCONSOLE_WARN("Can't send to %s: src %s is not a "
                                      "local nid\n", libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));

                LASSERT(!msg->msg_routing);

        /* Is this for someone on a local network? */
        local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);

        if (local_ni != NULL) {
                if (src_ni == NULL) {
                        src_ni = local_ni;
                        src_nid = src_ni->ni_nid;
                } else if (src_ni == local_ni) {
                        lnet_ni_decref_locked(local_ni, cpt);
                } else {
                        lnet_ni_decref_locked(local_ni, cpt);
                        lnet_ni_decref_locked(src_ni, cpt);
                        lnet_net_unlock(cpt);
                        LCONSOLE_WARN("No route to %s from %s\n",
                                      libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));

                LASSERT(src_nid != LNET_NID_ANY);
                lnet_msg_commit(msg, cpt);

                if (!msg->msg_routing)
                        msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

                if (src_ni == the_lnet.ln_loni) {
                        /* No send credit hassles with LOLND */
                        lnet_net_unlock(cpt);
                        lnet_ni_send(src_ni, msg);

                lnet_ni_decref_locked(src_ni, cpt);
                lnet_net_unlock(cpt);

        rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
        /* lp has ref on src_ni; lose mine */
        lnet_ni_decref_locked(src_ni, cpt);
        if (rc != 0) {
                lnet_net_unlock(cpt);
                LCONSOLE_WARN("Error %d finding peer %s\n", rc,
                              libcfs_nid2str(dst_nid));
                /* ENOMEM or shutting down */

        LASSERT(lp->lp_ni == src_ni);

        lnet_net_unlock(cpt);

        /*
         * - once application finishes computation, check here to update
         *   router states before it waits for pending IO in LNetEQPoll
         * - recursion breaker: router checker sends no message
         *   to remote networks */
        if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING)
                lnet_router_checker();

        /* sending to a remote network */
        lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
        if (lp == NULL) {
                lnet_ni_decref_locked(src_ni, cpt);
                lnet_net_unlock(cpt);

                LCONSOLE_WARN("No route to %s via %s "
                              "(all routers down)\n",
                              libcfs_id2str(msg->msg_target),
                              libcfs_nid2str(src_nid));
                return -EHOSTUNREACH;

        /* rtr_nid is LNET_NID_ANY or the NID of a pre-determined router;
         * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't the
         * pre-determined router.  This can happen if the router table
         * was changed while we released the lock */
        if (rtr_nid != lp->lp_nid) {
                cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);

                        lnet_ni_decref_locked(src_ni, cpt);
                        lnet_net_unlock(cpt);

                        rtr_nid = lp->lp_nid;

        CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
               libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
               lnet_msgtyp2str(msg->msg_type), msg->msg_len);

        if (src_ni == NULL) {
                src_ni = lp->lp_ni;
                src_nid = src_ni->ni_nid;
        } else {
                LASSERT(src_ni == lp->lp_ni);
                lnet_ni_decref_locked(src_ni, cpt);
        }

        lnet_peer_addref_locked(lp);

        LASSERT(src_nid != LNET_NID_ANY);
        lnet_msg_commit(msg, cpt);

        if (!msg->msg_routing) {
                /* I'm the source and now I know which NI to send on */
                msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

        msg->msg_target_is_router = 1;
        msg->msg_target.nid = lp->lp_nid;
        msg->msg_target.pid = LNET_PID_LUSTRE;

        /* 'lp' is our best choice of peer */

        LASSERT(!msg->msg_peertxcredit);
        LASSERT(!msg->msg_txcredit);
        LASSERT(msg->msg_txpeer == NULL);

        msg->msg_txpeer = lp;   /* msg takes my ref on lp */

        rc = lnet_post_send_locked(msg, 0);
        lnet_net_unlock(cpt);

        if (rc == LNET_CREDIT_OK)
                lnet_ni_send(src_ni, msg);

        return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)

        the_lnet.ln_counters[cpt]->drop_count++;
        the_lnet.ln_counters[cpt]->drop_length += nob;
        lnet_net_unlock(cpt);

        lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)

        lnet_hdr_t *hdr = &msg->msg_hdr;

        if (msg->msg_wanted != 0)
                lnet_setpayloadbuffer(msg);

        lnet_build_msg_event(msg, LNET_EVENT_PUT);

        /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
         * it back into the ACK during lnet_finalize() */
        msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
                        (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);

        lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
                     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)

        lnet_hdr_t              *hdr = &msg->msg_hdr;
        struct lnet_match_info   info;

        /* Convert put fields to host byte order */
        hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
        hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
        hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);

        info.mi_id.nid  = hdr->src_nid;
        info.mi_id.pid  = hdr->src_pid;
        info.mi_opc     = LNET_MD_OP_PUT;
        info.mi_portal  = hdr->msg.put.ptl_index;
        info.mi_rlength = hdr->payload_length;
        info.mi_roffset = hdr->msg.put.offset;
        info.mi_mbits   = hdr->msg.put.match_bits;

        msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;

        rc = lnet_ptl_match_md(&info, msg);

        switch (rc) {
        case LNET_MATCHMD_OK:
                lnet_recv_put(ni, msg);

        case LNET_MATCHMD_NONE:
                if (msg->msg_rx_delayed) /* attached on delayed list */

                rc = lnet_ni_eager_recv(ni, msg);

        case LNET_MATCHMD_DROP:
                CNETERR("Dropping PUT from %s portal %d match "LPU64
                        " offset %d length %d: %d\n",
                        libcfs_id2str(info.mi_id), info.mi_portal,
                        info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

                return ENOENT;  /* +ve: OK but no match */
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)

        struct lnet_match_info  info;
        lnet_hdr_t             *hdr = &msg->msg_hdr;
        lnet_handle_wire_t      reply_wmd;

        /* Convert get fields to host byte order */
        hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
        hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
        hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
        hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);

        info.mi_id.nid  = hdr->src_nid;
        info.mi_id.pid  = hdr->src_pid;
        info.mi_opc     = LNET_MD_OP_GET;
        info.mi_portal  = hdr->msg.get.ptl_index;
        info.mi_rlength = hdr->msg.get.sink_length;
        info.mi_roffset = hdr->msg.get.src_offset;
        info.mi_mbits   = hdr->msg.get.match_bits;

        rc = lnet_ptl_match_md(&info, msg);
        if (rc == LNET_MATCHMD_DROP) {
                CNETERR("Dropping GET from %s portal %d match "LPU64
                        " offset %d length %d\n",
                        libcfs_id2str(info.mi_id), info.mi_portal,
                        info.mi_mbits, info.mi_roffset, info.mi_rlength);
                return ENOENT;  /* +ve: OK but no match */

        LASSERT(rc == LNET_MATCHMD_OK);

        lnet_build_msg_event(msg, LNET_EVENT_GET);

        reply_wmd = hdr->msg.get.return_wmd;

        lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
                       msg->msg_offset, msg->msg_wanted);

        msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

                /* The LND completes the REPLY from her recv procedure */
                lnet_ni_recv(ni, msg->msg_private, msg, 0,
                             msg->msg_offset, msg->msg_len, msg->msg_len);

        lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
        msg->msg_receiving = 0;

        rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
        if (rc < 0) {
                /* didn't get as far as lnet_ni_send() */
                CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
                       libcfs_nid2str(ni->ni_nid),
                       libcfs_id2str(info.mi_id), rc);

                lnet_finalize(ni, msg, rc);
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)

        void             *private = msg->msg_private;
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};

        cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CNETERR("%s: Dropping REPLY from %s for %s "
                        "MD "LPX64"."LPX64"\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        (md == NULL) ? "invalid" : "inactive",
                        hdr->msg.reply.dst_wmd.wh_interface_cookie,
                        hdr->msg.reply.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);
                return ENOENT;  /* +ve: OK but no match */

        LASSERT(md->md_offset == 0);

        rlength = hdr->payload_length;
        mlength = MIN(rlength, (int)md->md_length);

        if (mlength < rlength &&
            (md->md_options & LNET_MD_TRUNCATE) == 0) {
                CNETERR("%s: Dropping REPLY from %s length %d "
                        "for MD "LPX64" would overflow (%d)\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,

                lnet_res_unlock(cpt);
                return ENOENT;  /* +ve: OK but no match */

        CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md "LPX64"\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

        lnet_msg_attach_md(msg, md, 0, mlength);

                lnet_setpayloadbuffer(msg);

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_REPLY);

        lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)

        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* Convert ack fields to host byte order */
        hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
        hdr->msg.ack.mlength    = le32_to_cpu(hdr->msg.ack.mlength);

        cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                /* Don't moan; this is expected */
                        "%s: Dropping ACK from %s to %s MD "LPX64"."LPX64"\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        (md == NULL) ? "invalid" : "inactive",
                        hdr->msg.ack.dst_wmd.wh_interface_cookie,
                        hdr->msg.ack.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);
                return ENOENT;  /* +ve! */

        CDEBUG(D_NET, "%s: ACK from %s into md "LPX64"\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               hdr->msg.ack.dst_wmd.wh_object_cookie);

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_ACK);

        lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
/*
 * \retval LNET_CREDIT_OK   If \a msg is forwarded
 * \retval LNET_CREDIT_WAIT If \a msg is blocked for lack of a buffer
 * \retval -ve              error code
 */
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)

        if (!the_lnet.ln_routing)

        if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
            lnet_msg2bufpool(msg)->rbp_credits <= 0) {
                if (ni->ni_lnd->lnd_eager_recv == NULL) {
                        msg->msg_rx_ready_delay = 1;

                        lnet_net_unlock(msg->msg_rx_cpt);
                        rc = lnet_ni_eager_recv(ni, msg);
                        lnet_net_lock(msg->msg_rx_cpt);

        rc = lnet_post_routed_recv_locked(msg, 0);
lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)

        switch (msg->msg_type) {
        case LNET_MSG_ACK:
                rc = lnet_parse_ack(ni, msg);
        case LNET_MSG_PUT:
                rc = lnet_parse_put(ni, msg);
        case LNET_MSG_GET:
                rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
        case LNET_MSG_REPLY:
                rc = lnet_parse_reply(ni, msg);
        default: /* prevent an unused label if !kernel */

        LASSERT(rc == 0 || rc == ENOENT);
char *
lnet_msgtyp2str(int type)

        case LNET_MSG_REPLY:

        case LNET_MSG_HELLO:

        return "<UNKNOWN>";

EXPORT_SYMBOL(lnet_msgtyp2str);
lnet_print_hdr(lnet_hdr_t *hdr)

        lnet_process_id_t src = {0};
        lnet_process_id_t dst = {0};
        char *type_str = lnet_msgtyp2str(hdr->type);

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        dst.nid = hdr->dest_nid;
        dst.pid = hdr->dest_pid;

        CWARN("P3 Header at %p of type %s\n", hdr, type_str);
        CWARN("    From %s\n", libcfs_id2str(src));
        CWARN("    To   %s\n", libcfs_id2str(dst));

        switch (hdr->type) {

        case LNET_MSG_PUT:
                CWARN("    Ptl index %d, ack md "LPX64"."LPX64", "
                      "match bits "LPU64"\n",
                      hdr->msg.put.ptl_index,
                      hdr->msg.put.ack_wmd.wh_interface_cookie,
                      hdr->msg.put.ack_wmd.wh_object_cookie,
                      hdr->msg.put.match_bits);
                CWARN("    Length %d, offset %d, hdr data "LPX64"\n",
                      hdr->payload_length, hdr->msg.put.offset,
                      hdr->msg.put.hdr_data);

        case LNET_MSG_GET:
                CWARN("    Ptl index %d, return md "LPX64"."LPX64", "
                      "match bits "LPU64"\n", hdr->msg.get.ptl_index,
                      hdr->msg.get.return_wmd.wh_interface_cookie,
                      hdr->msg.get.return_wmd.wh_object_cookie,
                      hdr->msg.get.match_bits);
                CWARN("    Length %d, src offset %d\n",
                      hdr->msg.get.sink_length,
                      hdr->msg.get.src_offset);

        case LNET_MSG_ACK:
                CWARN("    dst md "LPX64"."LPX64", "
                      "manipulated length %d\n",
                      hdr->msg.ack.dst_wmd.wh_interface_cookie,
                      hdr->msg.ack.dst_wmd.wh_object_cookie,
                      hdr->msg.ack.mlength);

        case LNET_MSG_REPLY:
                CWARN("    dst md "LPX64"."LPX64", "
                      hdr->msg.reply.dst_wmd.wh_interface_cookie,
                      hdr->msg.reply.dst_wmd.wh_object_cookie,
                      hdr->payload_length);
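
/*
 * lnet_parse() is the entry point for every message an LND receives: it
 * validates the header, decides whether the message is for this node or
 * must be forwarded, and commits it for local delivery or routing.  Once
 * it decides not to return an error, it guarantees lnd_recv() will be
 * called on the message, come what may.
 */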
int
lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
           void *private, int rdma_req)

        struct lnet_msg *msg;
        lnet_peer_t     *rxpeer;
        lnet_pid_t       dest_pid;
        lnet_nid_t       dest_nid;

        __u32            payload_length;

        LASSERT(!in_interrupt());

        type = le32_to_cpu(hdr->type);
        src_nid = le64_to_cpu(hdr->src_nid);
        dest_nid = le64_to_cpu(hdr->dest_nid);
        dest_pid = le32_to_cpu(hdr->dest_pid);
        payload_length = le32_to_cpu(hdr->payload_length);

        for_me = (ni->ni_nid == dest_nid);
        cpt = lnet_cpt_of_nid(from_nid);

                if (payload_length > 0) {
                        CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               lnet_msgtyp2str(type), payload_length);

        case LNET_MSG_REPLY:
                if (payload_length >
                    (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
                        CERROR("%s, src %s: bad %s payload %d "
                               "(%d max expected)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               lnet_msgtyp2str(type),
                               payload_length,
                               for_me ? LNET_MAX_PAYLOAD : LNET_MTU);

        default:
                CERROR("%s, src %s: Bad message type 0x%x\n",
                       libcfs_nid2str(from_nid),
                       libcfs_nid2str(src_nid), type);

        if (the_lnet.ln_routing &&
            ni->ni_last_alive != cfs_time_current_sec()) {
                /* NB: so far here is the only place to set NI status to "up" */
                ni->ni_last_alive = cfs_time_current_sec();
                if (ni->ni_status != NULL &&
                    ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
                        ni->ni_status->ns_status = LNET_NI_STATUS_UP;

        /* Regard a bad destination NID as a protocol error.  Senders should
         * know what they're doing; if they don't they're misconfigured, buggy
         * or malicious so we chop them off at the knees :) */

                if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
                        /* should have gone direct */
                        CERROR("%s, src %s: Bad dest nid %s "
                               "(should have been sent direct)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));

                if (lnet_islocalnid(dest_nid)) {
                        /* dest is another local NI; sender should have used
                         * this node's NID on its own network */
                        CERROR("%s, src %s: Bad dest nid %s "
                               "(it's my nid but on a different network)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));

                if (rdma_req && type == LNET_MSG_GET) {
                        CERROR("%s, src %s: Bad optimized GET for %s "
                               "(final destination must be me)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));

                if (!the_lnet.ln_routing) {
                        CERROR("%s, src %s: Dropping message for %s "
                               "(routing not enabled)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               libcfs_nid2str(dest_nid));

        /* Message looks OK; we're not going to return an error, so we MUST
         * call back lnd_recv() come what may... */

        if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
            fail_peer(src_nid, 0)) {                    /* shall we now? */
                CERROR("%s, src %s: Dropping %s to simulate failure\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));

        if (!list_empty(&the_lnet.ln_drop_rules) &&
            lnet_drop_rule_match(hdr)) {
                CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate "
                       "silent message loss\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("%s, src %s: Dropping %s (out of memory)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));

        /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
         * pointers NULL etc */

        msg->msg_type = type;
        msg->msg_private = private;
        msg->msg_receiving = 1;
        msg->msg_rdma_get = rdma_req;
        msg->msg_len = msg->msg_wanted = payload_length;
        msg->msg_offset = 0;
        msg->msg_hdr = *hdr;
        /* for building message event */
        msg->msg_from = from_nid;

                msg->msg_target.pid = dest_pid;
                msg->msg_target.nid = dest_nid;
                msg->msg_routing = 1;

                /* convert common msg->hdr fields to host byteorder */
                msg->msg_hdr.type           = type;
                msg->msg_hdr.src_nid        = src_nid;
                msg->msg_hdr.src_pid        = le32_to_cpu(msg->msg_hdr.src_pid);
                msg->msg_hdr.dest_nid       = dest_nid;
                msg->msg_hdr.dest_pid       = dest_pid;
                msg->msg_hdr.payload_length = payload_length;

        rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
        if (rc != 0) {
                lnet_net_unlock(cpt);
                CERROR("%s, src %s: Dropping %s "
                       "(error %d looking up sender)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type), rc);

        if (lnet_isrouter(msg->msg_rxpeer)) {
                lnet_peer_set_alive(msg->msg_rxpeer);
                if (avoid_asym_router_failure &&
                    LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
                        /* received a remote message from router, update
                         * remote NI status on this router.
                         * NB: multi-hop routed message will be ignored.
                         */
                        lnet_router_ni_update_locked(msg->msg_rxpeer,
                                                     LNET_NIDNET(src_nid));

        lnet_msg_commit(msg, cpt);
        /* LND just notified me for incoming message from rxpeer, so assume
         * it is alive */
        rxpeer = msg->msg_rxpeer;
        rxpeer->lp_last_alive = rxpeer->lp_last_query = cfs_time_current();
        if (!rxpeer->lp_alive)
                lnet_notify_locked(rxpeer, 0, 1, rxpeer->lp_last_alive);

        if (lnet_isrouter(msg->msg_rxpeer) &&
            LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
                lnet_router_ni_update_locked(msg->msg_rxpeer,
                                             LNET_NIDNET(src_nid));

        /* message delay simulation */
        if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
                     lnet_delay_rule_match_locked(hdr, msg))) {
                lnet_net_unlock(cpt);

        rc = lnet_parse_forward_locked(ni, msg);
        lnet_net_unlock(cpt);

        if (rc == LNET_CREDIT_OK) {
                lnet_ni_recv(ni, msg->msg_private, msg, 0,
                             0, payload_length, payload_length);

        lnet_net_unlock(cpt);

        rc = lnet_parse_local(ni, msg);

        LASSERT(msg->msg_md == NULL);
        lnet_finalize(ni, msg, rc);

        lnet_drop_message(ni, cpt, private, payload_length);

EXPORT_SYMBOL(lnet_parse);
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)

        while (!list_empty(head)) {
                lnet_process_id_t id = {0};

                msg = list_entry(head->next, lnet_msg_t, msg_list);
                list_del(&msg->msg_list);

                id.nid = msg->msg_hdr.src_nid;
                id.pid = msg->msg_hdr.src_pid;

                LASSERT(msg->msg_md == NULL);
                LASSERT(msg->msg_rx_delayed);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

                CWARN("Dropping delayed PUT from %s portal %d match "LPU64
                      " offset %d length %d: %s\n",
                      libcfs_id2str(id),
                      msg->msg_hdr.msg.put.ptl_index,
                      msg->msg_hdr.msg.put.match_bits,
                      msg->msg_hdr.msg.put.offset,
                      msg->msg_hdr.payload_length, reason);

                /* NB I can't drop msg's ref on msg_rxpeer until after I've
                 * called lnet_drop_message(), so I just hang onto msg as well
                 * until that's done */

                lnet_drop_message(msg->msg_rxpeer->lp_ni,
                                  msg->msg_rxpeer->lp_cpt,
                                  msg->msg_private, msg->msg_len);

                /*
                 * NB: the message will not generate an event because it has
                 * no attached MD, but we still should give an error code so
                 * lnet_msg_decommit() can skip counter operations and other
                 * checks.
                 */
                lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
lnet_recv_delayed_msg_list(struct list_head *head)

        while (!list_empty(head)) {
                lnet_process_id_t id;

                msg = list_entry(head->next, lnet_msg_t, msg_list);
                list_del(&msg->msg_list);

                /* md won't disappear under me, since each msg
                 * holds a ref on it */

                id.nid = msg->msg_hdr.src_nid;
                id.pid = msg->msg_hdr.src_pid;

                LASSERT(msg->msg_rx_delayed);
                LASSERT(msg->msg_md != NULL);
                LASSERT(msg->msg_rxpeer != NULL);
                LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

                CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
                       "match "LPU64" offset %d length %d.\n",
                       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
                       msg->msg_hdr.msg.put.match_bits,
                       msg->msg_hdr.msg.put.offset,
                       msg->msg_hdr.payload_length);

                lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed to
 * by the \a mdh handle. Using an MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if it exists).
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset,
        __u64 hdr_data)

        struct lnet_msg         *msg;
        struct lnet_libmd       *md;

        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);

        if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
            fail_peer(target.nid, 1)) {                 /* shall we now? */
                CERROR("Dropping PUT to %s: simulated failure\n",
                       libcfs_id2str(target));

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));

        msg->msg_vmflush = !!memory_pressure_get();

        cpt = lnet_cpt_of_cookie(mdh.cookie);

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping PUT ("LPU64":%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);
                lnet_res_unlock(cpt);

        CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

        msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.put.ptl_index  = cpu_to_le32(portal);
        msg->msg_hdr.msg.put.offset     = cpu_to_le32(offset);
        msg->msg_hdr.msg.put.hdr_data   = hdr_data;

        /* NB handles only looked up by creator (no flips) */
        if (ack == LNET_ACK_REQ) {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        the_lnet.ln_interface_cookie;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        md->md_lh.lh_cookie;
        } else {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

        rc = lnet_send(self, msg, LNET_NID_ANY);
        if (rc != 0) {
                CNETERR("Error sending PUT to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize(NULL, msg, rc);

        /* completion will be signalled by an event */
        return 0;

EXPORT_SYMBOL(LNetPut);
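
/*
 * Illustrative sketch (not from the original file): a minimal PUT of a
 * previously bound MD.  "mdh" is assumed to come from LNetMDBind(), and
 * "target", "portal" and "match_bits" are assumed to be agreed with the
 * peer beforehand:
 *
 *      int rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
 *                       portal, match_bits, 0, 0);
 *      if (rc != 0)
 *              CERROR("LNetPut to %s failed: %d\n",
 *                     libcfs_id2str(target), rc);
 *
 * On success, completion is reported via LNET_EVENT_SEND (and
 * LNET_EVENT_ACK, when requested) on the MD's EQ.
 */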
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)

        /* The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
         * returns a msg for the LND to pass to lnet_finalize() when the sink
         * data has been received.
         *
         * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
         * lnet_finalize() is called on it, so the LND must call this first */

        struct lnet_msg         *msg = lnet_msg_alloc();
        struct lnet_libmd       *getmd = getmsg->msg_md;
        lnet_process_id_t        peer_id = getmsg->msg_target;

        LASSERT(!getmsg->msg_target_is_router);
        LASSERT(!getmsg->msg_routing);

                CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));

        cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);

        LASSERT(getmd->md_refcount > 0);

        if (getmd->md_threshold == 0) {
                CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),

                lnet_res_unlock(cpt);

        LASSERT(getmd->md_offset == 0);

        CDEBUG(D_NET, "%s: Reply from %s md %p\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

        /* setup information for lnet_build_msg_event */
        msg->msg_from = peer_id.nid;
        msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
        msg->msg_hdr.src_nid = peer_id.nid;
        msg->msg_hdr.payload_length = getmd->md_length;
        msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

        lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
        lnet_res_unlock(cpt);

        cpt = lnet_cpt_of_nid(peer_id.nid);

        lnet_msg_commit(msg, cpt);
        lnet_net_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_REPLY);

        cpt = lnet_cpt_of_nid(peer_id.nid);

        the_lnet.ln_counters[cpt]->drop_count++;
        the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
        lnet_net_unlock(cpt);

EXPORT_SYMBOL(lnet_create_reply_msg);
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)

        /* Set the REPLY length, now the RDMA that elides the REPLY message has
         * completed and I know it. */
        LASSERT(reply != NULL);
        LASSERT(reply->msg_type == LNET_MSG_GET);
        LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);

        /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
         * the end of my buffer, I might as well be dead. */
        LASSERT(len <= reply->msg_ev.mlength);

        reply->msg_ev.mlength = len;

EXPORT_SYMBOL(lnet_set_reply_msg_len);
/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if it exists) of the MD.
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset)

        struct lnet_msg         *msg;
        struct lnet_libmd       *md;

        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);

        if (!list_empty(&the_lnet.ln_test_peers) &&     /* normally we don't */
            fail_peer(target.nid, 1))                   /* shall we now? */
                CERROR("Dropping GET to %s: simulated failure\n",
                       libcfs_id2str(target));

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));

        cpt = lnet_cpt_of_cookie(mdh.cookie);

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CERROR("Dropping GET ("LPU64":%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                lnet_res_unlock(cpt);

        CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

        lnet_msg_attach_md(msg, md, 0, 0);

        lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

        msg->msg_hdr.msg.get.match_bits  = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.get.ptl_index   = cpu_to_le32(portal);
        msg->msg_hdr.msg.get.src_offset  = cpu_to_le32(offset);
        msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

        /* NB handles only looked up by creator (no flips) */
        msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
                the_lnet.ln_interface_cookie;
        msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
                md->md_lh.lh_cookie;

        lnet_res_unlock(cpt);

        lnet_build_msg_event(msg, LNET_EVENT_SEND);

        rc = lnet_send(self, msg, LNET_NID_ANY);
        if (rc != 0) {
                CNETERR("Error sending GET to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize(NULL, msg, rc);

        /* completion will be signalled by an event */
        return 0;

EXPORT_SYMBOL(LNetGet);
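
/*
 * Illustrative sketch (not from the original file): fetch remote data into
 * a previously bound MD.  "mdh" is assumed to come from LNetMDBind(), and
 * "target"/"portal"/"match_bits" to be agreed with the peer:
 *
 *      int rc = LNetGet(LNET_NID_ANY, mdh, target, portal, match_bits, 0);
 *      if (rc != 0)
 *              CERROR("LNetGet from %s failed: %d\n",
 *                     libcfs_id2str(target), rc);
 *
 * On success, LNET_EVENT_SEND is followed by LNET_EVENT_REPLY once the
 * data has landed in the local MD.
 */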
/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 *
 * \retval 0 If \a dstnid belongs to a local interface, and the reserved option
 * local_nid_dist_zero is set, which is the default.
 * \retval positives Distance to the target NID, i.e. the number of hops plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)

        struct list_head *e;

        lnet_remotenet_t *rnet;
        __u32             dstnet = LNET_NIDNET(dstnid);

        struct list_head *rn_list;

        /* if !local_nid_dist_zero, I don't return a distance of 0 ever
         * (when lustre sees a distance of 0, it substitutes 0@lo), so I
         * keep order 0 free for 0@lo and order 1 free for a local NID
         */

        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);

        cpt = lnet_net_lock_current();

        list_for_each(e, &the_lnet.ln_nis) {
                ni = list_entry(e, lnet_ni_t, ni_list);

                if (ni->ni_nid == dstnid) {
                        if (srcnidp != NULL)

                        if (orderp != NULL) {
                                if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)

                        lnet_net_unlock(cpt);

                        return local_nid_dist_zero ? 0 : 1;

                if (LNET_NIDNET(ni->ni_nid) == dstnet) {
                        if (srcnidp != NULL)
                                *srcnidp = ni->ni_nid;

                        lnet_net_unlock(cpt);

        rn_list = lnet_net2rnethash(dstnet);
        list_for_each(e, rn_list) {
                rnet = list_entry(e, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == dstnet) {
                        lnet_route_t *route;
                        lnet_route_t *shortest = NULL;

                        LASSERT(!list_empty(&rnet->lrn_routes));

                        list_for_each_entry(route, &rnet->lrn_routes,
                                            lr_list) {
                                if (shortest == NULL ||
                                    route->lr_hops < shortest->lr_hops)

                        LASSERT(shortest != NULL);
                        hops = shortest->lr_hops;
                        if (srcnidp != NULL)
                                *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;

                        lnet_net_unlock(cpt);

        lnet_net_unlock(cpt);
        return -EHOSTUNREACH;

EXPORT_SYMBOL(LNetDist);
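
/*
 * Illustrative sketch (not from the original file): probe reachability of
 * a destination before using it, assuming "nid" names the destination:
 *
 *      lnet_nid_t src;
 *      __u32      order;
 *      int        dist = LNetDist(nid, &src, &order);
 *
 *      if (dist < 0)
 *              CERROR("%s unreachable: %d\n", libcfs_nid2str(nid), dist);
 */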
/**
 * Set the number of asynchronous messages expected from a target process.
 *
 * This function is only meaningful for userspace callers. It's a no-op when
 * called from kernel.
 *
 * Asynchronous messages are those that can come from a target when the
 * userspace process is not waiting for IO to complete; e.g., AST callbacks
 * from Lustre servers. Specifying the expected number of such messages
 * allows them to be eagerly received when the user process is not running in
 * LNet; otherwise network errors may occur.
 *
 * \param id Process ID of the target process.
 * \param nasync Number of asynchronous messages expected from the target.
 *
 * \return 0 on success, and an error code otherwise.
 */
int
LNetSetAsync(lnet_process_id_t id, int nasync)

        lnet_remotenet_t *rnet;
        struct list_head *tmp;
        lnet_route_t     *route;

        /* Target on a local network? */
        ni = lnet_net2ni(LNET_NIDNET(id.nid));

                if (ni->ni_lnd->lnd_setasync != NULL)
                        rc = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);

        /* Target on a remote network: apply to routers */

        LIBCFS_ALLOC(nids, maxnids * sizeof(*nids));

        /* Snapshot all the router NIDs */
        cpt = lnet_net_lock_current();
        rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));

                list_for_each(tmp, &rnet->lrn_routes) {
                        if (nnids == maxnids) {
                                lnet_net_unlock(cpt);
                                LIBCFS_FREE(nids, maxnids * sizeof(*nids));

                        route = list_entry(tmp, lnet_route_t, lr_list);
                        nids[nnids++] = route->lr_gateway->lp_nid;

        lnet_net_unlock(cpt);

        /* set async on all the routers */
        while (nnids-- > 0) {
                id.pid = LNET_PID_LUSTRE;
                id.nid = nids[nnids];

                ni = lnet_net2ni(LNET_NIDNET(id.nid));

                if (ni->ni_lnd->lnd_setasync != NULL) {
                        rc2 = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);

        LIBCFS_FREE(nids, maxnids * sizeof(*nids));

EXPORT_SYMBOL(LNetSetAsync);