/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <lnet/lib-lnet.h>
45 static int local_nid_dist_zero = 1;
46 CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
50 static void lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg);
#define LNET_MATCHMD_NONE     0   /* Didn't match */
#define LNET_MATCHMD_OK       1   /* Matched OK */
#define LNET_MATCHMD_DROP     2   /* Must be discarded */
57 lnet_try_match_md (int index, int op_mask, lnet_process_id_t src,
58 unsigned int rlength, unsigned int roffset,
59 __u64 match_bits, lnet_libmd_t *md, lnet_msg_t *msg,
60 unsigned int *mlength_out, unsigned int *offset_out)
62 /* ALWAYS called holding the LNET_LOCK, and can't LNET_UNLOCK;
63 * lnet_match_blocked_msg() relies on this to avoid races */
66 lnet_me_t *me = md->md_me;
68 /* mismatched MD op */
69 if ((md->md_options & op_mask) == 0)
70 return LNET_MATCHMD_NONE;
73 if (lnet_md_exhausted(md))
74 return LNET_MATCHMD_NONE;
76 /* mismatched ME nid/pid? */
77 if (me->me_match_id.nid != LNET_NID_ANY &&
78 me->me_match_id.nid != src.nid)
79 return LNET_MATCHMD_NONE;
81 if (me->me_match_id.pid != LNET_PID_ANY &&
82 me->me_match_id.pid != src.pid)
83 return LNET_MATCHMD_NONE;
85 /* mismatched ME matchbits? */
86 if (((me->me_match_bits ^ match_bits) & ~me->me_ignore_bits) != 0)
87 return LNET_MATCHMD_NONE;
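        /* For example (illustrative values): an ME with me_match_bits 0x1234
         * and me_ignore_bits 0xff accepts any match_bits in 0x1200..0x12ff,
         * since e.g. (0x1234 ^ 0x12ab) & ~0xff == 0. */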
89 /* Hurrah! This _is_ a match; check it out... */
91 if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
92 offset = md->md_offset;
96 if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
97 mlength = md->md_max_size;
98 LASSERT (md->md_offset + mlength <= md->md_length);
100 mlength = md->md_length - offset;
103 if (rlength <= mlength) { /* fits in allowed space */
105 } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
106 /* this packet _really_ is too big */
107 CERROR("Matching packet from %s, match "LPU64
108 " length %d too big: %d left, %d allowed\n",
109 libcfs_id2str(src), match_bits, rlength,
110 md->md_length - offset, mlength);
112 return LNET_MATCHMD_DROP;
115 /* Commit to this ME/MD */
116 CDEBUG(D_NET, "Incoming %s index %x from %s of "
117 "length %d/%d into md "LPX64" [%d] + %d\n",
118 (op_mask == LNET_MD_OP_PUT) ? "put" : "get",
119 index, libcfs_id2str(src), mlength, rlength,
120 md->md_lh.lh_cookie, md->md_niov, offset);
122 lnet_commit_md(md, msg);
123 md->md_offset = offset + mlength;
125 /* NB Caller will set ev.type and ev.hdr_data */
126 msg->msg_ev.initiator = src;
127 msg->msg_ev.pt_index = index;
128 msg->msg_ev.match_bits = match_bits;
129 msg->msg_ev.rlength = rlength;
130 msg->msg_ev.mlength = mlength;
131 msg->msg_ev.offset = offset;
133 lnet_md_deconstruct(md, &msg->msg_ev.md);
134 lnet_md2handle(&msg->msg_ev.md_handle, md);
136 *offset_out = offset;
137 *mlength_out = mlength;
139 /* Auto-unlink NOW, so the ME gets unlinked if required.
140 * We bumped md->md_refcount above so the MD just gets flagged
141 * for unlink when it is finalized. */
142 if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
143 lnet_md_exhausted(md)) {
147 return LNET_MATCHMD_OK;
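
/* Walk the match list of portal 'index' looking for an MD that accepts
 * this PUT/GET.  Returns LNET_MATCHMD_OK with *md_out, *mlength_out and
 * *offset_out filled in, LNET_MATCHMD_DROP if the message must be
 * discarded, or LNET_MATCHMD_NONE (PUTs to lazy portals only) if it
 * should be queued until a matching MD is attached. */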
151 lnet_match_md(int index, int op_mask, lnet_process_id_t src,
152 unsigned int rlength, unsigned int roffset,
153 __u64 match_bits, lnet_msg_t *msg,
154 unsigned int *mlength_out, unsigned int *offset_out,
155 lnet_libmd_t **md_out)
157 lnet_portal_t *ptl = &the_lnet.ln_portals[index];
163 CDEBUG (D_NET, "Request from %s of length %d into portal %d "
164 "MB="LPX64"\n", libcfs_id2str(src), rlength, index, match_bits);
166 if (index < 0 || index >= the_lnet.ln_nportals) {
167 CERROR("Invalid portal %d not in [0-%d]\n",
168 index, the_lnet.ln_nportals);
169 return LNET_MATCHMD_DROP;
172 cfs_list_for_each_entry_safe_typed (me, tmp, &ptl->ptl_ml,
173 lnet_me_t, me_list) {
176 /* ME attached but MD not attached yet */
180 LASSERT (me == md->md_me);
182 rc = lnet_try_match_md(index, op_mask, src, rlength,
183 roffset, match_bits, md, msg,
184 mlength_out, offset_out);
189 case LNET_MATCHMD_NONE:
192 case LNET_MATCHMD_OK:
194 return LNET_MATCHMD_OK;
196 case LNET_MATCHMD_DROP:
197 return LNET_MATCHMD_DROP;
202 if (op_mask == LNET_MD_OP_GET ||
203 (ptl->ptl_options & LNET_PTL_LAZY) == 0)
204 return LNET_MATCHMD_DROP;
206 return LNET_MATCHMD_NONE;
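
/* Test support: arrange for messages to/from 'nid' to be failed.  A
 * non-zero 'threshold' adds an entry that fails the next 'threshold'
 * matching messages; threshold == 0 removes the entry for 'nid' (or all
 * entries when nid == LNET_NID_ANY). */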
210 lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
212 lnet_test_peer_t *tp;
213 struct list_head *el;
214 struct list_head *next;
215 struct list_head cull;
217 LASSERT (the_lnet.ln_init);
219 if (threshold != 0) {
220 /* Adding a new entry */
221 LIBCFS_ALLOC(tp, sizeof(*tp));
226 tp->tp_threshold = threshold;
229 list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
234 /* removing entries */
235 CFS_INIT_LIST_HEAD (&cull);
239 list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
240 tp = list_entry (el, lnet_test_peer_t, tp_list);
242 if (tp->tp_threshold == 0 || /* needs culling anyway */
243 nid == LNET_NID_ANY || /* removing all entries */
244 tp->tp_nid == nid) /* matched this one */
246 list_del (&tp->tp_list);
247 list_add (&tp->tp_list, &cull);
253 while (!list_empty (&cull)) {
254 tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
256 list_del (&tp->tp_list);
257 LIBCFS_FREE(tp, sizeof (*tp));
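
/* Test support: decide whether a message to/from 'nid' should be dropped
 * to simulate failure, consulting the ln_test_peers list set up by
 * lnet_fail_nid().  Zombie entries (threshold reached) are only culled on
 * outgoing tests, since incoming tests may run at interrupt priority. */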
263 fail_peer (lnet_nid_t nid, int outgoing)
265 lnet_test_peer_t *tp;
266 struct list_head *el;
267 struct list_head *next;
268 struct list_head cull;
271 CFS_INIT_LIST_HEAD (&cull);
275 list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
276 tp = list_entry (el, lnet_test_peer_t, tp_list);
278 if (tp->tp_threshold == 0) {
281 /* only cull zombies on outgoing tests,
282 * since we may be at interrupt priority on
283 * incoming messages. */
284 list_del (&tp->tp_list);
285 list_add (&tp->tp_list, &cull);
290 if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
291 nid == tp->tp_nid) { /* fail this peer */
294 if (tp->tp_threshold != LNET_MD_THRESH_INF) {
297 tp->tp_threshold == 0) {
299 list_del (&tp->tp_list);
300 list_add (&tp->tp_list, &cull);
309 while (!list_empty (&cull)) {
310 tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
311 list_del (&tp->tp_list);
313 LIBCFS_FREE(tp, sizeof (*tp));
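
/* Scatter/gather helpers for struct iovec fragment lists: lnet_iov_nob()
 * totals the bytes they describe, lnet_copy_iov2iov() copies between two
 * (possibly offset) fragment lists, and lnet_extract_iov() builds a
 * sub-list covering [offset, offset + len) without modifying the source. */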
320 lnet_iov_nob (unsigned int niov, struct iovec *iov)
322 unsigned int nob = 0;
325 nob += (iov++)->iov_len;
331 lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
332 unsigned int nsiov, struct iovec *siov, unsigned int soffset,
335 /* NB diov, siov are READ-ONLY */
336 unsigned int this_nob;
341 /* skip complete frags before 'doffset' */
343 while (doffset >= diov->iov_len) {
344 doffset -= diov->iov_len;
350 /* skip complete frags before 'soffset' */
352 while (soffset >= siov->iov_len) {
353 soffset -= siov->iov_len;
362 this_nob = MIN(diov->iov_len - doffset,
363 siov->iov_len - soffset);
364 this_nob = MIN(this_nob, nob);
366 memcpy ((char *)diov->iov_base + doffset,
367 (char *)siov->iov_base + soffset, this_nob);
370 if (diov->iov_len > doffset + this_nob) {
378 if (siov->iov_len > soffset + this_nob) {
389 lnet_extract_iov (int dst_niov, struct iovec *dst,
390 int src_niov, struct iovec *src,
391 unsigned int offset, unsigned int len)
393 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
394 * for exactly 'len' bytes, and return the number of entries.
395 * NB not destructive to 'src' */
396 unsigned int frag_len;
399 if (len == 0) /* no data => */
400 return (0); /* no frags */
402 LASSERT (src_niov > 0);
403 while (offset >= src->iov_len) { /* skip initial frags */
404 offset -= src->iov_len;
407 LASSERT (src_niov > 0);
412 LASSERT (src_niov > 0);
413 LASSERT ((int)niov <= dst_niov);
415 frag_len = src->iov_len - offset;
416 dst->iov_base = ((char *)src->iov_base) + offset;
418 if (len <= frag_len) {
423 dst->iov_len = frag_len;
436 lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
443 lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov, unsigned int doffset,
444 unsigned int nskiov, lnet_kiov_t *skiov, unsigned int soffset,
451 lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
452 unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
459 lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
460 unsigned int niov, struct iovec *iov, unsigned int iovoffset,
467 lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
468 int src_niov, lnet_kiov_t *src,
469 unsigned int offset, unsigned int len)
474 #else /* __KERNEL__ */
477 lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
479 unsigned int nob = 0;
482 nob += (kiov++)->kiov_len;
488 lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
489 unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
492 /* NB diov, siov are READ-ONLY */
493 unsigned int this_nob;
500 LASSERT (!in_interrupt ());
503 while (doffset >= diov->kiov_len) {
504 doffset -= diov->kiov_len;
511 while (soffset >= siov->kiov_len) {
512 soffset -= siov->kiov_len;
521 this_nob = MIN(diov->kiov_len - doffset,
522 siov->kiov_len - soffset);
523 this_nob = MIN(this_nob, nob);
526 daddr = ((char *)cfs_kmap(diov->kiov_page)) +
527 diov->kiov_offset + doffset;
529 saddr = ((char *)cfs_kmap(siov->kiov_page)) +
530 siov->kiov_offset + soffset;
                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * In practice, however, at least one of the kiovs will be
                 * made up of already-mapped kernel pages, so the map/unmap
                 * calls are NOOPs. */
536 memcpy (daddr, saddr, this_nob);
539 if (diov->kiov_len > doffset + this_nob) {
543 cfs_kunmap(diov->kiov_page);
550 if (siov->kiov_len > soffset + this_nob) {
554 cfs_kunmap(siov->kiov_page);
563 cfs_kunmap(diov->kiov_page);
565 cfs_kunmap(siov->kiov_page);
569 lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
570 unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
573 /* NB iov, kiov are READ-ONLY */
574 unsigned int this_nob;
580 LASSERT (!in_interrupt ());
583 while (iovoffset >= iov->iov_len) {
584 iovoffset -= iov->iov_len;
591 while (kiovoffset >= kiov->kiov_len) {
592 kiovoffset -= kiov->kiov_len;
601 this_nob = MIN(iov->iov_len - iovoffset,
602 kiov->kiov_len - kiovoffset);
603 this_nob = MIN(this_nob, nob);
606 addr = ((char *)cfs_kmap(kiov->kiov_page)) +
607 kiov->kiov_offset + kiovoffset;
609 memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
612 if (iov->iov_len > iovoffset + this_nob) {
613 iovoffset += this_nob;
620 if (kiov->kiov_len > kiovoffset + this_nob) {
622 kiovoffset += this_nob;
624 cfs_kunmap(kiov->kiov_page);
634 cfs_kunmap(kiov->kiov_page);
638 lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
639 unsigned int niov, struct iovec *iov, unsigned int iovoffset,
642 /* NB kiov, iov are READ-ONLY */
643 unsigned int this_nob;
649 LASSERT (!in_interrupt ());
652 while (kiovoffset >= kiov->kiov_len) {
653 kiovoffset -= kiov->kiov_len;
660 while (iovoffset >= iov->iov_len) {
661 iovoffset -= iov->iov_len;
670 this_nob = MIN(kiov->kiov_len - kiovoffset,
671 iov->iov_len - iovoffset);
672 this_nob = MIN(this_nob, nob);
675 addr = ((char *)cfs_kmap(kiov->kiov_page)) +
676 kiov->kiov_offset + kiovoffset;
678 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
681 if (kiov->kiov_len > kiovoffset + this_nob) {
683 kiovoffset += this_nob;
685 cfs_kunmap(kiov->kiov_page);
692 if (iov->iov_len > iovoffset + this_nob) {
693 iovoffset += this_nob;
702 cfs_kunmap(kiov->kiov_page);
706 lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
707 int src_niov, lnet_kiov_t *src,
708 unsigned int offset, unsigned int len)
710 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
711 * for exactly 'len' bytes, and return the number of entries.
712 * NB not destructive to 'src' */
713 unsigned int frag_len;
716 if (len == 0) /* no data => */
717 return (0); /* no frags */
719 LASSERT (src_niov > 0);
720 while (offset >= src->kiov_len) { /* skip initial frags */
721 offset -= src->kiov_len;
724 LASSERT (src_niov > 0);
729 LASSERT (src_niov > 0);
730 LASSERT ((int)niov <= dst_niov);
732 frag_len = src->kiov_len - offset;
733 dst->kiov_page = src->kiov_page;
734 dst->kiov_offset = src->kiov_offset + offset;
736 if (len <= frag_len) {
738 LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
742 dst->kiov_len = frag_len;
743 LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
756 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
757 unsigned int offset, unsigned int mlen, unsigned int rlen)
759 unsigned int niov = 0;
760 struct iovec *iov = NULL;
761 lnet_kiov_t *kiov = NULL;
764 LASSERT (!in_interrupt ());
765 LASSERT (mlen == 0 || msg != NULL);
768 LASSERT(msg->msg_receiving);
769 LASSERT(!msg->msg_sending);
770 LASSERT(rlen == msg->msg_len);
771 LASSERT(mlen <= msg->msg_len);
773 msg->msg_wanted = mlen;
774 msg->msg_offset = offset;
775 msg->msg_receiving = 0;
778 niov = msg->msg_niov;
780 kiov = msg->msg_kiov;
783 LASSERT ((iov == NULL) != (kiov == NULL));
787 rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
788 niov, iov, kiov, offset, mlen, rlen);
790 lnet_finalize(ni, msg, rc);
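
/* Rank two candidate router peers: prefer the one with fewer bytes queued
 * (lp_txqnob) and, failing that, the one with more send credits
 * (lp_txcredits).  Callers treat a positive return as "p1 is the better
 * choice". */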
794 lnet_compare_routers(lnet_peer_t *p1, lnet_peer_t *p2)
796 if (p1->lp_txqnob < p2->lp_txqnob)
799 if (p1->lp_txqnob > p2->lp_txqnob)
802 if (p1->lp_txcredits > p2->lp_txcredits)
805 if (p1->lp_txcredits < p2->lp_txcredits)
813 lnet_setpayloadbuffer(lnet_msg_t *msg)
815 lnet_libmd_t *md = msg->msg_md;
817 LASSERT (msg->msg_len > 0);
818 LASSERT (!msg->msg_routing);
819 LASSERT (md != NULL);
820 LASSERT (msg->msg_niov == 0);
821 LASSERT (msg->msg_iov == NULL);
822 LASSERT (msg->msg_kiov == NULL);
824 msg->msg_niov = md->md_niov;
825 if ((md->md_options & LNET_MD_KIOV) != 0)
826 msg->msg_kiov = md->md_iov.kiov;
828 msg->msg_iov = md->md_iov.iov;
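
/* Initialise the common fields of an outgoing message: type, target,
 * payload buffer/offset and the wire header (in little-endian byte order;
 * src_nid is filled in later by lnet_send()). */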
832 lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
833 unsigned int offset, unsigned int len)
835 msg->msg_type = type;
836 msg->msg_target = target;
838 msg->msg_offset = offset;
841 lnet_setpayloadbuffer(msg);
843 memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
844 msg->msg_hdr.type = cpu_to_le32(type);
845 msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
846 msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
847 /* src_nid will be set later */
848 msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
849 msg->msg_hdr.payload_length = cpu_to_le32(len);
853 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
855 void *priv = msg->msg_private;
858 LASSERT (!in_interrupt ());
859 LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
860 (msg->msg_txcredit && msg->msg_peertxcredit));
862 rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
864 lnet_finalize(ni, msg, rc);
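
/* Ask the LND to accept the message body now (lnd_eager_recv) so the
 * message can safely be queued for a delayed match or while waiting for
 * router buffers/credits.  Marks the message delayed; returns 0 or a
 * negative error. */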
868 lnet_eager_recv_locked(lnet_msg_t *msg)
874 LASSERT (!msg->msg_delayed);
875 msg->msg_delayed = 1;
877 LASSERT (msg->msg_receiving);
878 LASSERT (!msg->msg_sending);
880 peer = msg->msg_rxpeer;
883 if (ni->ni_lnd->lnd_eager_recv != NULL) {
886 rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
889 CERROR("recv from %s / send to %s aborted: "
890 "eager_recv failed %d\n",
891 libcfs_nid2str(peer->lp_nid),
892 libcfs_id2str(msg->msg_target), rc);
893 LASSERT (rc < 0); /* required by my callers */
902 /* NB: caller shall hold a ref on 'lp' as I'd drop LNET_LOCK */
904 lnet_ni_peer_alive(lnet_peer_t *lp)
906 cfs_time_t last_alive = 0;
907 lnet_ni_t *ni = lp->lp_ni;
909 LASSERT (ni != NULL);
910 LASSERT (ni->ni_peertimeout > 0);
911 LASSERT (ni->ni_lnd->lnd_query != NULL);
914 (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
917 lp->lp_last_query = cfs_time_current();
919 if (last_alive != 0) /* NI has updated timestamp */
920 lp->lp_last_alive = last_alive;
924 /* NB: always called with LNET_LOCK held */
926 lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
928 lnet_ni_t *ni = lp->lp_ni;
932 LASSERT (ni != NULL);
933 LASSERT (ni->ni_peertimeout > 0);
935 /* Trust lnet_notify() if it has more recent aliveness news, but
936 * ignore the initial assumed death (see lnet_peers_start_down()).
938 if (!lp->lp_alive && lp->lp_alive_count > 0 &&
939 cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
942 deadline = cfs_time_add(lp->lp_last_alive,
943 cfs_time_seconds(ni->ni_peertimeout));
944 alive = cfs_time_after(deadline, now);
946 /* Update obsolete lp_alive */
947 if (alive && !lp->lp_alive && lp->lp_timestamp != 0 &&
948 cfs_time_before(lp->lp_timestamp, lp->lp_last_alive))
949 lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
955 /* NB: returns 1 when alive, 0 when dead, negative when error;
956 * may drop the LNET_LOCK */
958 lnet_peer_alive_locked (lnet_peer_t *lp)
960 lnet_ni_t *ni = lp->lp_ni;
961 cfs_time_t now = cfs_time_current();
963 LASSERT (ni != NULL);
965 if (ni->ni_peertimeout <= 0) /* disabled */
968 if (lnet_peer_is_alive(lp, now))
971 /* Peer appears dead, but we should avoid frequent NI queries (at
972 * most once per lnet_queryinterval seconds). */
973 if (lp->lp_last_query != 0) {
974 static const int lnet_queryinterval = 1;
976 cfs_time_t next_query =
977 cfs_time_add(lp->lp_last_query,
978 cfs_time_seconds(lnet_queryinterval));
980 if (cfs_time_before(now, next_query)) {
982 CWARN("Unexpected aliveness of peer %s: "
984 libcfs_nid2str(lp->lp_nid),
985 (int)now, (int)next_query,
986 lnet_queryinterval, ni->ni_peertimeout);
991 /* query NI for latest aliveness news */
992 lnet_ni_peer_alive(lp);
994 if (lnet_peer_is_alive(lp, now))
997 lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
1002 lnet_post_send_locked (lnet_msg_t *msg, int do_send)
1004 /* lnet_send is going to LNET_UNLOCK immediately after this, so it sets
1005 * do_send FALSE and I don't do the unlock/send/lock bit. I return
1006 * EAGAIN if msg blocked, EHOSTUNREACH if msg_txpeer appears dead, and
1007 * 0 if sent or OK to send */
1008 lnet_peer_t *lp = msg->msg_txpeer;
1009 lnet_ni_t *ni = lp->lp_ni;
1011 /* non-lnet_send() callers have checked before */
1012 LASSERT (!do_send || msg->msg_delayed);
1013 LASSERT (!msg->msg_receiving);
1015 /* NB 'lp' is always the next hop */
1016 if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
1017 lnet_peer_alive_locked(lp) == 0) {
1020 CDEBUG(D_NETERROR, "Dropping message for %s: peer not alive\n",
1021 libcfs_id2str(msg->msg_target));
1023 lnet_finalize(ni, msg, -EHOSTUNREACH);
1026 return EHOSTUNREACH;
1029 if (!msg->msg_peertxcredit) {
1030 LASSERT ((lp->lp_txcredits < 0) == !list_empty(&lp->lp_txq));
1032 msg->msg_peertxcredit = 1;
1033 lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
1036 if (lp->lp_txcredits < lp->lp_mintxcredits)
1037 lp->lp_mintxcredits = lp->lp_txcredits;
1039 if (lp->lp_txcredits < 0) {
1040 msg->msg_delayed = 1;
1041 list_add_tail (&msg->msg_list, &lp->lp_txq);
1046 if (!msg->msg_txcredit) {
1047 LASSERT ((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
1049 msg->msg_txcredit = 1;
1052 if (ni->ni_txcredits < ni->ni_mintxcredits)
1053 ni->ni_mintxcredits = ni->ni_txcredits;
1055 if (ni->ni_txcredits < 0) {
1056 msg->msg_delayed = 1;
1057 list_add_tail (&msg->msg_list, &ni->ni_txq);
1064 lnet_ni_send(ni, msg);
1072 lnet_commit_routedmsg (lnet_msg_t *msg)
1074 /* ALWAYS called holding the LNET_LOCK */
1075 LASSERT (msg->msg_routing);
1077 the_lnet.ln_counters.msgs_alloc++;
1078 if (the_lnet.ln_counters.msgs_alloc >
1079 the_lnet.ln_counters.msgs_max)
1080 the_lnet.ln_counters.msgs_max =
1081 the_lnet.ln_counters.msgs_alloc;
1083 the_lnet.ln_counters.route_count++;
1084 the_lnet.ln_counters.route_length += msg->msg_len;
1086 LASSERT (!msg->msg_onactivelist);
1087 msg->msg_onactivelist = 1;
1088 list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
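
/* Select the router buffer pool for this message: the first pool whose
 * buffers (rbp_npages pages) are large enough to hold msg_len. */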
1092 lnet_msg2bufpool(lnet_msg_t *msg)
1094 lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];
1096 LASSERT (msg->msg_len <= LNET_MTU);
1097 while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
1099 LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
1106 lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
        /* lnet_parse is going to LNET_UNLOCK immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/recv/lock bit.  I
         * return EAGAIN if msg blocked and 0 if received or OK to receive */
1111 lnet_peer_t *lp = msg->msg_rxpeer;
1112 lnet_rtrbufpool_t *rbp;
1115 LASSERT (msg->msg_iov == NULL);
1116 LASSERT (msg->msg_kiov == NULL);
1117 LASSERT (msg->msg_niov == 0);
1118 LASSERT (msg->msg_routing);
1119 LASSERT (msg->msg_receiving);
1120 LASSERT (!msg->msg_sending);
1122 /* non-lnet_parse callers only send delayed messages */
1123 LASSERT (!do_recv || msg->msg_delayed);
1125 if (!msg->msg_peerrtrcredit) {
1126 LASSERT ((lp->lp_rtrcredits < 0) == !list_empty(&lp->lp_rtrq));
1128 msg->msg_peerrtrcredit = 1;
1129 lp->lp_rtrcredits--;
1130 if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
1131 lp->lp_minrtrcredits = lp->lp_rtrcredits;
1133 if (lp->lp_rtrcredits < 0) {
1134 /* must have checked eager_recv before here */
1135 LASSERT (msg->msg_delayed);
1136 list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1141 rbp = lnet_msg2bufpool(msg);
1143 if (!msg->msg_rtrcredit) {
1144 LASSERT ((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
1146 msg->msg_rtrcredit = 1;
1148 if (rbp->rbp_credits < rbp->rbp_mincredits)
1149 rbp->rbp_mincredits = rbp->rbp_credits;
1151 if (rbp->rbp_credits < 0) {
1152 /* must have checked eager_recv before here */
1153 LASSERT (msg->msg_delayed);
1154 list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1159 LASSERT (!list_empty(&rbp->rbp_bufs));
1160 rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
1161 list_del(&rb->rb_list);
1163 msg->msg_niov = rbp->rbp_npages;
1164 msg->msg_kiov = &rb->rb_kiov[0];
1168 lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
1169 0, msg->msg_len, msg->msg_len);
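
/* Return any NI/peer tx credits and router buffer/credits this message is
 * holding, kicking queued messages that were blocked waiting for them,
 * and drop the message's references on its tx/rx peers. */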
1177 lnet_return_credits_locked (lnet_msg_t *msg)
1179 lnet_peer_t *txpeer = msg->msg_txpeer;
1180 lnet_peer_t *rxpeer = msg->msg_rxpeer;
1184 if (msg->msg_txcredit) {
1185 /* give back NI txcredits */
1186 msg->msg_txcredit = 0;
1189 LASSERT((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
1192 if (ni->ni_txcredits <= 0) {
1193 msg2 = list_entry(ni->ni_txq.next, lnet_msg_t, msg_list);
1194 list_del(&msg2->msg_list);
1196 LASSERT(msg2->msg_txpeer->lp_ni == ni);
1197 LASSERT(msg2->msg_delayed);
1199 (void) lnet_post_send_locked(msg2, 1);
1203 if (msg->msg_peertxcredit) {
1204 /* give back peer txcredits */
1205 msg->msg_peertxcredit = 0;
1207 LASSERT((txpeer->lp_txcredits < 0) == !list_empty(&txpeer->lp_txq));
1209 txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
1210 LASSERT (txpeer->lp_txqnob >= 0);
1212 txpeer->lp_txcredits++;
1213 if (txpeer->lp_txcredits <= 0) {
1214 msg2 = list_entry(txpeer->lp_txq.next,
1215 lnet_msg_t, msg_list);
1216 list_del(&msg2->msg_list);
1218 LASSERT (msg2->msg_txpeer == txpeer);
1219 LASSERT (msg2->msg_delayed);
1221 (void) lnet_post_send_locked(msg2, 1);
1225 if (txpeer != NULL) {
1226 msg->msg_txpeer = NULL;
1227 lnet_peer_decref_locked(txpeer);
1231 if (msg->msg_rtrcredit) {
1232 /* give back global router credits */
1234 lnet_rtrbufpool_t *rbp;
1236 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1237 * there until it gets one allocated, or aborts the wait
1239 LASSERT (msg->msg_kiov != NULL);
1241 rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
1243 LASSERT (rbp == lnet_msg2bufpool(msg));
1245 msg->msg_kiov = NULL;
1246 msg->msg_rtrcredit = 0;
1248 LASSERT((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
1249 LASSERT((rbp->rbp_credits > 0) == !list_empty(&rbp->rbp_bufs));
1251 list_add(&rb->rb_list, &rbp->rbp_bufs);
1253 if (rbp->rbp_credits <= 0) {
1254 msg2 = list_entry(rbp->rbp_msgs.next,
1255 lnet_msg_t, msg_list);
1256 list_del(&msg2->msg_list);
1258 (void) lnet_post_routed_recv_locked(msg2, 1);
1262 if (msg->msg_peerrtrcredit) {
1263 /* give back peer router credits */
1264 msg->msg_peerrtrcredit = 0;
1266 LASSERT((rxpeer->lp_rtrcredits < 0) == !list_empty(&rxpeer->lp_rtrq));
1268 rxpeer->lp_rtrcredits++;
1269 if (rxpeer->lp_rtrcredits <= 0) {
1270 msg2 = list_entry(rxpeer->lp_rtrq.next,
1271 lnet_msg_t, msg_list);
1272 list_del(&msg2->msg_list);
1274 (void) lnet_post_routed_recv_locked(msg2, 1);
1278 LASSERT (!msg->msg_rtrcredit);
1279 LASSERT (!msg->msg_peerrtrcredit);
1281 if (rxpeer != NULL) {
1282 msg->msg_rxpeer = NULL;
1283 lnet_peer_decref_locked(rxpeer);
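
/* Resolve the next hop for 'msg' (a local NI for directly-connected
 * targets, otherwise the best available gateway), charge send credits via
 * lnet_post_send_locked() and hand the message to the LND.  Returns 0 on
 * success or when the message has been queued waiting for credits. */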
1288 lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
1290 lnet_nid_t dst_nid = msg->msg_target.nid;
1292 lnet_ni_t *local_ni;
1293 lnet_remotenet_t *rnet;
1294 lnet_route_t *route;
1295 lnet_route_t *best_route;
1296 struct list_head *tmp;
1301 LASSERT (msg->msg_txpeer == NULL);
1302 LASSERT (!msg->msg_sending);
1303 LASSERT (!msg->msg_target_is_router);
1304 LASSERT (!msg->msg_receiving);
1306 msg->msg_sending = 1;
1308 /* NB! ni != NULL == interface pre-determined (ACK/REPLY) */
1312 if (the_lnet.ln_shutdown) {
1317 if (src_nid == LNET_NID_ANY) {
1320 src_ni = lnet_nid2ni_locked(src_nid);
1321 if (src_ni == NULL) {
1323 CERROR("Can't send to %s: src %s is not a local nid\n",
1324 libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid));
1327 LASSERT (!msg->msg_routing);
1330 /* Is this for someone on a local network? */
1331 local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid));
1333 if (local_ni != NULL) {
1334 if (src_ni == NULL) {
1336 src_nid = src_ni->ni_nid;
1337 } else if (src_ni == local_ni) {
1338 lnet_ni_decref_locked(local_ni);
1340 lnet_ni_decref_locked(local_ni);
1341 lnet_ni_decref_locked(src_ni);
1343 CERROR("no route to %s via from %s\n",
1344 libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid));
1348 LASSERT (src_nid != LNET_NID_ANY);
1350 if (!msg->msg_routing)
1351 msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
1353 if (src_ni == the_lnet.ln_loni) {
1354 /* No send credit hassles with LOLND */
1356 lnet_ni_send(src_ni, msg);
1357 lnet_ni_decref(src_ni);
1361 rc = lnet_nid2peer_locked(&lp, dst_nid);
1362 lnet_ni_decref_locked(src_ni); /* lp has ref on src_ni; lose mine */
1365 CERROR("Error %d finding peer %s\n", rc,
1366 libcfs_nid2str(dst_nid));
1367 /* ENOMEM or shutting down */
1370 LASSERT (lp->lp_ni == src_ni);
1376 * - once application finishes computation, check here to update
1377 * router states before it waits for pending IO in LNetEQPoll
1378 * - recursion breaker: router checker sends no message
1379 * to remote networks */
1380 if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING)
1381 lnet_router_checker();
1385 /* sending to a remote network */
1386 rnet = lnet_find_net_locked(LNET_NIDNET(dst_nid));
1389 lnet_ni_decref_locked(src_ni);
1391 CERROR("No route to %s\n", libcfs_id2str(msg->msg_target));
1392 return -EHOSTUNREACH;
1395 /* Find the best gateway I can use */
1398 list_for_each(tmp, &rnet->lrn_routes) {
1399 route = list_entry(tmp, lnet_route_t, lr_list);
1400 lp2 = route->lr_gateway;
1402 if (lp2->lp_alive &&
1403 lnet_router_down_ni(lp2, rnet->lrn_net) <= 0 &&
1404 (src_ni == NULL || lp2->lp_ni == src_ni) &&
1405 (lp == NULL || lnet_compare_routers(lp2, lp) > 0)) {
1413 lnet_ni_decref_locked(src_ni);
1415 CERROR("No route to %s (all routers down)\n",
1416 libcfs_id2str(msg->msg_target));
1417 return -EHOSTUNREACH;
1420 /* Place selected route at the end of the route list to ensure
1421 * fairness; everything else being equal... */
1422 list_del(&best_route->lr_list);
1423 list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
1425 if (src_ni == NULL) {
1427 src_nid = src_ni->ni_nid;
1429 LASSERT (src_ni == lp->lp_ni);
1430 lnet_ni_decref_locked(src_ni);
1433 lnet_peer_addref_locked(lp);
1435 LASSERT (src_nid != LNET_NID_ANY);
1437 if (!msg->msg_routing) {
1438 /* I'm the source and now I know which NI to send on */
1439 msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
1442 msg->msg_target_is_router = 1;
1443 msg->msg_target.nid = lp->lp_nid;
1444 msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
1447 /* 'lp' is our best choice of peer */
1449 LASSERT (!msg->msg_peertxcredit);
1450 LASSERT (!msg->msg_txcredit);
1451 LASSERT (msg->msg_txpeer == NULL);
1453 msg->msg_txpeer = lp; /* msg takes my ref on lp */
1455 rc = lnet_post_send_locked(msg, 0);
1458 if (rc == EHOSTUNREACH)
1459 return -EHOSTUNREACH;
1462 lnet_ni_send(src_ni, msg);
1468 lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg)
1470 /* ALWAYS called holding the LNET_LOCK */
1471 /* Here, we commit the MD to a network OP by marking it busy and
1472 * decrementing its threshold. Come what may, the network "owns"
1473 * the MD until a call to lnet_finalize() signals completion. */
1474 LASSERT (!msg->msg_routing);
1479 if (md->md_threshold != LNET_MD_THRESH_INF) {
1480 LASSERT (md->md_threshold > 0);
1484 the_lnet.ln_counters.msgs_alloc++;
1485 if (the_lnet.ln_counters.msgs_alloc >
1486 the_lnet.ln_counters.msgs_max)
1487 the_lnet.ln_counters.msgs_max =
1488 the_lnet.ln_counters.msgs_alloc;
1490 LASSERT (!msg->msg_onactivelist);
1491 msg->msg_onactivelist = 1;
1492 list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
1496 lnet_drop_message (lnet_ni_t *ni, void *private, unsigned int nob)
1499 the_lnet.ln_counters.drop_count++;
1500 the_lnet.ln_counters.drop_length += nob;
1503 lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
1507 lnet_drop_delayed_put(lnet_msg_t *msg, char *reason)
1509 lnet_process_id_t id = {0};
1511 id.nid = msg->msg_hdr.src_nid;
1512 id.pid = msg->msg_hdr.src_pid;
1514 LASSERT (msg->msg_md == NULL);
1515 LASSERT (msg->msg_delayed);
1516 LASSERT (msg->msg_rxpeer != NULL);
1517 LASSERT (msg->msg_hdr.type == LNET_MSG_PUT);
1519 CWARN("Dropping delayed PUT from %s portal %d match "LPU64
1520 " offset %d length %d: %s\n",
1522 msg->msg_hdr.msg.put.ptl_index,
1523 msg->msg_hdr.msg.put.match_bits,
1524 msg->msg_hdr.msg.put.offset,
1525 msg->msg_hdr.payload_length,
1528 /* NB I can't drop msg's ref on msg_rxpeer until after I've
1529 * called lnet_drop_message(), so I just hang onto msg as well
1530 * until that's done */
1532 lnet_drop_message(msg->msg_rxpeer->lp_ni,
1533 msg->msg_private, msg->msg_len);
1537 lnet_peer_decref_locked(msg->msg_rxpeer);
1538 msg->msg_rxpeer = NULL;
1546 LNetSetLazyPortal(int portal)
1548 lnet_portal_t *ptl = &the_lnet.ln_portals[portal];
1550 if (portal < 0 || portal >= the_lnet.ln_nportals)
1553 CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
1557 ptl->ptl_options |= LNET_PTL_LAZY;
1565 LNetClearLazyPortal(int portal)
1567 struct list_head zombies;
1568 lnet_portal_t *ptl = &the_lnet.ln_portals[portal];
1571 if (portal < 0 || portal >= the_lnet.ln_nportals)
1576 if ((ptl->ptl_options & LNET_PTL_LAZY) == 0) {
1581 if (the_lnet.ln_shutdown)
1582 CWARN ("Active lazy portal %d on exit\n", portal);
1584 CDEBUG (D_NET, "clearing portal %d lazy\n", portal);
1586 /* grab all the blocked messages atomically */
1587 list_add(&zombies, &ptl->ptl_msgq);
1588 list_del_init(&ptl->ptl_msgq);
1590 ptl->ptl_msgq_version++;
1591 ptl->ptl_options &= ~LNET_PTL_LAZY;
1595 while (!list_empty(&zombies)) {
1596 msg = list_entry(zombies.next, lnet_msg_t, msg_list);
1597 list_del(&msg->msg_list);
1599 lnet_drop_delayed_put(msg, "Clearing lazy portal attr");
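
/* Deliver a matched PUT into 'md': update receive counters, set up the
 * PUT event, note whether an ACK is required, and ask the LND to receive
 * the payload into the MD's buffers. */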
1606 lnet_recv_put(lnet_libmd_t *md, lnet_msg_t *msg, int delayed,
1607 unsigned int offset, unsigned int mlength)
1609 lnet_hdr_t *hdr = &msg->msg_hdr;
1613 the_lnet.ln_counters.recv_count++;
1614 the_lnet.ln_counters.recv_length += mlength;
1619 lnet_setpayloadbuffer(msg);
1621 msg->msg_ev.type = LNET_EVENT_PUT;
1622 msg->msg_ev.target.pid = hdr->dest_pid;
1623 msg->msg_ev.target.nid = hdr->dest_nid;
1624 msg->msg_ev.hdr_data = hdr->msg.put.hdr_data;
1626 /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
1627 * it back into the ACK during lnet_finalize() */
1628 msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
1629 (md->md_options & LNET_MD_ACK_DISABLE) == 0);
1631 lnet_ni_recv(msg->msg_rxpeer->lp_ni,
1633 msg, delayed, offset, mlength,
1634 hdr->payload_length);
1637 /* called with LNET_LOCK held */
1639 lnet_match_blocked_msg(lnet_libmd_t *md)
1641 CFS_LIST_HEAD (drops);
1642 CFS_LIST_HEAD (matches);
1643 struct list_head *tmp;
1644 struct list_head *entry;
1646 lnet_me_t *me = md->md_me;
1647 lnet_portal_t *ptl = &the_lnet.ln_portals[me->me_portal];
1649 LASSERT (me->me_portal < (unsigned int)the_lnet.ln_nportals);
1651 if ((ptl->ptl_options & LNET_PTL_LAZY) == 0) {
1652 LASSERT (list_empty(&ptl->ptl_msgq));
1656 LASSERT (md->md_refcount == 0); /* a brand new MD */
1658 list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
1661 unsigned int mlength;
1662 unsigned int offset;
1664 lnet_process_id_t src;
1666 msg = list_entry(entry, lnet_msg_t, msg_list);
1668 LASSERT (msg->msg_delayed);
1670 hdr = &msg->msg_hdr;
1671 index = hdr->msg.put.ptl_index;
1673 src.nid = hdr->src_nid;
1674 src.pid = hdr->src_pid;
1676 rc = lnet_try_match_md(index, LNET_MD_OP_PUT, src,
1677 hdr->payload_length,
1678 hdr->msg.put.offset,
1679 hdr->msg.put.match_bits,
1680 md, msg, &mlength, &offset);
1682 if (rc == LNET_MATCHMD_NONE)
1685 /* Hurrah! This _is_ a match */
1686 list_del(&msg->msg_list);
1687 ptl->ptl_msgq_version++;
1689 if (rc == LNET_MATCHMD_OK) {
1690 list_add_tail(&msg->msg_list, &matches);
1692 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
1693 "match "LPU64" offset %d length %d.\n",
1695 hdr->msg.put.ptl_index,
1696 hdr->msg.put.match_bits,
1697 hdr->msg.put.offset,
1698 hdr->payload_length);
1700 LASSERT (rc == LNET_MATCHMD_DROP);
1702 list_add_tail(&msg->msg_list, &drops);
1705 if (lnet_md_exhausted(md))
1711 list_for_each_safe (entry, tmp, &drops) {
1712 msg = list_entry(entry, lnet_msg_t, msg_list);
1714 list_del(&msg->msg_list);
1716 lnet_drop_delayed_put(msg, "Bad match");
1719 list_for_each_safe (entry, tmp, &matches) {
1720 msg = list_entry(entry, lnet_msg_t, msg_list);
1722 list_del(&msg->msg_list);
1724 /* md won't disappear under me, since each msg
1725 * holds a ref on it */
1726 lnet_recv_put(md, msg, 1,
1728 msg->msg_ev.mlength);
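
/* Handle an incoming PUT: convert the PUT-specific header fields to host
 * byte order and try to match an MD on the target portal.  Unmatched PUTs
 * to lazy portals are queued (after lnd_eager_recv) for a later match;
 * otherwise the message is dropped with ENOENT. */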
1735 lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
1740 lnet_hdr_t *hdr = &msg->msg_hdr;
1741 unsigned int rlength = hdr->payload_length;
1742 unsigned int mlength = 0;
1743 unsigned int offset = 0;
        lnet_process_id_t src = {0};
1748 src.nid = hdr->src_nid;
1749 src.pid = hdr->src_pid;
1751 /* Convert put fields to host byte order */
1752 hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
1753 hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
1754 hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);
1756 index = hdr->msg.put.ptl_index;
1757 ptl = &the_lnet.ln_portals[index];
1762 rc = lnet_match_md(index, LNET_MD_OP_PUT, src,
1763 rlength, hdr->msg.put.offset,
1764 hdr->msg.put.match_bits, msg,
1765 &mlength, &offset, &md);
1770 case LNET_MATCHMD_OK:
1772 lnet_recv_put(md, msg, msg->msg_delayed, offset, mlength);
1775 case LNET_MATCHMD_NONE:
1776 version = ptl->ptl_ml_version;
1779 if (!msg->msg_delayed)
1780 rc = lnet_eager_recv_locked(msg);
1783 !the_lnet.ln_shutdown &&
1784 ((ptl->ptl_options & LNET_PTL_LAZY) != 0)) {
1785 if (version != ptl->ptl_ml_version)
1788 list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
1789 ptl->ptl_msgq_version++;
1792 CDEBUG(D_NET, "Delaying PUT from %s portal %d match "
1793 LPU64" offset %d length %d: no match \n",
1794 libcfs_id2str(src), index,
1795 hdr->msg.put.match_bits,
1796 hdr->msg.put.offset, rlength);
1801 case LNET_MATCHMD_DROP:
1803 "Dropping PUT from %s portal %d match "LPU64
1804 " offset %d length %d: %d\n",
1805 libcfs_id2str(src), index,
1806 hdr->msg.put.match_bits,
1807 hdr->msg.put.offset, rlength, rc);
1810 return ENOENT; /* +ve: OK but no match */
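
/* Handle an incoming GET: match an MD on the target portal and send the
 * REPLY carrying the requested data.  For "optimized" GETs (rdma_get) the
 * LND transfers the data itself and completes the REPLY from its receive
 * path. */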
1815 lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
1817 lnet_hdr_t *hdr = &msg->msg_hdr;
1818 unsigned int mlength = 0;
1819 unsigned int offset = 0;
1820 lnet_process_id_t src = {0};
1821 lnet_handle_wire_t reply_wmd;
1825 src.nid = hdr->src_nid;
1826 src.pid = hdr->src_pid;
1828 /* Convert get fields to host byte order */
1829 hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
1830 hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
1831 hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
1832 hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);
1836 rc = lnet_match_md(hdr->msg.get.ptl_index, LNET_MD_OP_GET, src,
1837 hdr->msg.get.sink_length, hdr->msg.get.src_offset,
1838 hdr->msg.get.match_bits, msg,
1839 &mlength, &offset, &md);
1840 if (rc == LNET_MATCHMD_DROP) {
1842 "Dropping GET from %s portal %d match "LPU64
1843 " offset %d length %d\n",
1845 hdr->msg.get.ptl_index,
1846 hdr->msg.get.match_bits,
1847 hdr->msg.get.src_offset,
1848 hdr->msg.get.sink_length);
1850 return ENOENT; /* +ve: OK but no match */
1853 LASSERT (rc == LNET_MATCHMD_OK);
1855 the_lnet.ln_counters.send_count++;
1856 the_lnet.ln_counters.send_length += mlength;
1860 msg->msg_ev.type = LNET_EVENT_GET;
1861 msg->msg_ev.target.pid = hdr->dest_pid;
1862 msg->msg_ev.target.nid = hdr->dest_nid;
1863 msg->msg_ev.hdr_data = 0;
1865 reply_wmd = hdr->msg.get.return_wmd;
1867 lnet_prep_send(msg, LNET_MSG_REPLY, src, offset, mlength);
1869 msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
1872 /* The LND completes the REPLY from her recv procedure */
1873 lnet_ni_recv(ni, msg->msg_private, msg, 0,
1874 msg->msg_offset, msg->msg_len, msg->msg_len);
1878 lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
1879 msg->msg_receiving = 0;
1881 rc = lnet_send(ni->ni_nid, msg);
1883 /* didn't get as far as lnet_ni_send() */
1884 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
1885 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), rc);
1887 lnet_finalize(ni, msg, rc);
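
/* Handle an incoming REPLY: look up the MD named by the wire handle in
 * the REPLY header (handles are only valid on the node that created
 * them), check the length against the MD, and receive the payload into
 * it. */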
1894 lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
1896 void *private = msg->msg_private;
1897 lnet_hdr_t *hdr = &msg->msg_hdr;
1898 lnet_process_id_t src = {0};
1905 src.nid = hdr->src_nid;
1906 src.pid = hdr->src_pid;
1908 /* NB handles only looked up by creator (no flips) */
1909 md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
1910 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
1911 CDEBUG(D_NETERROR, "%s: Dropping REPLY from %s for %s "
1912 "MD "LPX64"."LPX64"\n",
1913 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1914 (md == NULL) ? "invalid" : "inactive",
1915 hdr->msg.reply.dst_wmd.wh_interface_cookie,
1916 hdr->msg.reply.dst_wmd.wh_object_cookie);
1917 if (md != NULL && md->md_me != NULL)
1918 CERROR("REPLY MD also attached to portal %d\n",
1919 md->md_me->me_portal);
1922 return ENOENT; /* +ve: OK but no match */
1925 LASSERT (md->md_offset == 0);
1927 rlength = hdr->payload_length;
1928 mlength = MIN(rlength, (int)md->md_length);
1930 if (mlength < rlength &&
1931 (md->md_options & LNET_MD_TRUNCATE) == 0) {
1932 CDEBUG(D_NETERROR, "%s: Dropping REPLY from %s length %d "
1933 "for MD "LPX64" would overflow (%d)\n",
1934 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1935 rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
1938 return ENOENT; /* +ve: OK but no match */
1941 CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md "LPX64"\n",
1942 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1943 mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
1945 lnet_commit_md(md, msg);
1948 lnet_setpayloadbuffer(msg);
1950 msg->msg_ev.type = LNET_EVENT_REPLY;
1951 msg->msg_ev.target.pid = hdr->dest_pid;
1952 msg->msg_ev.target.nid = hdr->dest_nid;
1953 msg->msg_ev.initiator = src;
1954 msg->msg_ev.rlength = rlength;
1955 msg->msg_ev.mlength = mlength;
1956 msg->msg_ev.offset = 0;
1958 lnet_md_deconstruct(md, &msg->msg_ev.md);
1959 lnet_md2handle(&msg->msg_ev.md_handle, md);
1961 the_lnet.ln_counters.recv_count++;
1962 the_lnet.ln_counters.recv_length += mlength;
1966 lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
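
/* Handle an incoming ACK: look up the source MD from the wire handle,
 * build the ACK event and consume the (payload-free) message. */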
1971 lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
1973 lnet_hdr_t *hdr = &msg->msg_hdr;
1974 lnet_process_id_t src = {0};
1977 src.nid = hdr->src_nid;
1978 src.pid = hdr->src_pid;
1980 /* Convert ack fields to host byte order */
1981 hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
1982 hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
1986 /* NB handles only looked up by creator (no flips) */
1987 md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
1988 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
1989 /* Don't moan; this is expected */
1991 "%s: Dropping ACK from %s to %s MD "LPX64"."LPX64"\n",
1992 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1993 (md == NULL) ? "invalid" : "inactive",
1994 hdr->msg.ack.dst_wmd.wh_interface_cookie,
1995 hdr->msg.ack.dst_wmd.wh_object_cookie);
1996 if (md != NULL && md->md_me != NULL)
1997 CERROR("Source MD also attached to portal %d\n",
1998 md->md_me->me_portal);
2001 return ENOENT; /* +ve! */
2004 CDEBUG(D_NET, "%s: ACK from %s into md "LPX64"\n",
2005 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
2006 hdr->msg.ack.dst_wmd.wh_object_cookie);
2008 lnet_commit_md(md, msg);
2010 msg->msg_ev.type = LNET_EVENT_ACK;
2011 msg->msg_ev.target.pid = hdr->dest_pid;
2012 msg->msg_ev.target.nid = hdr->dest_nid;
2013 msg->msg_ev.initiator = src;
2014 msg->msg_ev.mlength = hdr->msg.ack.mlength;
2015 msg->msg_ev.match_bits = hdr->msg.ack.match_bits;
2017 lnet_md_deconstruct(md, &msg->msg_ev.md);
2018 lnet_md2handle(&msg->msg_ev.md_handle, md);
2020 the_lnet.ln_counters.recv_count++;
2024 lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
2029 lnet_msgtyp2str (int type)
2038 case LNET_MSG_REPLY:
2040 case LNET_MSG_HELLO:
2043 return ("<UNKNOWN>");
2048 lnet_print_hdr(lnet_hdr_t * hdr)
2050 lnet_process_id_t src = {0};
2051 lnet_process_id_t dst = {0};
2052 char *type_str = lnet_msgtyp2str (hdr->type);
2054 src.nid = hdr->src_nid;
2055 src.pid = hdr->src_pid;
2057 dst.nid = hdr->dest_nid;
2058 dst.pid = hdr->dest_pid;
2060 CWARN("P3 Header at %p of type %s\n", hdr, type_str);
2061 CWARN(" From %s\n", libcfs_id2str(src));
2062 CWARN(" To %s\n", libcfs_id2str(dst));
2064 switch (hdr->type) {
2069 CWARN(" Ptl index %d, ack md "LPX64"."LPX64", "
2070 "match bits "LPU64"\n",
2071 hdr->msg.put.ptl_index,
2072 hdr->msg.put.ack_wmd.wh_interface_cookie,
2073 hdr->msg.put.ack_wmd.wh_object_cookie,
2074 hdr->msg.put.match_bits);
2075 CWARN(" Length %d, offset %d, hdr data "LPX64"\n",
2076 hdr->payload_length, hdr->msg.put.offset,
2077 hdr->msg.put.hdr_data);
2081 CWARN(" Ptl index %d, return md "LPX64"."LPX64", "
2082 "match bits "LPU64"\n", hdr->msg.get.ptl_index,
2083 hdr->msg.get.return_wmd.wh_interface_cookie,
2084 hdr->msg.get.return_wmd.wh_object_cookie,
2085 hdr->msg.get.match_bits);
2086 CWARN(" Length %d, src offset %d\n",
2087 hdr->msg.get.sink_length,
2088 hdr->msg.get.src_offset);
2092 CWARN(" dst md "LPX64"."LPX64", "
2093 "manipulated length %d\n",
2094 hdr->msg.ack.dst_wmd.wh_interface_cookie,
2095 hdr->msg.ack.dst_wmd.wh_object_cookie,
2096 hdr->msg.ack.mlength);
2099 case LNET_MSG_REPLY:
2100 CWARN(" dst md "LPX64"."LPX64", "
2102 hdr->msg.reply.dst_wmd.wh_interface_cookie,
2103 hdr->msg.reply.dst_wmd.wh_object_cookie,
2104 hdr->payload_length);
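
/* Main entry point for the LNDs: parse the header of a message received
 * from 'from_nid', validate it, and either route it onwards or dispatch
 * it to the appropriate PUT/GET/REPLY/ACK handler.  Returns 0 when the
 * message has been (or will be) consumed, and a negative errno when the
 * LND should drop it without lnd_recv() being called back. */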
2110 lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
2111 void *private, int rdma_req)
2116 lnet_pid_t dest_pid;
2117 lnet_nid_t dest_nid;
2119 __u32 payload_length;
2122 LASSERT (!in_interrupt ());
2124 type = le32_to_cpu(hdr->type);
2125 src_nid = le64_to_cpu(hdr->src_nid);
2126 dest_nid = le64_to_cpu(hdr->dest_nid);
2127 dest_pid = le32_to_cpu(hdr->dest_pid);
2128 payload_length = le32_to_cpu(hdr->payload_length);
2130 for_me = (ni->ni_nid == dest_nid);
2135 if (payload_length > 0) {
2136 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
2137 libcfs_nid2str(from_nid),
2138 libcfs_nid2str(src_nid),
2139 lnet_msgtyp2str(type), payload_length);
2145 case LNET_MSG_REPLY:
2146 if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
2147 CERROR("%s, src %s: bad %s payload %d "
2148 "(%d max expected)\n",
2149 libcfs_nid2str(from_nid),
2150 libcfs_nid2str(src_nid),
2151 lnet_msgtyp2str(type),
2153 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
2159 CERROR("%s, src %s: Bad message type 0x%x\n",
2160 libcfs_nid2str(from_nid),
2161 libcfs_nid2str(src_nid), type);
2165 if (the_lnet.ln_routing) {
2166 cfs_time_t now = cfs_time_current();
2170 ni->ni_last_alive = now;
2171 if (ni->ni_status != NULL &&
2172 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
2173 ni->ni_status->ns_status = LNET_NI_STATUS_UP;
2178 /* Regard a bad destination NID as a protocol error. Senders should
2179 * know what they're doing; if they don't they're misconfigured, buggy
2180 * or malicious so we chop them off at the knees :) */
2183 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
2184 /* should have gone direct */
2185 CERROR ("%s, src %s: Bad dest nid %s "
2186 "(should have been sent direct)\n",
2187 libcfs_nid2str(from_nid),
2188 libcfs_nid2str(src_nid),
2189 libcfs_nid2str(dest_nid));
2193 if (lnet_islocalnid(dest_nid)) {
2194 /* dest is another local NI; sender should have used
2195 * this node's NID on its own network */
2196 CERROR ("%s, src %s: Bad dest nid %s "
2197 "(it's my nid but on a different network)\n",
2198 libcfs_nid2str(from_nid),
2199 libcfs_nid2str(src_nid),
2200 libcfs_nid2str(dest_nid));
2204 if (rdma_req && type == LNET_MSG_GET) {
2205 CERROR ("%s, src %s: Bad optimized GET for %s "
2206 "(final destination must be me)\n",
2207 libcfs_nid2str(from_nid),
2208 libcfs_nid2str(src_nid),
2209 libcfs_nid2str(dest_nid));
2213 if (!the_lnet.ln_routing) {
2214 CERROR ("%s, src %s: Dropping message for %s "
2215 "(routing not enabled)\n",
2216 libcfs_nid2str(from_nid),
2217 libcfs_nid2str(src_nid),
2218 libcfs_nid2str(dest_nid));
2223 /* Message looks OK; we're not going to return an error, so we MUST
2224 * call back lnd_recv() come what may... */
2226 if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
2227 fail_peer (src_nid, 0)) /* shall we now? */
2229 CERROR("%s, src %s: Dropping %s to simulate failure\n",
2230 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
2231 lnet_msgtyp2str(type));
2235 msg = lnet_msg_alloc();
2237 CERROR("%s, src %s: Dropping %s (out of memory)\n",
2238 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
2239 lnet_msgtyp2str(type));
2243 /* msg zeroed in lnet_msg_alloc; i.e. flags all clear, pointers NULL etc */
2245 msg->msg_type = type;
2246 msg->msg_private = private;
2247 msg->msg_receiving = 1;
2248 msg->msg_len = msg->msg_wanted = payload_length;
2249 msg->msg_offset = 0;
2250 msg->msg_hdr = *hdr;
2253 rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid);
2256 CERROR("%s, src %s: Dropping %s "
2257 "(error %d looking up sender)\n",
2258 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
2259 lnet_msgtyp2str(type), rc);
2268 msg->msg_target.pid = dest_pid;
2269 msg->msg_target.nid = dest_nid;
2270 msg->msg_routing = 1;
2271 msg->msg_offset = 0;
2274 if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
2275 lnet_msg2bufpool(msg)->rbp_credits <= 0) {
2276 rc = lnet_eager_recv_locked(msg);
2282 lnet_commit_routedmsg(msg);
2283 rc = lnet_post_routed_recv_locked(msg, 0);
2287 lnet_ni_recv(ni, msg->msg_private, msg, 0,
2288 0, payload_length, payload_length);
2292 /* convert common msg->hdr fields to host byteorder */
2293 msg->msg_hdr.type = type;
2294 msg->msg_hdr.src_nid = src_nid;
2295 msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
2296 msg->msg_hdr.dest_nid = dest_nid;
2297 msg->msg_hdr.dest_pid = dest_pid;
2298 msg->msg_hdr.payload_length = payload_length;
2300 msg->msg_ev.sender = from_nid;
2304 rc = lnet_parse_ack(ni, msg);
2307 rc = lnet_parse_put(ni, msg);
2310 rc = lnet_parse_get(ni, msg, rdma_req);
2312 case LNET_MSG_REPLY:
2313 rc = lnet_parse_reply(ni, msg);
2317 goto free_drop; /* prevent an unused label if !kernel */
2323 LASSERT (rc == ENOENT);
2326 LASSERT (msg->msg_md == NULL);
2328 if (msg->msg_rxpeer != NULL) {
2329 lnet_peer_decref_locked(msg->msg_rxpeer);
2330 msg->msg_rxpeer = NULL;
2332 lnet_msg_free(msg); /* expects LNET_LOCK held */
2336 lnet_drop_message(ni, private, payload_length);
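
/* LNetPut: send the memory described by MD 'mdh' to 'portal' at 'target',
 * tagged with 'match_bits'; 'self' selects the source NID (or
 * LNET_NID_ANY).  If 'ack' is LNET_ACK_REQ the receiver is asked to ACK.
 * Completion (and any ACK) is signalled through the MD's event queue.
 * Typical usage (a sketch; the MD setup is assumed): bind the source
 * buffer with LNetMDBind() to obtain 'mdh', then call LNetPut(). */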
2341 LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
2342 lnet_process_id_t target, unsigned int portal,
2343 __u64 match_bits, unsigned int offset,
2350 LASSERT (the_lnet.ln_init);
2351 LASSERT (the_lnet.ln_refcount > 0);
2353 if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
2354 fail_peer (target.nid, 1)) /* shall we now? */
2356 CERROR("Dropping PUT to %s: simulated failure\n",
2357 libcfs_id2str(target));
2361 msg = lnet_msg_alloc();
2363 CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
2364 libcfs_id2str(target));
2370 md = lnet_handle2md(&mdh);
2371 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
2374 CERROR("Dropping PUT ("LPU64":%d:%s): MD (%d) invalid\n",
2375 match_bits, portal, libcfs_id2str(target),
2376 md == NULL ? -1 : md->md_threshold);
2377 if (md != NULL && md->md_me != NULL)
2378 CERROR("Source MD also attached to portal %d\n",
2379 md->md_me->me_portal);
2385 CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
2387 lnet_commit_md(md, msg);
2389 lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
2391 msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
2392 msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
2393 msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
2394 msg->msg_hdr.msg.put.hdr_data = hdr_data;
2396 /* NB handles only looked up by creator (no flips) */
2397 if (ack == LNET_ACK_REQ) {
2398 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2399 the_lnet.ln_interface_cookie;
2400 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2401 md->md_lh.lh_cookie;
2403 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2404 LNET_WIRE_HANDLE_COOKIE_NONE;
2405 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2406 LNET_WIRE_HANDLE_COOKIE_NONE;
2409 msg->msg_ev.type = LNET_EVENT_SEND;
2410 msg->msg_ev.initiator.nid = LNET_NID_ANY;
2411 msg->msg_ev.initiator.pid = the_lnet.ln_pid;
2412 msg->msg_ev.target = target;
2413 msg->msg_ev.sender = LNET_NID_ANY;
2414 msg->msg_ev.pt_index = portal;
2415 msg->msg_ev.match_bits = match_bits;
2416 msg->msg_ev.rlength = md->md_length;
2417 msg->msg_ev.mlength = md->md_length;
2418 msg->msg_ev.offset = offset;
2419 msg->msg_ev.hdr_data = hdr_data;
2421 lnet_md_deconstruct(md, &msg->msg_ev.md);
2422 lnet_md2handle(&msg->msg_ev.md_handle, md);
2424 the_lnet.ln_counters.send_count++;
2425 the_lnet.ln_counters.send_length += md->md_length;
2429 rc = lnet_send(self, msg);
2431 CERROR("Error sending PUT to %s: %d\n",
2432 libcfs_id2str(target), rc);
2433 lnet_finalize (NULL, msg, rc);
2436 /* completion will be signalled by an event */
2441 lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
2443 /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
2444 * returns a msg for the LND to pass to lnet_finalize() when the sink
2445 * data has been received.
2447 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
2448 * lnet_finalize() is called on it, so the LND must call this first */
2450 lnet_msg_t *msg = lnet_msg_alloc();
2451 lnet_libmd_t *getmd = getmsg->msg_md;
2452 lnet_process_id_t peer_id = getmsg->msg_target;
2454 LASSERT (!getmsg->msg_target_is_router);
2455 LASSERT (!getmsg->msg_routing);
2459 LASSERT (getmd->md_refcount > 0);
2462 CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
2463 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
2467 if (getmd->md_threshold == 0) {
2468 CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
2469 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
2474 LASSERT (getmd->md_offset == 0);
2476 CDEBUG(D_NET, "%s: Reply from %s md %p\n",
2477 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
2479 lnet_commit_md (getmd, msg);
2481 msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
2483 msg->msg_ev.type = LNET_EVENT_REPLY;
2484 msg->msg_ev.initiator = peer_id;
2485 msg->msg_ev.sender = peer_id.nid; /* optimized GETs can't be routed */
2486 msg->msg_ev.rlength = msg->msg_ev.mlength = getmd->md_length;
2487 msg->msg_ev.offset = 0;
2489 lnet_md_deconstruct(getmd, &msg->msg_ev.md);
2490 lnet_md2handle(&msg->msg_ev.md_handle, getmd);
2492 the_lnet.ln_counters.recv_count++;
2493 the_lnet.ln_counters.recv_length += getmd->md_length;
2502 the_lnet.ln_counters.drop_count++;
2503 the_lnet.ln_counters.drop_length += getmd->md_length;
2511 lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
        /* Set the REPLY length, now that the RDMA that elides the REPLY
         * message has completed and I know it. */
2515 LASSERT (reply != NULL);
2516 LASSERT (reply->msg_type == LNET_MSG_GET);
2517 LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);
2519 /* NB I trusted my peer to RDMA. If she tells me she's written beyond
2520 * the end of my buffer, I might as well be dead. */
2521 LASSERT (len <= reply->msg_ev.mlength);
2523 reply->msg_ev.mlength = len;
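
/* LNetGet: request the data matched on 'portal'/'match_bits' at 'target'
 * and deliver it into the local MD 'mdh'.  Completion of the send and the
 * arrival of the REPLY are signalled through the MD's event queue. */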
2527 LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
2528 lnet_process_id_t target, unsigned int portal,
2529 __u64 match_bits, unsigned int offset)
2535 LASSERT (the_lnet.ln_init);
2536 LASSERT (the_lnet.ln_refcount > 0);
2538 if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
2539 fail_peer (target.nid, 1)) /* shall we now? */
2541 CERROR("Dropping GET to %s: simulated failure\n",
2542 libcfs_id2str(target));
2546 msg = lnet_msg_alloc();
2548 CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
2549 libcfs_id2str(target));
2555 md = lnet_handle2md(&mdh);
2556 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
2559 CERROR("Dropping GET ("LPU64":%d:%s): MD (%d) invalid\n",
2560 match_bits, portal, libcfs_id2str(target),
2561 md == NULL ? -1 : md->md_threshold);
2562 if (md != NULL && md->md_me != NULL)
2563 CERROR("REPLY MD also attached to portal %d\n",
2564 md->md_me->me_portal);
2570 CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
2572 lnet_commit_md(md, msg);
2574 lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
2576 msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
2577 msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
2578 msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
2579 msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
2581 /* NB handles only looked up by creator (no flips) */
2582 msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
2583 the_lnet.ln_interface_cookie;
2584 msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
2585 md->md_lh.lh_cookie;
2587 msg->msg_ev.type = LNET_EVENT_SEND;
2588 msg->msg_ev.initiator.nid = LNET_NID_ANY;
2589 msg->msg_ev.initiator.pid = the_lnet.ln_pid;
2590 msg->msg_ev.target = target;
2591 msg->msg_ev.sender = LNET_NID_ANY;
2592 msg->msg_ev.pt_index = portal;
2593 msg->msg_ev.match_bits = match_bits;
2594 msg->msg_ev.rlength = md->md_length;
2595 msg->msg_ev.mlength = md->md_length;
2596 msg->msg_ev.offset = offset;
2597 msg->msg_ev.hdr_data = 0;
2599 lnet_md_deconstruct(md, &msg->msg_ev.md);
2600 lnet_md2handle(&msg->msg_ev.md_handle, md);
2602 the_lnet.ln_counters.send_count++;
2606 rc = lnet_send(self, msg);
2608 CERROR("error sending GET to %s: %d\n",
2609 libcfs_id2str(target), rc);
2610 lnet_finalize (NULL, msg, rc);
2613 /* completion will be signalled by an event */
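
/* LNetDist: report the "distance" to 'dstnid' (0 for a local NID when
 * local_nid_dist_zero is set, 1 for a directly-connected network,
 * otherwise the hop count of the configured route), plus the source NID
 * and network order that would be used to reach it.  Returns
 * -EHOSTUNREACH if no route exists. */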
2618 LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
2620 struct list_head *e;
2622 lnet_route_t *route;
2623 lnet_remotenet_t *rnet;
2624 __u32 dstnet = LNET_NIDNET(dstnid);
2628 /* if !local_nid_dist_zero, I don't return a distance of 0 ever
2629 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
2630 * keep order 0 free for 0@lo and order 1 free for a local NID
2633 LASSERT (the_lnet.ln_init);
2634 LASSERT (the_lnet.ln_refcount > 0);
2638 list_for_each (e, &the_lnet.ln_nis) {
2639 ni = list_entry(e, lnet_ni_t, ni_list);
2641 if (ni->ni_nid == dstnid) {
2642 if (srcnidp != NULL)
2644 if (orderp != NULL) {
2645 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
2652 return local_nid_dist_zero ? 0 : 1;
2655 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
2656 if (srcnidp != NULL)
2657 *srcnidp = ni->ni_nid;
2667 list_for_each (e, &the_lnet.ln_remote_nets) {
2668 rnet = list_entry(e, lnet_remotenet_t, lrn_list);
2670 if (rnet->lrn_net == dstnet) {
2671 LASSERT (!list_empty(&rnet->lrn_routes));
2672 route = list_entry(rnet->lrn_routes.next,
2673 lnet_route_t, lr_list);
2674 hops = rnet->lrn_hops;
2675 if (srcnidp != NULL)
2676 *srcnidp = route->lr_gateway->lp_ni->ni_nid;
2686 return -EHOSTUNREACH;
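
/* LNetSetAsync: pass the asynchrony setting 'nasync' for 'id' down to the
 * relevant LND(s) via lnd_setasync(); when the target is on a remote
 * network the setting is applied to every router that can reach it. */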
2690 LNetSetAsync(lnet_process_id_t id, int nasync)
2696 lnet_remotenet_t *rnet;
2697 struct list_head *tmp;
2698 lnet_route_t *route;
2705 /* Target on a local network? */
2707 ni = lnet_net2ni(LNET_NIDNET(id.nid));
2709 if (ni->ni_lnd->lnd_setasync != NULL)
2710 rc = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
2715 /* Target on a remote network: apply to routers */
2717 LIBCFS_ALLOC(nids, maxnids * sizeof(*nids));
2722 /* Snapshot all the router NIDs */
2724 rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
2726 list_for_each(tmp, &rnet->lrn_routes) {
2727 if (nnids == maxnids) {
2729 LIBCFS_FREE(nids, maxnids * sizeof(*nids));
2734 route = list_entry(tmp, lnet_route_t, lr_list);
2735 nids[nnids++] = route->lr_gateway->lp_nid;
2740 /* set async on all the routers */
2741 while (nnids-- > 0) {
2742 id.pid = LUSTRE_SRV_LNET_PID;
2743 id.nid = nids[nnids];
2745 ni = lnet_net2ni(LNET_NIDNET(id.nid));
2749 if (ni->ni_lnd->lnd_setasync != NULL) {
2750 rc2 = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
2757 LIBCFS_FREE(nids, maxnids * sizeof(*nids));