1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lnet/lnet/lib-move.c
38 * Data movement routines
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <lnet/lib-lnet.h>
45 static int local_nid_dist_zero = 1;
46 CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
50 static void lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg);
52 #define LNET_MATCHMD_NONE 0 /* Didn't match */
53 #define LNET_MATCHMD_OK 1 /* Matched OK */
54 #define LNET_MATCHMD_DROP 2 /* Must be discarded */
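/* Result of trying to match an incoming message against an ME/MD pair:
 * NONE tells the caller to keep looking (or, for a PUT on a lazy portal,
 * to queue the message until an MD arrives), OK means the message has
 * been committed to this MD, and DROP means it must be discarded. */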
57 lnet_try_match_md (int index, int op_mask, lnet_process_id_t src,
58 unsigned int rlength, unsigned int roffset,
59 __u64 match_bits, lnet_libmd_t *md, lnet_msg_t *msg,
60 unsigned int *mlength_out, unsigned int *offset_out)
62 /* ALWAYS called holding the LNET_LOCK, and can't LNET_UNLOCK;
63 * lnet_match_blocked_msg() relies on this to avoid races */
66 lnet_me_t *me = md->md_me;
68 /* mismatched MD op */
69 if ((md->md_options & op_mask) == 0)
70 return LNET_MATCHMD_NONE;
73 if (lnet_md_exhausted(md))
74 return LNET_MATCHMD_NONE;
76 /* mismatched ME nid/pid? */
77 if (me->me_match_id.nid != LNET_NID_ANY &&
78 me->me_match_id.nid != src.nid)
79 return LNET_MATCHMD_NONE;
81 if (me->me_match_id.pid != LNET_PID_ANY &&
82 me->me_match_id.pid != src.pid)
83 return LNET_MATCHMD_NONE;
85 /* mismatched ME matchbits? */
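/* bits may differ only where me_ignore_bits is set; e.g. with
 * me_match_bits=0x1234 and me_ignore_bits=0x00ff, any incoming match_bits
 * of the form 0x12xx is accepted */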
86 if (((me->me_match_bits ^ match_bits) & ~me->me_ignore_bits) != 0)
87 return LNET_MATCHMD_NONE;
89 /* Hurrah! This _is_ a match; check it out... */
91 if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
92 offset = md->md_offset;
96 if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
97 mlength = md->md_max_size;
98 LASSERT (md->md_offset + mlength <= md->md_length);
100 mlength = md->md_length - offset;
103 if (rlength <= mlength) { /* fits in allowed space */
105 } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
106 /* this packet _really_ is too big */
107 CERROR("Matching packet from %s, match "LPU64
108 " length %d too big: %d left, %d allowed\n",
109 libcfs_id2str(src), match_bits, rlength,
110 md->md_length - offset, mlength);
112 return LNET_MATCHMD_DROP;
115 /* Commit to this ME/MD */
116 CDEBUG(D_NET, "Incoming %s index %x from %s of "
117 "length %d/%d into md "LPX64" [%d] + %d\n",
118 (op_mask == LNET_MD_OP_PUT) ? "put" : "get",
119 index, libcfs_id2str(src), mlength, rlength,
120 md->md_lh.lh_cookie, md->md_niov, offset);
122 lnet_commit_md(md, msg);
123 md->md_offset = offset + mlength;
125 /* NB Caller will set ev.type and ev.hdr_data */
126 msg->msg_ev.initiator = src;
127 msg->msg_ev.pt_index = index;
128 msg->msg_ev.match_bits = match_bits;
129 msg->msg_ev.rlength = rlength;
130 msg->msg_ev.mlength = mlength;
131 msg->msg_ev.offset = offset;
133 lnet_md_deconstruct(md, &msg->msg_ev.md);
134 lnet_md2handle(&msg->msg_ev.md_handle, md);
136 *offset_out = offset;
137 *mlength_out = mlength;
139 /* Auto-unlink NOW, so the ME gets unlinked if required.
140 * We bumped md->md_refcount above so the MD just gets flagged
141 * for unlink when it is finalized. */
142 if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
143 lnet_md_exhausted(md)) {
147 return LNET_MATCHMD_OK;
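/* Scan the portal's ME list for the first ME/MD pair that accepts this
 * message (the per-MD checks live in lnet_try_match_md() above).
 * Unmatched GETs, and unmatched PUTs on a non-lazy portal, are dropped;
 * an unmatched PUT on a lazy portal returns NONE so lnet_parse_put() can
 * queue it. */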
151 lnet_match_md(int index, int op_mask, lnet_process_id_t src,
152 unsigned int rlength, unsigned int roffset,
153 __u64 match_bits, lnet_msg_t *msg,
154 unsigned int *mlength_out, unsigned int *offset_out,
155 lnet_libmd_t **md_out)
157 lnet_portal_t *ptl = &the_lnet.ln_portals[index];
158 struct list_head *head;
164 CDEBUG (D_NET, "Request from %s of length %d into portal %d "
165 "MB="LPX64"\n", libcfs_id2str(src), rlength, index, match_bits);
167 if (index < 0 || index >= the_lnet.ln_nportals) {
168 CERROR("Invalid portal %d not in [0-%d]\n",
169 index, the_lnet.ln_nportals - 1);
170 return LNET_MATCHMD_DROP;
173 head = lnet_portal_me_head(index, src, match_bits);
174 if (head == NULL) /* nobody posted anything on this portal */
177 list_for_each_entry_safe (me, tmp, head, me_list) {
180 /* ME attached but MD not attached yet */
184 LASSERT (me == md->md_me);
186 rc = lnet_try_match_md(index, op_mask, src, rlength,
187 roffset, match_bits, md, msg,
188 mlength_out, offset_out);
193 case LNET_MATCHMD_NONE:
196 case LNET_MATCHMD_OK:
198 return LNET_MATCHMD_OK;
200 case LNET_MATCHMD_DROP:
201 return LNET_MATCHMD_DROP;
207 if (op_mask == LNET_MD_OP_GET ||
208 !lnet_portal_is_lazy(ptl))
209 return LNET_MATCHMD_DROP;
211 return LNET_MATCHMD_NONE;
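/* Fault injection: lnet_fail_nid() adds an entry to ln_test_peers (or,
 * with threshold == 0, removes matching entries), and fail_peer() consults
 * that list on the send and receive paths to decide whether a message
 * should be dropped deliberately. */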
215 lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
217 lnet_test_peer_t *tp;
218 struct list_head *el;
219 struct list_head *next;
220 struct list_head cull;
222 LASSERT (the_lnet.ln_init);
224 if (threshold != 0) {
225 /* Adding a new entry */
226 LIBCFS_ALLOC(tp, sizeof(*tp));
231 tp->tp_threshold = threshold;
234 list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
239 /* removing entries */
240 CFS_INIT_LIST_HEAD (&cull);
244 list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
245 tp = list_entry (el, lnet_test_peer_t, tp_list);
247 if (tp->tp_threshold == 0 || /* needs culling anyway */
248 nid == LNET_NID_ANY || /* removing all entries */
249 tp->tp_nid == nid) /* matched this one */
251 list_del (&tp->tp_list);
252 list_add (&tp->tp_list, &cull);
258 while (!list_empty (&cull)) {
259 tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
261 list_del (&tp->tp_list);
262 LIBCFS_FREE(tp, sizeof (*tp));
268 fail_peer (lnet_nid_t nid, int outgoing)
270 lnet_test_peer_t *tp;
271 struct list_head *el;
272 struct list_head *next;
273 struct list_head cull;
276 CFS_INIT_LIST_HEAD (&cull);
280 list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
281 tp = list_entry (el, lnet_test_peer_t, tp_list);
283 if (tp->tp_threshold == 0) {
286 /* only cull zombies on outgoing tests,
287 * since we may be at interrupt priority on
288 * incoming messages. */
289 list_del (&tp->tp_list);
290 list_add (&tp->tp_list, &cull);
295 if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
296 nid == tp->tp_nid) { /* fail this peer */
299 if (tp->tp_threshold != LNET_MD_THRESH_INF) {
302 tp->tp_threshold == 0) {
304 list_del (&tp->tp_list);
305 list_add (&tp->tp_list, &cull);
314 while (!list_empty (&cull)) {
315 tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
316 list_del (&tp->tp_list);
318 LIBCFS_FREE(tp, sizeof (*tp));
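/* Scatter/gather helpers: lnet_iov_nob()/lnet_kiov_nob() total the bytes
 * described by a fragment list, the lnet_copy_*2*() routines copy between
 * iovec and kiov descriptors honouring byte offsets into each list, and
 * lnet_extract_*() build a sub-list covering [offset, offset+len) without
 * modifying the source. */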
325 lnet_iov_nob (unsigned int niov, struct iovec *iov)
327 unsigned int nob = 0;
330 nob += (iov++)->iov_len;
336 lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
337 unsigned int nsiov, struct iovec *siov, unsigned int soffset,
340 /* NB diov, siov are READ-ONLY */
341 unsigned int this_nob;
346 /* skip complete frags before 'doffset' */
348 while (doffset >= diov->iov_len) {
349 doffset -= diov->iov_len;
355 /* skip complete frags before 'soffset' */
357 while (soffset >= siov->iov_len) {
358 soffset -= siov->iov_len;
367 this_nob = MIN(diov->iov_len - doffset,
368 siov->iov_len - soffset);
369 this_nob = MIN(this_nob, nob);
371 memcpy ((char *)diov->iov_base + doffset,
372 (char *)siov->iov_base + soffset, this_nob);
375 if (diov->iov_len > doffset + this_nob) {
383 if (siov->iov_len > soffset + this_nob) {
394 lnet_extract_iov (int dst_niov, struct iovec *dst,
395 int src_niov, struct iovec *src,
396 unsigned int offset, unsigned int len)
398 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
399 * for exactly 'len' bytes, and return the number of entries.
400 * NB not destructive to 'src' */
401 unsigned int frag_len;
404 if (len == 0) /* no data => */
405 return (0); /* no frags */
407 LASSERT (src_niov > 0);
408 while (offset >= src->iov_len) { /* skip initial frags */
409 offset -= src->iov_len;
412 LASSERT (src_niov > 0);
417 LASSERT (src_niov > 0);
418 LASSERT (niov <= dst_niov);
420 frag_len = src->iov_len - offset;
421 dst->iov_base = ((char *)src->iov_base) + offset;
423 if (len <= frag_len) {
428 dst->iov_len = frag_len;
441 lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
448 lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov, unsigned int doffset,
449 unsigned int nskiov, lnet_kiov_t *skiov, unsigned int soffset,
456 lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
457 unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
464 lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
465 unsigned int niov, struct iovec *iov, unsigned int iovoffset,
472 lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
473 int src_niov, lnet_kiov_t *src,
474 unsigned int offset, unsigned int len)
479 #else /* __KERNEL__ */
482 lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
484 unsigned int nob = 0;
487 nob += (kiov++)->kiov_len;
493 lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
494 unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
497 /* NB diov, siov are READ-ONLY */
498 unsigned int this_nob;
505 LASSERT (!in_interrupt ());
508 while (doffset >= diov->kiov_len) {
509 doffset -= diov->kiov_len;
516 while (soffset >= siov->kiov_len) {
517 soffset -= siov->kiov_len;
526 this_nob = MIN(diov->kiov_len - doffset,
527 siov->kiov_len - soffset);
528 this_nob = MIN(this_nob, nob);
531 daddr = ((char *)cfs_kmap(diov->kiov_page)) +
532 diov->kiov_offset + doffset;
534 saddr = ((char *)cfs_kmap(siov->kiov_page)) +
535 siov->kiov_offset + soffset;
537 /* Vanishing risk of kmap deadlock when mapping 2 pages.
538 * In practice at least one of the kiovs will consist of kernel pages
539 * that are already mapped, so the kmap/kunmap calls are no-ops */
541 memcpy (daddr, saddr, this_nob);
544 if (diov->kiov_len > doffset + this_nob) {
548 cfs_kunmap(diov->kiov_page);
555 if (siov->kiov_len > soffset + this_nob) {
559 cfs_kunmap(siov->kiov_page);
568 cfs_kunmap(diov->kiov_page);
570 cfs_kunmap(siov->kiov_page);
574 lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
575 unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
578 /* NB iov, kiov are READ-ONLY */
579 unsigned int this_nob;
585 LASSERT (!in_interrupt ());
588 while (iovoffset >= iov->iov_len) {
589 iovoffset -= iov->iov_len;
596 while (kiovoffset >= kiov->kiov_len) {
597 kiovoffset -= kiov->kiov_len;
606 this_nob = MIN(iov->iov_len - iovoffset,
607 kiov->kiov_len - kiovoffset);
608 this_nob = MIN(this_nob, nob);
611 addr = ((char *)cfs_kmap(kiov->kiov_page)) +
612 kiov->kiov_offset + kiovoffset;
614 memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
617 if (iov->iov_len > iovoffset + this_nob) {
618 iovoffset += this_nob;
625 if (kiov->kiov_len > kiovoffset + this_nob) {
627 kiovoffset += this_nob;
629 cfs_kunmap(kiov->kiov_page);
639 cfs_kunmap(kiov->kiov_page);
643 lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
644 unsigned int niov, struct iovec *iov, unsigned int iovoffset,
647 /* NB kiov, iov are READ-ONLY */
648 unsigned int this_nob;
654 LASSERT (!in_interrupt ());
657 while (kiovoffset >= kiov->kiov_len) {
658 kiovoffset -= kiov->kiov_len;
665 while (iovoffset >= iov->iov_len) {
666 iovoffset -= iov->iov_len;
675 this_nob = MIN(kiov->kiov_len - kiovoffset,
676 iov->iov_len - iovoffset);
677 this_nob = MIN(this_nob, nob);
680 addr = ((char *)cfs_kmap(kiov->kiov_page)) +
681 kiov->kiov_offset + kiovoffset;
683 memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
686 if (kiov->kiov_len > kiovoffset + this_nob) {
688 kiovoffset += this_nob;
690 cfs_kunmap(kiov->kiov_page);
697 if (iov->iov_len > iovoffset + this_nob) {
698 iovoffset += this_nob;
707 cfs_kunmap(kiov->kiov_page);
711 lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
712 int src_niov, lnet_kiov_t *src,
713 unsigned int offset, unsigned int len)
715 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
716 * for exactly 'len' bytes, and return the number of entries.
717 * NB not destructive to 'src' */
718 unsigned int frag_len;
721 if (len == 0) /* no data => */
722 return (0); /* no frags */
724 LASSERT (src_niov > 0);
725 while (offset >= src->kiov_len) { /* skip initial frags */
726 offset -= src->kiov_len;
729 LASSERT (src_niov > 0);
734 LASSERT (src_niov > 0);
735 LASSERT (niov <= dst_niov);
737 frag_len = src->kiov_len - offset;
738 dst->kiov_page = src->kiov_page;
739 dst->kiov_offset = src->kiov_offset + offset;
741 if (len <= frag_len) {
743 LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
747 dst->kiov_len = frag_len;
748 LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
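/* Hand an incoming message to the NI's LND: the committed MD supplies
 * either an iovec or a kiov fragment list, and lnd_recv() moves 'mlen'
 * of the 'rlen' wire bytes into it.  A NULL msg (see lnet_drop_message())
 * tells the LND to receive and discard the payload. */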
761 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
762 unsigned int offset, unsigned int mlen, unsigned int rlen)
764 unsigned int niov = 0;
765 struct iovec *iov = NULL;
766 lnet_kiov_t *kiov = NULL;
769 LASSERT (!in_interrupt ());
770 LASSERT (mlen == 0 || msg != NULL);
773 LASSERT(msg->msg_receiving);
774 LASSERT(!msg->msg_sending);
775 LASSERT(rlen == msg->msg_len);
776 LASSERT(mlen <= msg->msg_len);
778 msg->msg_wanted = mlen;
779 msg->msg_offset = offset;
780 msg->msg_receiving = 0;
783 niov = msg->msg_niov;
785 kiov = msg->msg_kiov;
788 LASSERT ((iov == NULL) != (kiov == NULL));
792 rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
793 niov, iov, kiov, offset, mlen, rlen);
795 lnet_finalize(ni, msg, rc);
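/* Rank two routes to the same remote net: fewer hops wins; ties go to
 * the gateway with fewer queued tx bytes (lp_txqnob), then to the one
 * with more free tx credits. */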
799 lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
801 lnet_peer_t *p1 = r1->lr_gateway;
802 lnet_peer_t *p2 = r2->lr_gateway;
804 if (r1->lr_hops < r2->lr_hops)
807 if (r1->lr_hops > r2->lr_hops)
810 if (p1->lp_txqnob < p2->lp_txqnob)
813 if (p1->lp_txqnob > p2->lp_txqnob)
816 if (p1->lp_txcredits > p2->lp_txcredits)
819 if (p1->lp_txcredits < p2->lp_txcredits)
827 lnet_setpayloadbuffer(lnet_msg_t *msg)
829 lnet_libmd_t *md = msg->msg_md;
831 LASSERT (msg->msg_len > 0);
832 LASSERT (!msg->msg_routing);
833 LASSERT (md != NULL);
834 LASSERT (msg->msg_niov == 0);
835 LASSERT (msg->msg_iov == NULL);
836 LASSERT (msg->msg_kiov == NULL);
838 msg->msg_niov = md->md_niov;
839 if ((md->md_options & LNET_MD_KIOV) != 0)
840 msg->msg_kiov = md->md_iov.kiov;
842 msg->msg_iov = md->md_iov.iov;
846 lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
847 unsigned int offset, unsigned int len)
849 msg->msg_type = type;
850 msg->msg_target = target;
852 msg->msg_offset = offset;
855 lnet_setpayloadbuffer(msg);
857 memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
858 msg->msg_hdr.type = cpu_to_le32(type);
859 msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
860 msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
861 /* src_nid will be set later */
862 msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
863 msg->msg_hdr.payload_length = cpu_to_le32(len);
867 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
869 void *priv = msg->msg_private;
872 LASSERT (!in_interrupt ());
873 LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
874 (msg->msg_txcredit && msg->msg_peertxcredit));
876 rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
878 lnet_finalize(ni, msg, rc);
882 lnet_eager_recv_locked(lnet_msg_t *msg)
888 LASSERT (!msg->msg_delayed);
889 msg->msg_delayed = 1;
891 LASSERT (msg->msg_receiving);
892 LASSERT (!msg->msg_sending);
894 peer = msg->msg_rxpeer;
897 if (ni->ni_lnd->lnd_eager_recv != NULL) {
900 rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
903 CERROR("recv from %s / send to %s aborted: "
904 "eager_recv failed %d\n",
905 libcfs_nid2str(peer->lp_nid),
906 libcfs_id2str(msg->msg_target), rc);
907 LASSERT (rc < 0); /* required by my callers */
916 /* NB: caller shall hold a ref on 'lp' as I'd drop LNET_LOCK */
918 lnet_ni_peer_alive(lnet_peer_t *lp)
920 cfs_time_t last_alive = 0;
921 lnet_ni_t *ni = lp->lp_ni;
923 LASSERT (lnet_peer_aliveness_enabled(lp));
924 LASSERT (ni->ni_lnd->lnd_query != NULL);
927 (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
930 lp->lp_last_query = cfs_time_current();
932 if (last_alive != 0) /* NI has updated timestamp */
933 lp->lp_last_alive = last_alive;
937 /* NB: always called with LNET_LOCK held */
939 lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
944 LASSERT (lnet_peer_aliveness_enabled(lp));
946 /* Trust lnet_notify() if it has more recent aliveness news, but
947 * ignore the initial assumed death (see lnet_peers_start_down()).
949 if (!lp->lp_alive && lp->lp_alive_count > 0 &&
950 cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
953 deadline = cfs_time_add(lp->lp_last_alive,
954 cfs_time_seconds(lp->lp_ni->ni_peertimeout));
955 alive = cfs_time_after(deadline, now);
957 /* Update the obsolete lp_alive flag, except for routers assumed to be
958 * dead initially: the router checker will update their aliveness, and
959 * their lp_last_alive at peer creation is assumed, not observed.
961 if (alive && !lp->lp_alive &&
962 !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
963 lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
969 /* NB: returns 1 when alive, 0 when dead, negative when error;
970 * may drop the LNET_LOCK */
972 lnet_peer_alive_locked (lnet_peer_t *lp)
974 cfs_time_t now = cfs_time_current();
976 if (!lnet_peer_aliveness_enabled(lp))
979 if (lnet_peer_is_alive(lp, now))
982 /* Peer appears dead, but we should avoid frequent NI queries (at
983 * most once per lnet_queryinterval seconds). */
984 if (lp->lp_last_query != 0) {
985 static const int lnet_queryinterval = 1;
987 cfs_time_t next_query =
988 cfs_time_add(lp->lp_last_query,
989 cfs_time_seconds(lnet_queryinterval));
991 if (cfs_time_before(now, next_query)) {
993 CWARN("Unexpected aliveness of peer %s: "
995 libcfs_nid2str(lp->lp_nid),
996 (int)now, (int)next_query,
998 lp->lp_ni->ni_peertimeout);
1003 /* query NI for latest aliveness news */
1004 lnet_ni_peer_alive(lp);
1006 if (lnet_peer_is_alive(lp, now))
1009 lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
1014 lnet_post_send_locked (lnet_msg_t *msg, int do_send)
1016 /* lnet_send is going to LNET_UNLOCK immediately after this, so it sets
1017 * do_send FALSE and I don't do the unlock/send/lock bit. I return
1018 * EAGAIN if msg blocked, EHOSTUNREACH if msg_txpeer appears dead, and
1019 * 0 if sent or OK to send */
1020 lnet_peer_t *lp = msg->msg_txpeer;
1021 lnet_ni_t *ni = lp->lp_ni;
1023 /* non-lnet_send() callers only post delayed messages */
1024 LASSERT (!do_send || msg->msg_delayed);
1025 LASSERT (!msg->msg_receiving);
1027 /* NB 'lp' is always the next hop */
1028 if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
1029 lnet_peer_alive_locked(lp) == 0) {
1032 CNETERR("Dropping message for %s: peer not alive\n",
1033 libcfs_id2str(msg->msg_target));
1035 lnet_finalize(ni, msg, -EHOSTUNREACH);
1038 return EHOSTUNREACH;
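/* To go on the wire a message needs both a peer tx credit (per next-hop
 * peer) and an NI tx credit (per local interface).  If either counter
 * goes negative the message is queued on lp_txq / ni_txq and sent later
 * from lnet_return_credits_locked() when a credit is returned. */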
1041 if (!msg->msg_peertxcredit) {
1042 LASSERT ((lp->lp_txcredits < 0) == !list_empty(&lp->lp_txq));
1044 msg->msg_peertxcredit = 1;
1045 lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
1048 if (lp->lp_txcredits < lp->lp_mintxcredits)
1049 lp->lp_mintxcredits = lp->lp_txcredits;
1051 if (lp->lp_txcredits < 0) {
1052 msg->msg_delayed = 1;
1053 list_add_tail (&msg->msg_list, &lp->lp_txq);
1058 if (!msg->msg_txcredit) {
1059 LASSERT ((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
1061 msg->msg_txcredit = 1;
1064 if (ni->ni_txcredits < ni->ni_mintxcredits)
1065 ni->ni_mintxcredits = ni->ni_txcredits;
1067 if (ni->ni_txcredits < 0) {
1068 msg->msg_delayed = 1;
1069 list_add_tail (&msg->msg_list, &ni->ni_txq);
1076 lnet_ni_send(ni, msg);
1084 lnet_commit_routedmsg (lnet_msg_t *msg)
1086 /* ALWAYS called holding the LNET_LOCK */
1087 LASSERT (msg->msg_routing);
1089 the_lnet.ln_counters.msgs_alloc++;
1090 if (the_lnet.ln_counters.msgs_alloc >
1091 the_lnet.ln_counters.msgs_max)
1092 the_lnet.ln_counters.msgs_max =
1093 the_lnet.ln_counters.msgs_alloc;
1095 the_lnet.ln_counters.route_count++;
1096 the_lnet.ln_counters.route_length += msg->msg_len;
1098 LASSERT (!msg->msg_onactivelist);
1099 msg->msg_onactivelist = 1;
1100 list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
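/* Router buffers come in LNET_NRBPOOLS sizes; return the smallest pool
 * whose buffers (rbp_npages pages each) can hold this routed payload. */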
1104 lnet_msg2bufpool(lnet_msg_t *msg)
1106 lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];
1108 LASSERT (msg->msg_len <= LNET_MTU);
1109 while (msg->msg_len > rbp->rbp_npages * CFS_PAGE_SIZE) {
1111 LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
1118 lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
1120 /* lnet_parse is going to LNET_UNLOCK immediately after this, so it
1121 * sets do_recv FALSE and I don't do the unlock/recv/lock bit. I
1122 * return EAGAIN if msg blocked and 0 if received or OK to receive */
1123 lnet_peer_t *lp = msg->msg_rxpeer;
1124 lnet_rtrbufpool_t *rbp;
1127 LASSERT (msg->msg_iov == NULL);
1128 LASSERT (msg->msg_kiov == NULL);
1129 LASSERT (msg->msg_niov == 0);
1130 LASSERT (msg->msg_routing);
1131 LASSERT (msg->msg_receiving);
1132 LASSERT (!msg->msg_sending);
1134 /* non-lnet_parse callers only send delayed messages */
1135 LASSERT (!do_recv || msg->msg_delayed);
1137 if (!msg->msg_peerrtrcredit) {
1138 LASSERT ((lp->lp_rtrcredits < 0) == !list_empty(&lp->lp_rtrq));
1140 msg->msg_peerrtrcredit = 1;
1141 lp->lp_rtrcredits--;
1142 if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
1143 lp->lp_minrtrcredits = lp->lp_rtrcredits;
1145 if (lp->lp_rtrcredits < 0) {
1146 /* must have checked eager_recv before here */
1147 LASSERT (msg->msg_delayed);
1148 list_add_tail(&msg->msg_list, &lp->lp_rtrq);
1153 rbp = lnet_msg2bufpool(msg);
1155 if (!msg->msg_rtrcredit) {
1156 LASSERT ((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
1158 msg->msg_rtrcredit = 1;
1160 if (rbp->rbp_credits < rbp->rbp_mincredits)
1161 rbp->rbp_mincredits = rbp->rbp_credits;
1163 if (rbp->rbp_credits < 0) {
1164 /* must have checked eager_recv before here */
1165 LASSERT (msg->msg_delayed);
1166 list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
1171 LASSERT (!list_empty(&rbp->rbp_bufs));
1172 rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
1173 list_del(&rb->rb_list);
1175 msg->msg_niov = rbp->rbp_npages;
1176 msg->msg_kiov = &rb->rb_kiov[0];
1180 lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
1181 0, msg->msg_len, msg->msg_len);
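/* Return every credit this message took (NI tx, peer tx, router buffer,
 * peer router) and, for each credit returned, restart the first message
 * that was blocked waiting on it. */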
1189 lnet_return_credits_locked (lnet_msg_t *msg)
1191 lnet_peer_t *txpeer = msg->msg_txpeer;
1192 lnet_peer_t *rxpeer = msg->msg_rxpeer;
1196 if (msg->msg_txcredit) {
1197 /* give back NI txcredits */
1198 msg->msg_txcredit = 0;
1201 LASSERT((ni->ni_txcredits < 0) == !list_empty(&ni->ni_txq));
1204 if (ni->ni_txcredits <= 0) {
1205 msg2 = list_entry(ni->ni_txq.next, lnet_msg_t, msg_list);
1206 list_del(&msg2->msg_list);
1208 LASSERT(msg2->msg_txpeer->lp_ni == ni);
1209 LASSERT(msg2->msg_delayed);
1211 (void) lnet_post_send_locked(msg2, 1);
1215 if (msg->msg_peertxcredit) {
1216 /* give back peer txcredits */
1217 msg->msg_peertxcredit = 0;
1219 LASSERT((txpeer->lp_txcredits < 0) == !list_empty(&txpeer->lp_txq));
1221 txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
1222 LASSERT (txpeer->lp_txqnob >= 0);
1224 txpeer->lp_txcredits++;
1225 if (txpeer->lp_txcredits <= 0) {
1226 msg2 = list_entry(txpeer->lp_txq.next,
1227 lnet_msg_t, msg_list);
1228 list_del(&msg2->msg_list);
1230 LASSERT (msg2->msg_txpeer == txpeer);
1231 LASSERT (msg2->msg_delayed);
1233 (void) lnet_post_send_locked(msg2, 1);
1237 if (txpeer != NULL) {
1238 msg->msg_txpeer = NULL;
1239 lnet_peer_decref_locked(txpeer);
1243 if (msg->msg_rtrcredit) {
1244 /* give back global router credits */
1246 lnet_rtrbufpool_t *rbp;
1248 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
1249 * there until it gets one allocated, or aborts the wait
1251 LASSERT (msg->msg_kiov != NULL);
1253 rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
1255 LASSERT (rbp == lnet_msg2bufpool(msg));
1257 msg->msg_kiov = NULL;
1258 msg->msg_rtrcredit = 0;
1260 LASSERT((rbp->rbp_credits < 0) == !list_empty(&rbp->rbp_msgs));
1261 LASSERT((rbp->rbp_credits > 0) == !list_empty(&rbp->rbp_bufs));
1263 list_add(&rb->rb_list, &rbp->rbp_bufs);
1265 if (rbp->rbp_credits <= 0) {
1266 msg2 = list_entry(rbp->rbp_msgs.next,
1267 lnet_msg_t, msg_list);
1268 list_del(&msg2->msg_list);
1270 (void) lnet_post_routed_recv_locked(msg2, 1);
1274 if (msg->msg_peerrtrcredit) {
1275 /* give back peer router credits */
1276 msg->msg_peerrtrcredit = 0;
1278 LASSERT((rxpeer->lp_rtrcredits < 0) == !list_empty(&rxpeer->lp_rtrq));
1280 rxpeer->lp_rtrcredits++;
1281 if (rxpeer->lp_rtrcredits <= 0) {
1282 msg2 = list_entry(rxpeer->lp_rtrq.next,
1283 lnet_msg_t, msg_list);
1284 list_del(&msg2->msg_list);
1286 (void) lnet_post_routed_recv_locked(msg2, 1);
1290 LASSERT (!msg->msg_rtrcredit);
1291 LASSERT (!msg->msg_peerrtrcredit);
1293 if (rxpeer != NULL) {
1294 msg->msg_rxpeer = NULL;
1295 lnet_peer_decref_locked(rxpeer);
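/* Decide where an outgoing message goes: loopback sends use the LOLND
 * directly (no credits needed), destinations on an attached network use
 * the matching local NI, and anything else is retargeted at the best
 * alive gateway chosen with lnet_compare_routes().  Credits are then
 * taken in lnet_post_send_locked(). */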
1300 lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
1302 lnet_nid_t dst_nid = msg->msg_target.nid;
1304 lnet_ni_t *local_ni;
1305 lnet_remotenet_t *rnet;
1306 lnet_route_t *route;
1307 lnet_route_t *best_route;
1308 struct list_head *tmp;
1313 LASSERT (msg->msg_txpeer == NULL);
1314 LASSERT (!msg->msg_sending);
1315 LASSERT (!msg->msg_target_is_router);
1316 LASSERT (!msg->msg_receiving);
1318 msg->msg_sending = 1;
1320 /* NB! ni != NULL == interface pre-determined (ACK/REPLY) */
1324 if (the_lnet.ln_shutdown) {
1329 if (src_nid == LNET_NID_ANY) {
1332 src_ni = lnet_nid2ni_locked(src_nid);
1333 if (src_ni == NULL) {
1335 LCONSOLE_WARN("Can't send to %s: src %s is not a "
1336 "local nid\n", libcfs_nid2str(dst_nid),
1337 libcfs_nid2str(src_nid));
1340 LASSERT (!msg->msg_routing);
1343 /* Is this for someone on a local network? */
1344 local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid));
1346 if (local_ni != NULL) {
1347 if (src_ni == NULL) {
1349 src_nid = src_ni->ni_nid;
1350 } else if (src_ni == local_ni) {
1351 lnet_ni_decref_locked(local_ni);
1353 lnet_ni_decref_locked(local_ni);
1354 lnet_ni_decref_locked(src_ni);
1356 LCONSOLE_WARN("No route to %s from %s\n",
1357 libcfs_nid2str(dst_nid),
1358 libcfs_nid2str(src_nid));
1362 LASSERT (src_nid != LNET_NID_ANY);
1364 if (!msg->msg_routing) {
1365 src_nid = lnet_ptlcompat_srcnid(src_nid, dst_nid);
1366 msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
1369 if (src_ni == the_lnet.ln_loni) {
1370 /* No send credit hassles with LOLND */
1372 lnet_ni_send(src_ni, msg);
1373 lnet_ni_decref(src_ni);
1377 rc = lnet_nid2peer_locked(&lp, dst_nid);
1378 lnet_ni_decref_locked(src_ni); /* lp has ref on src_ni; lose mine */
1381 LCONSOLE_WARN("Error %d finding peer %s\n", rc,
1382 libcfs_nid2str(dst_nid));
1383 /* ENOMEM or shutting down */
1386 LASSERT (lp->lp_ni == src_ni);
1392 * - once application finishes computation, check here to update
1393 * router states before it waits for pending IO in LNetEQPoll
1394 * - recursion breaker: router checker sends no message
1395 * to remote networks */
1396 if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING)
1397 lnet_router_checker();
1401 /* sending to a remote network */
1402 rnet = lnet_find_net_locked(LNET_NIDNET(dst_nid));
1405 lnet_ni_decref_locked(src_ni);
1407 LCONSOLE_WARN("No route to %s\n",
1408 libcfs_id2str(msg->msg_target));
1409 return -EHOSTUNREACH;
1412 /* Find the best gateway I can use */
1415 list_for_each(tmp, &rnet->lrn_routes) {
1416 route = list_entry(tmp, lnet_route_t, lr_list);
1417 lp2 = route->lr_gateway;
1419 if (lp2->lp_alive &&
1420 lnet_router_down_ni(lp2, rnet->lrn_net) <= 0 &&
1421 (src_ni == NULL || lp2->lp_ni == src_ni) &&
1423 lnet_compare_routes(route, best_route) > 0)) {
1431 lnet_ni_decref_locked(src_ni);
1434 LCONSOLE_WARN("No route to %s via %s "
1435 "(all routers down)\n",
1436 libcfs_id2str(msg->msg_target),
1437 libcfs_nid2str(src_nid));
1438 return -EHOSTUNREACH;
1441 /* Place selected route at the end of the route list to ensure
1442 * fairness; everything else being equal... */
1443 list_del(&best_route->lr_list);
1444 list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
1446 if (src_ni == NULL) {
1448 src_nid = src_ni->ni_nid;
1450 LASSERT (src_ni == lp->lp_ni);
1451 lnet_ni_decref_locked(src_ni);
1454 lnet_peer_addref_locked(lp);
1456 LASSERT (src_nid != LNET_NID_ANY);
1458 if (!msg->msg_routing) {
1459 /* I'm the source and now I know which NI to send on */
1460 src_nid = lnet_ptlcompat_srcnid(src_nid, dst_nid);
1461 msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
1464 msg->msg_target_is_router = 1;
1465 msg->msg_target.nid = lp->lp_nid;
1466 msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
1469 /* 'lp' is our best choice of peer */
1471 LASSERT (!msg->msg_peertxcredit);
1472 LASSERT (!msg->msg_txcredit);
1473 LASSERT (msg->msg_txpeer == NULL);
1475 msg->msg_txpeer = lp; /* msg takes my ref on lp */
1477 rc = lnet_post_send_locked(msg, 0);
1480 if (rc == EHOSTUNREACH)
1481 return -EHOSTUNREACH;
1484 lnet_ni_send(src_ni, msg);
1490 lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg)
1492 /* ALWAYS called holding the LNET_LOCK */
1493 /* Here, we commit the MD to a network OP by marking it busy and
1494 * decrementing its threshold. Come what may, the network "owns"
1495 * the MD until a call to lnet_finalize() signals completion. */
1496 LASSERT (!msg->msg_routing);
1501 if (md->md_threshold != LNET_MD_THRESH_INF) {
1502 LASSERT (md->md_threshold > 0);
1506 the_lnet.ln_counters.msgs_alloc++;
1507 if (the_lnet.ln_counters.msgs_alloc >
1508 the_lnet.ln_counters.msgs_max)
1509 the_lnet.ln_counters.msgs_max =
1510 the_lnet.ln_counters.msgs_alloc;
1512 LASSERT (!msg->msg_onactivelist);
1513 msg->msg_onactivelist = 1;
1514 list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
1518 lnet_drop_message (lnet_ni_t *ni, void *private, unsigned int nob)
1521 the_lnet.ln_counters.drop_count++;
1522 the_lnet.ln_counters.drop_length += nob;
1525 lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
1529 lnet_drop_delayed_put(lnet_msg_t *msg, char *reason)
1531 LASSERT (msg->msg_md == NULL);
1532 LASSERT (msg->msg_delayed);
1533 LASSERT (msg->msg_rxpeer != NULL);
1534 LASSERT (msg->msg_hdr.type == LNET_MSG_PUT);
1536 CWARN("Dropping delayed PUT from %s portal %d match "LPU64
1537 " offset %d length %d: %s\n",
1538 libcfs_id2str((lnet_process_id_t){
1539 .nid = msg->msg_hdr.src_nid,
1540 .pid = msg->msg_hdr.src_pid}),
1541 msg->msg_hdr.msg.put.ptl_index,
1542 msg->msg_hdr.msg.put.match_bits,
1543 msg->msg_hdr.msg.put.offset,
1544 msg->msg_hdr.payload_length,
1547 /* NB I can't drop msg's ref on msg_rxpeer until after I've
1548 * called lnet_drop_message(), so I just hang onto msg as well
1549 * until that's done */
1551 lnet_drop_message(msg->msg_rxpeer->lp_ni,
1552 msg->msg_private, msg->msg_len);
1556 lnet_peer_decref_locked(msg->msg_rxpeer);
1557 msg->msg_rxpeer = NULL;
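/* Lazy portals: with LNET_PTL_LAZY set, a PUT that matches no MD is held
 * on ptl_msgq instead of being dropped; lnet_match_blocked_msg() replays
 * the queue when a brand new MD is attached, and LNetClearLazyPortal()
 * drops whatever is still waiting. */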
1565 LNetSetLazyPortal(int portal)
1567 lnet_portal_t *ptl = &the_lnet.ln_portals[portal];
1569 if (portal < 0 || portal >= the_lnet.ln_nportals)
1572 CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
1575 lnet_portal_setopt(ptl, LNET_PTL_LAZY);
1582 LNetClearLazyPortal(int portal)
1584 struct list_head zombies;
1585 lnet_portal_t *ptl = &the_lnet.ln_portals[portal];
1588 if (portal < 0 || portal >= the_lnet.ln_nportals)
1593 if (!lnet_portal_is_lazy(ptl)) {
1598 if (the_lnet.ln_shutdown)
1599 CWARN ("Active lazy portal %d on exit\n", portal);
1601 CDEBUG (D_NET, "clearing portal %d lazy\n", portal);
1603 /* grab all the blocked messages atomically */
1604 list_add(&zombies, &ptl->ptl_msgq);
1605 list_del_init(&ptl->ptl_msgq);
1607 ptl->ptl_msgq_version++;
1608 lnet_portal_unsetopt(ptl, LNET_PTL_LAZY);
1612 while (!list_empty(&zombies)) {
1613 msg = list_entry(zombies.next, lnet_msg_t, msg_list);
1614 list_del(&msg->msg_list);
1616 lnet_drop_delayed_put(msg, "Clearing lazy portal attr");
1623 lnet_recv_put(lnet_libmd_t *md, lnet_msg_t *msg, int delayed,
1624 unsigned int offset, unsigned int mlength)
1626 lnet_hdr_t *hdr = &msg->msg_hdr;
1630 the_lnet.ln_counters.recv_count++;
1631 the_lnet.ln_counters.recv_length += mlength;
1636 lnet_setpayloadbuffer(msg);
1638 msg->msg_ev.type = LNET_EVENT_PUT;
1639 msg->msg_ev.target.pid = hdr->dest_pid;
1640 msg->msg_ev.target.nid = hdr->dest_nid;
1641 msg->msg_ev.hdr_data = hdr->msg.put.hdr_data;
1643 /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
1644 * it back into the ACK during lnet_finalize() */
1645 msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
1646 (md->md_options & LNET_MD_ACK_DISABLE) == 0);
1648 lnet_ni_recv(msg->msg_rxpeer->lp_ni,
1650 msg, delayed, offset, mlength,
1651 hdr->payload_length);
1654 /* called with LNET_LOCK held */
1656 lnet_match_blocked_msg(lnet_libmd_t *md)
1658 CFS_LIST_HEAD (drops);
1659 CFS_LIST_HEAD (matches);
1660 struct list_head *tmp;
1661 struct list_head *entry;
1664 lnet_me_t *me = md->md_me;
1666 LASSERT (me->me_portal < the_lnet.ln_nportals);
1668 ptl = &the_lnet.ln_portals[me->me_portal];
1669 if (!lnet_portal_is_lazy(ptl)) {
1670 LASSERT (list_empty(&ptl->ptl_msgq));
1674 LASSERT (md->md_refcount == 0); /* a brand new MD */
1676 list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
1679 unsigned int mlength;
1680 unsigned int offset;
1682 lnet_process_id_t src;
1684 msg = list_entry(entry, lnet_msg_t, msg_list);
1686 LASSERT (msg->msg_delayed);
1688 hdr = &msg->msg_hdr;
1689 index = hdr->msg.put.ptl_index;
1691 src.nid = hdr->src_nid;
1692 src.pid = hdr->src_pid;
1694 rc = lnet_try_match_md(index, LNET_MD_OP_PUT, src,
1695 hdr->payload_length,
1696 hdr->msg.put.offset,
1697 hdr->msg.put.match_bits,
1698 md, msg, &mlength, &offset);
1700 if (rc == LNET_MATCHMD_NONE)
1703 /* Hurrah! This _is_ a match */
1704 list_del(&msg->msg_list);
1705 ptl->ptl_msgq_version++;
1707 if (rc == LNET_MATCHMD_OK) {
1708 list_add_tail(&msg->msg_list, &matches);
1710 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
1711 "match "LPU64" offset %d length %d.\n",
1713 hdr->msg.put.ptl_index,
1714 hdr->msg.put.match_bits,
1715 hdr->msg.put.offset,
1716 hdr->payload_length);
1718 LASSERT (rc == LNET_MATCHMD_DROP);
1720 list_add_tail(&msg->msg_list, &drops);
1723 if (lnet_md_exhausted(md))
1729 list_for_each_safe (entry, tmp, &drops) {
1730 msg = list_entry(entry, lnet_msg_t, msg_list);
1732 list_del(&msg->msg_list);
1734 lnet_drop_delayed_put(msg, "Bad match");
1737 list_for_each_safe (entry, tmp, &matches) {
1738 msg = list_entry(entry, lnet_msg_t, msg_list);
1740 list_del(&msg->msg_list);
1742 /* md won't disappear under me, since each msg
1743 * holds a ref on it */
1744 lnet_recv_put(md, msg, 1,
1746 msg->msg_ev.mlength);
1753 lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
1758 lnet_hdr_t *hdr = &msg->msg_hdr;
1759 unsigned int rlength = hdr->payload_length;
1760 unsigned int mlength = 0;
1761 unsigned int offset = 0;
1762 lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
1763 /* .pid = */ hdr->src_pid};
1767 /* Convert put fields to host byte order */
1768 hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
1769 hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
1770 hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);
1772 index = hdr->msg.put.ptl_index;
1777 rc = lnet_match_md(index, LNET_MD_OP_PUT, src,
1778 rlength, hdr->msg.put.offset,
1779 hdr->msg.put.match_bits, msg,
1780 &mlength, &offset, &md);
1785 case LNET_MATCHMD_OK:
1787 lnet_recv_put(md, msg, msg->msg_delayed, offset, mlength);
1790 case LNET_MATCHMD_NONE:
1791 ptl = &the_lnet.ln_portals[index];
1792 version = ptl->ptl_ml_version;
1795 if (!msg->msg_delayed)
1796 rc = lnet_eager_recv_locked(msg);
1799 !the_lnet.ln_shutdown &&
1800 lnet_portal_is_lazy(ptl)) {
1801 if (version != ptl->ptl_ml_version)
1804 list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
1805 ptl->ptl_msgq_version++;
1808 CDEBUG(D_NET, "Delaying PUT from %s portal %d match "
1809 LPU64" offset %d length %d: no match\n",
1810 libcfs_id2str(src), index,
1811 hdr->msg.put.match_bits,
1812 hdr->msg.put.offset, rlength);
1817 case LNET_MATCHMD_DROP:
1818 CNETERR("Dropping PUT from %s portal %d match "LPU64
1819 " offset %d length %d: %d\n",
1820 libcfs_id2str(src), index,
1821 hdr->msg.put.match_bits,
1822 hdr->msg.put.offset, rlength, rc);
1825 return ENOENT; /* +ve: OK but no match */
1830 lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
1832 lnet_hdr_t *hdr = &msg->msg_hdr;
1833 unsigned int mlength = 0;
1834 unsigned int offset = 0;
1835 lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
1836 /* .pid = */ hdr->src_pid};
1837 lnet_handle_wire_t reply_wmd;
1841 /* Convert get fields to host byte order */
1842 hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
1843 hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
1844 hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
1845 hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);
1849 rc = lnet_match_md(hdr->msg.get.ptl_index, LNET_MD_OP_GET, src,
1850 hdr->msg.get.sink_length, hdr->msg.get.src_offset,
1851 hdr->msg.get.match_bits, msg,
1852 &mlength, &offset, &md);
1853 if (rc == LNET_MATCHMD_DROP) {
1854 CNETERR("Dropping GET from %s portal %d match "LPU64
1855 " offset %d length %d\n",
1857 hdr->msg.get.ptl_index,
1858 hdr->msg.get.match_bits,
1859 hdr->msg.get.src_offset,
1860 hdr->msg.get.sink_length);
1862 return ENOENT; /* +ve: OK but no match */
1865 LASSERT (rc == LNET_MATCHMD_OK);
1867 the_lnet.ln_counters.send_count++;
1868 the_lnet.ln_counters.send_length += mlength;
1872 msg->msg_ev.type = LNET_EVENT_GET;
1873 msg->msg_ev.target.pid = hdr->dest_pid;
1874 msg->msg_ev.target.nid = hdr->dest_nid;
1875 msg->msg_ev.hdr_data = 0;
1877 reply_wmd = hdr->msg.get.return_wmd;
1879 lnet_prep_send(msg, LNET_MSG_REPLY, src, offset, mlength);
1881 msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
1884 /* The LND completes the REPLY from her recv procedure */
1885 lnet_ni_recv(ni, msg->msg_private, msg, 0,
1886 msg->msg_offset, msg->msg_len, msg->msg_len);
1890 lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
1891 msg->msg_receiving = 0;
1893 rc = lnet_send(ni->ni_nid, msg);
1895 /* didn't get as far as lnet_ni_send() */
1896 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
1897 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), rc);
1899 lnet_finalize(ni, msg, rc);
1906 lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
1908 void *private = msg->msg_private;
1909 lnet_hdr_t *hdr = &msg->msg_hdr;
1910 lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
1911 /* .pid = */ hdr->src_pid};
1918 /* NB handles only looked up by creator (no flips) */
1919 md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
1920 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
1921 CNETERR("%s: Dropping REPLY from %s for %s "
1922 "MD "LPX64"."LPX64"\n",
1923 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1924 (md == NULL) ? "invalid" : "inactive",
1925 hdr->msg.reply.dst_wmd.wh_interface_cookie,
1926 hdr->msg.reply.dst_wmd.wh_object_cookie);
1927 if (md != NULL && md->md_me != NULL)
1928 CERROR("REPLY MD also attached to portal %d\n",
1929 md->md_me->me_portal);
1932 return ENOENT; /* +ve: OK but no match */
1935 LASSERT (md->md_offset == 0);
1937 rlength = hdr->payload_length;
1938 mlength = MIN(rlength, md->md_length);
1940 if (mlength < rlength &&
1941 (md->md_options & LNET_MD_TRUNCATE) == 0) {
1942 CNETERR("%s: Dropping REPLY from %s length %d "
1943 "for MD "LPX64" would overflow (%d)\n",
1944 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1945 rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
1948 return ENOENT; /* +ve: OK but no match */
1951 CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md "LPX64"\n",
1952 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1953 mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
1955 lnet_commit_md(md, msg);
1958 lnet_setpayloadbuffer(msg);
1960 msg->msg_ev.type = LNET_EVENT_REPLY;
1961 msg->msg_ev.target.pid = hdr->dest_pid;
1962 msg->msg_ev.target.nid = hdr->dest_nid;
1963 msg->msg_ev.initiator = src;
1964 msg->msg_ev.rlength = rlength;
1965 msg->msg_ev.mlength = mlength;
1966 msg->msg_ev.offset = 0;
1968 lnet_md_deconstruct(md, &msg->msg_ev.md);
1969 lnet_md2handle(&msg->msg_ev.md_handle, md);
1971 the_lnet.ln_counters.recv_count++;
1972 the_lnet.ln_counters.recv_length += mlength;
1976 lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
1981 lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
1983 lnet_hdr_t *hdr = &msg->msg_hdr;
1984 lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
1985 /* .pid = */ hdr->src_pid};
1988 /* Convert ack fields to host byte order */
1989 hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
1990 hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
1994 /* NB handles only looked up by creator (no flips) */
1995 md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
1996 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
1997 /* Don't moan; this is expected */
1999 "%s: Dropping ACK from %s to %s MD "LPX64"."LPX64"\n",
2000 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
2001 (md == NULL) ? "invalid" : "inactive",
2002 hdr->msg.ack.dst_wmd.wh_interface_cookie,
2003 hdr->msg.ack.dst_wmd.wh_object_cookie);
2004 if (md != NULL && md->md_me != NULL)
2005 CERROR("Source MD also attached to portal %d\n",
2006 md->md_me->me_portal);
2009 return ENOENT; /* +ve! */
2012 CDEBUG(D_NET, "%s: ACK from %s into md "LPX64"\n",
2013 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
2014 hdr->msg.ack.dst_wmd.wh_object_cookie);
2016 lnet_commit_md(md, msg);
2018 msg->msg_ev.type = LNET_EVENT_ACK;
2019 msg->msg_ev.target.pid = hdr->dest_pid;
2020 msg->msg_ev.target.nid = hdr->dest_nid;
2021 msg->msg_ev.initiator = src;
2022 msg->msg_ev.mlength = hdr->msg.ack.mlength;
2023 msg->msg_ev.match_bits = hdr->msg.ack.match_bits;
2025 lnet_md_deconstruct(md, &msg->msg_ev.md);
2026 lnet_md2handle(&msg->msg_ev.md_handle, md);
2028 the_lnet.ln_counters.recv_count++;
2032 lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
2037 lnet_msgtyp2str (int type)
2046 case LNET_MSG_REPLY:
2048 case LNET_MSG_HELLO:
2051 return ("<UNKNOWN>");
2056 lnet_print_hdr(lnet_hdr_t *hdr)
2058 lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
2059 /* .pid = */ hdr->src_pid};
2060 lnet_process_id_t dst = {/* .nid = */ hdr->dest_nid,
2061 /* .pid = */ hdr->dest_pid};
2062 char *type_str = lnet_msgtyp2str (hdr->type);
2064 CWARN("P3 Header at %p of type %s\n", hdr, type_str);
2065 CWARN(" From %s\n", libcfs_id2str(src));
2066 CWARN(" To %s\n", libcfs_id2str(dst));
2068 switch (hdr->type) {
2073 CWARN(" Ptl index %d, ack md "LPX64"."LPX64", "
2074 "match bits "LPU64"\n",
2075 hdr->msg.put.ptl_index,
2076 hdr->msg.put.ack_wmd.wh_interface_cookie,
2077 hdr->msg.put.ack_wmd.wh_object_cookie,
2078 hdr->msg.put.match_bits);
2079 CWARN(" Length %d, offset %d, hdr data "LPX64"\n",
2080 hdr->payload_length, hdr->msg.put.offset,
2081 hdr->msg.put.hdr_data);
2085 CWARN(" Ptl index %d, return md "LPX64"."LPX64", "
2086 "match bits "LPU64"\n", hdr->msg.get.ptl_index,
2087 hdr->msg.get.return_wmd.wh_interface_cookie,
2088 hdr->msg.get.return_wmd.wh_object_cookie,
2089 hdr->msg.get.match_bits);
2090 CWARN(" Length %d, src offset %d\n",
2091 hdr->msg.get.sink_length,
2092 hdr->msg.get.src_offset);
2096 CWARN(" dst md "LPX64"."LPX64", "
2097 "manipulated length %d\n",
2098 hdr->msg.ack.dst_wmd.wh_interface_cookie,
2099 hdr->msg.ack.dst_wmd.wh_object_cookie,
2100 hdr->msg.ack.mlength);
2103 case LNET_MSG_REPLY:
2104 CWARN(" dst md "LPX64"."LPX64", "
2106 hdr->msg.reply.dst_wmd.wh_interface_cookie,
2107 hdr->msg.reply.dst_wmd.wh_object_cookie,
2108 hdr->payload_length);
2114 lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
2115 void *private, int rdma_req)
2120 lnet_pid_t dest_pid;
2121 lnet_nid_t dest_nid;
2123 __u32 payload_length;
2126 LASSERT (!in_interrupt ());
2128 type = le32_to_cpu(hdr->type);
2129 src_nid = le64_to_cpu(hdr->src_nid);
2130 dest_nid = le64_to_cpu(hdr->dest_nid);
2131 dest_pid = le32_to_cpu(hdr->dest_pid);
2132 payload_length = le32_to_cpu(hdr->payload_length);
2134 for_me = lnet_ptlcompat_matchnid(ni->ni_nid, dest_nid);
2139 if (payload_length > 0) {
2140 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
2141 libcfs_nid2str(from_nid),
2142 libcfs_nid2str(src_nid),
2143 lnet_msgtyp2str(type), payload_length);
2149 case LNET_MSG_REPLY:
2150 if (payload_length > (for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
2151 CERROR("%s, src %s: bad %s payload %d "
2152 "(%d max expected)\n",
2153 libcfs_nid2str(from_nid),
2154 libcfs_nid2str(src_nid),
2155 lnet_msgtyp2str(type),
2157 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
2163 CERROR("%s, src %s: Bad message type 0x%x\n",
2164 libcfs_nid2str(from_nid),
2165 libcfs_nid2str(src_nid), type);
2169 if (the_lnet.ln_routing) {
2170 cfs_time_t now = cfs_time_current();
2174 ni->ni_last_alive = now;
2175 if (ni->ni_status != NULL &&
2176 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
2177 ni->ni_status->ns_status = LNET_NI_STATUS_UP;
2182 /* Regard a bad destination NID as a protocol error. Senders should
2183 * know what they're doing; if they don't they're misconfigured, buggy
2184 * or malicious so we chop them off at the knees :) */
2187 if (the_lnet.ln_ptlcompat > 0) {
2188 /* portals compatibility is single-network */
2189 CERROR ("%s, src %s: Bad dest nid %s "
2190 "(routing not supported)\n",
2191 libcfs_nid2str(from_nid),
2192 libcfs_nid2str(src_nid),
2193 libcfs_nid2str(dest_nid));
2197 if (the_lnet.ln_ptlcompat == 0 &&
2198 LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
2199 /* should have gone direct */
2200 CERROR ("%s, src %s: Bad dest nid %s "
2201 "(should have been sent direct)\n",
2202 libcfs_nid2str(from_nid),
2203 libcfs_nid2str(src_nid),
2204 libcfs_nid2str(dest_nid));
2208 if (the_lnet.ln_ptlcompat == 0 &&
2209 lnet_islocalnid(dest_nid)) {
2210 /* dest is another local NI; sender should have used
2211 * this node's NID on its own network */
2212 CERROR ("%s, src %s: Bad dest nid %s "
2213 "(it's my nid but on a different network)\n",
2214 libcfs_nid2str(from_nid),
2215 libcfs_nid2str(src_nid),
2216 libcfs_nid2str(dest_nid));
2220 if (rdma_req && type == LNET_MSG_GET) {
2221 CERROR ("%s, src %s: Bad optimized GET for %s "
2222 "(final destination must be me)\n",
2223 libcfs_nid2str(from_nid),
2224 libcfs_nid2str(src_nid),
2225 libcfs_nid2str(dest_nid));
2229 if (!the_lnet.ln_routing) {
2230 CERROR ("%s, src %s: Dropping message for %s "
2231 "(routing not enabled)\n",
2232 libcfs_nid2str(from_nid),
2233 libcfs_nid2str(src_nid),
2234 libcfs_nid2str(dest_nid));
2239 /* Message looks OK; we're not going to return an error, so we MUST
2240 * call back lnd_recv() come what may... */
2242 if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
2243 fail_peer (src_nid, 0)) /* shall we now? */
2245 CERROR("%s, src %s: Dropping %s to simulate failure\n",
2246 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
2247 lnet_msgtyp2str(type));
2251 msg = lnet_msg_alloc();
2253 CERROR("%s, src %s: Dropping %s (out of memory)\n",
2254 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
2255 lnet_msgtyp2str(type));
2259 /* msg zeroed in lnet_msg_alloc; i.e. flags all clear, pointers NULL etc */
2261 msg->msg_type = type;
2262 msg->msg_private = private;
2263 msg->msg_receiving = 1;
2264 msg->msg_len = msg->msg_wanted = payload_length;
2265 msg->msg_offset = 0;
2266 msg->msg_hdr = *hdr;
2269 rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid);
2272 CERROR("%s, src %s: Dropping %s "
2273 "(error %d looking up sender)\n",
2274 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
2275 lnet_msgtyp2str(type), rc);
2284 msg->msg_target.pid = dest_pid;
2285 msg->msg_target.nid = dest_nid;
2286 msg->msg_routing = 1;
2287 msg->msg_offset = 0;
2290 if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
2291 lnet_msg2bufpool(msg)->rbp_credits <= 0) {
2292 rc = lnet_eager_recv_locked(msg);
2298 lnet_commit_routedmsg(msg);
2299 rc = lnet_post_routed_recv_locked(msg, 0);
2303 lnet_ni_recv(ni, msg->msg_private, msg, 0,
2304 0, payload_length, payload_length);
2308 /* convert common msg->hdr fields to host byteorder */
2309 msg->msg_hdr.type = type;
2310 msg->msg_hdr.src_nid = src_nid;
2311 msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
2312 msg->msg_hdr.dest_nid = dest_nid;
2313 msg->msg_hdr.dest_pid = dest_pid;
2314 msg->msg_hdr.payload_length = payload_length;
2316 msg->msg_ev.sender = from_nid;
2320 rc = lnet_parse_ack(ni, msg);
2323 rc = lnet_parse_put(ni, msg);
2326 rc = lnet_parse_get(ni, msg, rdma_req);
2328 case LNET_MSG_REPLY:
2329 rc = lnet_parse_reply(ni, msg);
2333 goto free_drop; /* prevent an unused label if !kernel */
2339 LASSERT (rc == ENOENT);
2342 LASSERT (msg->msg_md == NULL);
2344 if (msg->msg_rxpeer != NULL) {
2345 lnet_peer_decref_locked(msg->msg_rxpeer);
2346 msg->msg_rxpeer = NULL;
2348 lnet_msg_free(msg); /* expects LNET_LOCK held */
2352 lnet_drop_message(ni, private, payload_length);
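/* LNetPut(): public entry point for sending a PUT.  It commits the
 * caller's MD, fills in the PUT header (requesting an ACK when 'ack' is
 * LNET_ACK_REQ) and hands the message to lnet_send(); completion is
 * reported through events on the MD's event queue.
 *
 * Illustrative caller sketch (not taken from this file; it assumes an MD
 * has already been attached and its handle saved in 'mdh'):
 *
 *      rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
 *                   portal, match_bits, 0, 0);
 *      if (rc != 0)
 *              CERROR("LNetPut to %s failed: %d\n",
 *                     libcfs_id2str(target), rc);
 *
 * The caller then waits for LNET_EVENT_SEND (and LNET_EVENT_ACK, if one
 * was requested) on the MD's event queue. */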
2357 LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
2358 lnet_process_id_t target, unsigned int portal,
2359 __u64 match_bits, unsigned int offset,
2366 LASSERT (the_lnet.ln_init);
2367 LASSERT (the_lnet.ln_refcount > 0);
2369 if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
2370 fail_peer (target.nid, 1)) /* shall we now? */
2372 CERROR("Dropping PUT to %s: simulated failure\n",
2373 libcfs_id2str(target));
2377 msg = lnet_msg_alloc();
2379 CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
2380 libcfs_id2str(target));
2386 md = lnet_handle2md(&mdh);
2387 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
2390 CERROR("Dropping PUT ("LPU64":%d:%s): MD (%d) invalid\n",
2391 match_bits, portal, libcfs_id2str(target),
2392 md == NULL ? -1 : md->md_threshold);
2393 if (md != NULL && md->md_me != NULL)
2394 CERROR("Source MD also attached to portal %d\n",
2395 md->md_me->me_portal);
2401 CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
2403 lnet_commit_md(md, msg);
2405 lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
2407 msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
2408 msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
2409 msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
2410 msg->msg_hdr.msg.put.hdr_data = hdr_data;
2412 /* NB handles only looked up by creator (no flips) */
2413 if (ack == LNET_ACK_REQ) {
2414 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2415 the_lnet.ln_interface_cookie;
2416 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2417 md->md_lh.lh_cookie;
2419 msg->msg_hdr.msg.put.ack_wmd = LNET_WIRE_HANDLE_NONE;
2422 msg->msg_ev.type = LNET_EVENT_SEND;
2423 msg->msg_ev.initiator.nid = LNET_NID_ANY;
2424 msg->msg_ev.initiator.pid = the_lnet.ln_pid;
2425 msg->msg_ev.target = target;
2426 msg->msg_ev.sender = LNET_NID_ANY;
2427 msg->msg_ev.pt_index = portal;
2428 msg->msg_ev.match_bits = match_bits;
2429 msg->msg_ev.rlength = md->md_length;
2430 msg->msg_ev.mlength = md->md_length;
2431 msg->msg_ev.offset = offset;
2432 msg->msg_ev.hdr_data = hdr_data;
2434 lnet_md_deconstruct(md, &msg->msg_ev.md);
2435 lnet_md2handle(&msg->msg_ev.md_handle, md);
2437 the_lnet.ln_counters.send_count++;
2438 the_lnet.ln_counters.send_length += md->md_length;
2442 rc = lnet_send(self, msg);
2444 CNETERR("Error sending PUT to %s: %d\n",
2445 libcfs_id2str(target), rc);
2446 lnet_finalize (NULL, msg, rc);
2449 /* completion will be signalled by an event */
2454 lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
2456 /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
2457 * returns a msg for the LND to pass to lnet_finalize() when the sink
2458 * data has been received.
2460 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
2461 * lnet_finalize() is called on it, so the LND must call this first */
2463 lnet_msg_t *msg = lnet_msg_alloc();
2464 lnet_libmd_t *getmd = getmsg->msg_md;
2465 lnet_process_id_t peer_id = getmsg->msg_target;
2467 LASSERT (!getmsg->msg_target_is_router);
2468 LASSERT (!getmsg->msg_routing);
2472 LASSERT (getmd->md_refcount > 0);
2475 CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
2476 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
2480 if (getmd->md_threshold == 0) {
2481 CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
2482 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
2487 LASSERT (getmd->md_offset == 0);
2489 CDEBUG(D_NET, "%s: Reply from %s md %p\n",
2490 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
2492 lnet_commit_md (getmd, msg);
2494 msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
2496 msg->msg_ev.type = LNET_EVENT_REPLY;
2497 msg->msg_ev.initiator = peer_id;
2498 msg->msg_ev.sender = peer_id.nid; /* optimized GETs can't be routed */
2499 msg->msg_ev.rlength = msg->msg_ev.mlength = getmd->md_length;
2500 msg->msg_ev.offset = 0;
2502 lnet_md_deconstruct(getmd, &msg->msg_ev.md);
2503 lnet_md2handle(&msg->msg_ev.md_handle, getmd);
2505 the_lnet.ln_counters.recv_count++;
2506 the_lnet.ln_counters.recv_length += getmd->md_length;
2515 the_lnet.ln_counters.drop_count++;
2516 the_lnet.ln_counters.drop_length += getmd->md_length;
2524 lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
2526 /* Set the REPLY length, now the RDMA that elides the REPLY message has
2527 * completed and I know it. */
2528 LASSERT (reply != NULL);
2529 LASSERT (reply->msg_type == LNET_MSG_GET);
2530 LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);
2532 /* NB I trusted my peer to RDMA. If she tells me she's written beyond
2533 * the end of my buffer, I might as well be dead. */
2534 LASSERT (len <= reply->msg_ev.mlength);
2536 reply->msg_ev.mlength = len;
2540 LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
2541 lnet_process_id_t target, unsigned int portal,
2542 __u64 match_bits, unsigned int offset)
2548 LASSERT (the_lnet.ln_init);
2549 LASSERT (the_lnet.ln_refcount > 0);
2551 if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
2552 fail_peer (target.nid, 1)) /* shall we now? */
2554 CERROR("Dropping GET to %s: simulated failure\n",
2555 libcfs_id2str(target));
2559 msg = lnet_msg_alloc();
2561 CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
2562 libcfs_id2str(target));
2568 md = lnet_handle2md(&mdh);
2569 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
2572 CERROR("Dropping GET ("LPU64":%d:%s): MD (%d) invalid\n",
2573 match_bits, portal, libcfs_id2str(target),
2574 md == NULL ? -1 : md->md_threshold);
2575 if (md != NULL && md->md_me != NULL)
2576 CERROR("REPLY MD also attached to portal %d\n",
2577 md->md_me->me_portal);
2583 CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
2585 lnet_commit_md(md, msg);
2587 lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
2589 msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
2590 msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
2591 msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
2592 msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
2594 /* NB handles only looked up by creator (no flips) */
2595 msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
2596 the_lnet.ln_interface_cookie;
2597 msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
2598 md->md_lh.lh_cookie;
2600 msg->msg_ev.type = LNET_EVENT_SEND;
2601 msg->msg_ev.initiator.nid = LNET_NID_ANY;
2602 msg->msg_ev.initiator.pid = the_lnet.ln_pid;
2603 msg->msg_ev.target = target;
2604 msg->msg_ev.sender = LNET_NID_ANY;
2605 msg->msg_ev.pt_index = portal;
2606 msg->msg_ev.match_bits = match_bits;
2607 msg->msg_ev.rlength = md->md_length;
2608 msg->msg_ev.mlength = md->md_length;
2609 msg->msg_ev.offset = offset;
2610 msg->msg_ev.hdr_data = 0;
2612 lnet_md_deconstruct(md, &msg->msg_ev.md);
2613 lnet_md2handle(&msg->msg_ev.md_handle, md);
2615 the_lnet.ln_counters.send_count++;
2619 rc = lnet_send(self, msg);
2621 CNETERR("Error sending GET to %s: %d\n",
2622 libcfs_id2str(target), rc);
2623 lnet_finalize (NULL, msg, rc);
2626 /* completion will be signalled by an event */
2631 LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
2633 struct list_head *e;
2635 lnet_remotenet_t *rnet;
2636 __u32 dstnet = LNET_NIDNET(dstnid);
2640 /* if !local_nid_dist_zero, I don't return a distance of 0 ever
2641 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
2642 * keep order 0 free for 0@lo and order 1 free for a local NID
2645 LASSERT (the_lnet.ln_init);
2646 LASSERT (the_lnet.ln_refcount > 0);
2650 list_for_each (e, &the_lnet.ln_nis) {
2651 ni = list_entry(e, lnet_ni_t, ni_list);
2653 if (ni->ni_nid == dstnid ||
2654 (the_lnet.ln_ptlcompat > 0 &&
2655 LNET_NIDNET(dstnid) == 0 &&
2656 LNET_NIDADDR(dstnid) == LNET_NIDADDR(ni->ni_nid) &&
2657 LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) != LOLND)) {
2658 if (srcnidp != NULL)
2660 if (orderp != NULL) {
2661 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
2668 return local_nid_dist_zero ? 0 : 1;
2671 if (LNET_NIDNET(ni->ni_nid) == dstnet ||
2672 (the_lnet.ln_ptlcompat > 0 &&
2674 LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) != LOLND)) {
2675 if (srcnidp != NULL)
2676 *srcnidp = ni->ni_nid;
2686 list_for_each (e, &the_lnet.ln_remote_nets) {
2687 rnet = list_entry(e, lnet_remotenet_t, lrn_list);
2689 if (rnet->lrn_net == dstnet) {
2690 lnet_route_t *route;
2691 lnet_route_t *shortest = NULL;
2693 LASSERT (!list_empty(&rnet->lrn_routes));
2695 list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
2696 if (shortest == NULL ||
2697 route->lr_hops < shortest->lr_hops)
2701 LASSERT (shortest != NULL);
2702 hops = shortest->lr_hops;
2703 if (srcnidp != NULL)
2704 *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
2714 return -EHOSTUNREACH;
2718 LNetSetAsync(lnet_process_id_t id, int nasync)
2724 lnet_remotenet_t *rnet;
2725 struct list_head *tmp;
2726 lnet_route_t *route;
2733 /* Target on a local network? */
2735 ni = lnet_net2ni(LNET_NIDNET(id.nid));
2737 if (ni->ni_lnd->lnd_setasync != NULL)
2738 rc = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
2743 /* Target on a remote network: apply to routers */
2745 LIBCFS_ALLOC(nids, maxnids * sizeof(*nids));
2750 /* Snapshot all the router NIDs */
2752 rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
2754 list_for_each(tmp, &rnet->lrn_routes) {
2755 if (nnids == maxnids) {
2757 LIBCFS_FREE(nids, maxnids * sizeof(*nids));
2762 route = list_entry(tmp, lnet_route_t, lr_list);
2763 nids[nnids++] = route->lr_gateway->lp_nid;
2768 /* set async on all the routers */
2769 while (nnids-- > 0) {
2770 id.pid = LUSTRE_SRV_LNET_PID;
2771 id.nid = nids[nnids];
2773 ni = lnet_net2ni(LNET_NIDNET(id.nid));
2777 if (ni->ni_lnd->lnd_setasync != NULL) {
2778 rc2 = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
2785 LIBCFS_FREE(nids, maxnids * sizeof(*nids));