 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

static int local_nid_dist_zero = 1;
CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,

static void lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg);

#define LNET_MATCHMD_NONE       0       /* Didn't match */
#define LNET_MATCHMD_OK         1       /* Matched OK */
#define LNET_MATCHMD_DROP       2       /* Must be discarded */

static int
lnet_try_match_md (int index, int op_mask, lnet_process_id_t src,
                   unsigned int rlength, unsigned int roffset,
                   __u64 match_bits, lnet_libmd_t *md, lnet_msg_t *msg,
                   unsigned int *mlength_out, unsigned int *offset_out)
{
        /* ALWAYS called holding the LNET_LOCK, and can't LNET_UNLOCK;
         * lnet_match_blocked_msg() relies on this to avoid races */
        unsigned int  offset;
        unsigned int  mlength;
        lnet_me_t    *me = md->md_me;

        /* mismatched MD op */
        if ((md->md_options & op_mask) == 0)
                return LNET_MATCHMD_NONE;

        /* MD exhausted */
        if (lnet_md_exhausted(md))
                return LNET_MATCHMD_NONE;

        /* mismatched ME nid/pid? */
        if (me->me_match_id.nid != LNET_NID_ANY &&
            me->me_match_id.nid != src.nid)
                return LNET_MATCHMD_NONE;

        if (me->me_match_id.pid != LNET_PID_ANY &&
            me->me_match_id.pid != src.pid)
                return LNET_MATCHMD_NONE;

        /* mismatched ME matchbits? */
        if (((me->me_match_bits ^ match_bits) & ~me->me_ignore_bits) != 0)
                return LNET_MATCHMD_NONE;
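        /* Illustrative worked example (not in the original source): with
         * me_match_bits = 0x1234 and me_ignore_bits = 0x00ff, incoming
         * match_bits = 0x12ab matches, since
         *      (0x1234 ^ 0x12ab) & ~0x00ff == 0x009f & ~0x00ff == 0,
         * whereas match_bits = 0x22ab fails: the 0x3000 difference survives
         * the ignore mask. */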
        /* Hurrah! This _is_ a match; check it out... */

        if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
                offset = md->md_offset;
        else
                offset = roffset;

        if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
                mlength = md->md_max_size;
                LASSERT (md->md_offset + mlength <= md->md_length);
        } else {
                mlength = md->md_length - offset;
        }

        if (rlength <= mlength) {               /* fits in allowed space */
                mlength = rlength;
        } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
                /* this packet _really_ is too big */
                CERROR("Matching packet from %s, match "LPU64
                       " length %d too big: %d left, %d allowed\n",
                       libcfs_id2str(src), match_bits, rlength,
                       md->md_length - offset, mlength);

                return LNET_MATCHMD_DROP;
        }

        /* Commit to this ME/MD */
        CDEBUG(D_NET, "Incoming %s index %x from %s of "
               "length %d/%d into md "LPX64" [%d] + %d\n",
               (op_mask == LNET_MD_OP_PUT) ? "put" : "get",
               index, libcfs_id2str(src), mlength, rlength,
               md->md_lh.lh_cookie, md->md_niov, offset);

        lnet_commit_md(md, msg);
        md->md_offset = offset + mlength;

        /* NB Caller will set ev.type and ev.hdr_data */
        msg->msg_ev.initiator = src;
        msg->msg_ev.pt_index = index;
        msg->msg_ev.match_bits = match_bits;
        msg->msg_ev.rlength = rlength;
        msg->msg_ev.mlength = mlength;
        msg->msg_ev.offset = offset;

        lnet_md_deconstruct(md, &msg->msg_ev.md);
        lnet_md2handle(&msg->msg_ev.md_handle, md);

        *offset_out = offset;
        *mlength_out = mlength;

        /* Auto-unlink NOW, so the ME gets unlinked if required.
         * We bumped md->md_refcount above so the MD just gets flagged
         * for unlink when it is finalized. */
        if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
            lnet_md_exhausted(md)) {
                lnet_md_unlink(md);
        }

        return LNET_MATCHMD_OK;
}

static int
lnet_match_md(int index, int op_mask, lnet_process_id_t src,
              unsigned int rlength, unsigned int roffset,
              __u64 match_bits, lnet_msg_t *msg,
              unsigned int *mlength_out, unsigned int *offset_out,
              lnet_libmd_t **md_out)
{
        lnet_portal_t    *ptl = &the_lnet.ln_portals[index];
        cfs_list_t       *head;
        lnet_me_t        *me;
        lnet_me_t        *tmp;
        lnet_libmd_t     *md;
        int               rc;

        CDEBUG (D_NET, "Request from %s of length %d into portal %d "
                "MB="LPX64"\n", libcfs_id2str(src), rlength, index, match_bits);

        if (index < 0 || index >= the_lnet.ln_nportals) {
                CERROR("Invalid portal %d not in [0-%d]\n",
                       index, the_lnet.ln_nportals);
                return LNET_MATCHMD_DROP;
        }

        head = lnet_portal_me_head(index, src, match_bits);
        if (head == NULL) /* nobody posted anything on this portal */
                goto out0;

        cfs_list_for_each_entry_safe_typed (me, tmp, head,
                                            lnet_me_t, me_list) {
                md = me->me_md;

                /* ME attached but MD not attached yet */
                if (md == NULL)
                        continue;

                LASSERT (me == md->md_me);

                rc = lnet_try_match_md(index, op_mask, src, rlength,
                                       roffset, match_bits, md, msg,
                                       mlength_out, offset_out);
                switch (rc) {
                default:
                        LBUG();

                case LNET_MATCHMD_NONE:
                        continue;

                case LNET_MATCHMD_OK:
                        *md_out = md;
                        return LNET_MATCHMD_OK;

                case LNET_MATCHMD_DROP:
                        return LNET_MATCHMD_DROP;
                }
                /* not reached */
        }

 out0:
        if (op_mask == LNET_MD_OP_GET ||
            !lnet_portal_is_lazy(ptl))
                return LNET_MATCHMD_DROP;

        return LNET_MATCHMD_NONE;
}

int
lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
{
        lnet_test_peer_t  *tp;
        cfs_list_t        *el;
        cfs_list_t        *next;
        cfs_list_t         cull;

        LASSERT (the_lnet.ln_init);

        if (threshold != 0) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (tp == NULL)
                        return -ENOMEM;

                tp->tp_nid = nid;
                tp->tp_threshold = threshold;

                LNET_LOCK();
                cfs_list_add_tail (&tp->tp_list, &the_lnet.ln_test_peers);
                LNET_UNLOCK();
                return 0;
        }

        /* removing entries */
        CFS_INIT_LIST_HEAD (&cull);

        LNET_LOCK();

        cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
                tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0 ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||      /* removing all entries */
                    tp->tp_nid == nid)          /* matched this one */
                {
                        cfs_list_del (&tp->tp_list);
                        cfs_list_add (&tp->tp_list, &cull);
                }
        }

        LNET_UNLOCK();

        while (!cfs_list_empty (&cull)) {
                tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);

                cfs_list_del (&tp->tp_list);
                LIBCFS_FREE(tp, sizeof (*tp));
        }
        return 0;
}
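/* Usage sketch (illustrative; not part of the original source): a test
 * harness can inject message loss with
 *
 *      lnet_fail_nid(nid, 3);            -- fail the next 3 exchanges with nid
 *      lnet_fail_nid(nid, 0);            -- remove that entry again
 *      lnet_fail_nid(LNET_NID_ANY, 0);   -- clear the whole table
 *
 * fail_peer() below is consulted on each send/receive and decrements the
 * per-entry threshold until it reaches zero. */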
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
        lnet_test_peer_t *tp;
        cfs_list_t       *el;
        cfs_list_t       *next;
        cfs_list_t        cull;
        int               fail = 0;

        CFS_INIT_LIST_HEAD (&cull);

        LNET_LOCK();

        cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
                tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);

                if (tp->tp_threshold == 0) {
                        /* zombie entry */
                        if (outgoing) {
                                /* only cull zombies on outgoing tests,
                                 * since we may be at interrupt priority on
                                 * incoming messages. */
                                cfs_list_del (&tp->tp_list);
                                cfs_list_add (&tp->tp_list, &cull);
                        }
                        continue;
                }

                if (tp->tp_nid == LNET_NID_ANY ||       /* fail every peer */
                    nid == tp->tp_nid) {                /* fail this peer */
                        fail = 1;

                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
                                    tp->tp_threshold == 0) {
                                        /* see above */
                                        cfs_list_del (&tp->tp_list);
                                        cfs_list_add (&tp->tp_list, &cull);
                                }
                        }
                        break;
                }
        }

        LNET_UNLOCK();

        while (!cfs_list_empty (&cull)) {
                tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
                cfs_list_del (&tp->tp_list);

                LIBCFS_FREE(tp, sizeof (*tp));
        }

        return (fail);
}

unsigned int
lnet_iov_nob (unsigned int niov, struct iovec *iov)
{
        unsigned int nob = 0;

        while (niov-- > 0)
                nob += (iov++)->iov_len;

        return (nob);
}

void
lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
                   unsigned int nsiov, struct iovec *siov, unsigned int soffset,
                   unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;

        if (nob == 0)
                return;

        /* skip complete frags before 'doffset' */
        LASSERT (ndiov > 0);
        while (doffset >= diov->iov_len) {
                doffset -= diov->iov_len;
                diov++;
                ndiov--;
                LASSERT (ndiov > 0);
        }

        /* skip complete frags before 'soffset' */
        LASSERT (nsiov > 0);
        while (soffset >= siov->iov_len) {
                soffset -= siov->iov_len;
                siov++;
                nsiov--;
                LASSERT (nsiov > 0);
        }

        do {
                LASSERT (ndiov > 0);
                LASSERT (nsiov > 0);
                this_nob = MIN(diov->iov_len - doffset,
                               siov->iov_len - soffset);
                this_nob = MIN(this_nob, nob);

                memcpy ((char *)diov->iov_base + doffset,
                        (char *)siov->iov_base + soffset, this_nob);
                nob -= this_nob;

                if (diov->iov_len > doffset + this_nob) {
                        doffset += this_nob;
                } else {
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->iov_len > soffset + this_nob) {
                        soffset += this_nob;
                } else {
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);
}

int
lnet_extract_iov (int dst_niov, struct iovec *dst,
                  int src_niov, struct iovec *src,
                  unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
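        /* Worked example (illustrative, not in the original source): with
         * src = [iov_len 100, iov_len 50], offset = 120 and len = 20, the
         * first frag is skipped entirely, and dst becomes a single entry
         * pointing 20 bytes into the second frag with iov_len = 20; the
         * function returns 1. */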
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT (src_niov > 0);
        while (offset >= src->iov_len) {        /* skip initial frags */
                offset -= src->iov_len;
                src_niov--;
                src++;
                LASSERT (src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT (src_niov > 0);
                LASSERT ((int)niov <= dst_niov);

                frag_len = src->iov_len - offset;
                dst->iov_base = ((char *)src->iov_base) + offset;

                if (len <= frag_len) {
                        dst->iov_len = len;
                        return (niov);
                }

                dst->iov_len = frag_len;

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}

#ifndef __KERNEL__
unsigned int
lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
{
        LASSERT (0);
        return (0);
}

void
lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov, unsigned int doffset,
                     unsigned int nskiov, lnet_kiov_t *skiov, unsigned int soffset,
                     unsigned int nob)
{
        LASSERT (0);
}

void
lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int nob)
{
        LASSERT (0);
}

void
lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nob)
{
        LASSERT (0);
}

int
lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                   int src_niov, lnet_kiov_t *src,
                   unsigned int offset, unsigned int len)
{
        LASSERT (0);
        return (0);
}

#else /* __KERNEL__ */

unsigned int
lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
{
        unsigned int nob = 0;

        while (niov-- > 0)
                nob += (kiov++)->kiov_len;

        return (nob);
}

void
lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                     unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
                     unsigned int nob)
{
        /* NB diov, siov are READ-ONLY */
        unsigned int  this_nob;
        char         *daddr = NULL;
        char         *saddr = NULL;

        if (nob == 0)
                return;

        LASSERT (!cfs_in_interrupt ());

        LASSERT (ndiov > 0);
        while (doffset >= diov->kiov_len) {
                doffset -= diov->kiov_len;
                diov++;
                ndiov--;
                LASSERT (ndiov > 0);
        }

        LASSERT (nsiov > 0);
        while (soffset >= siov->kiov_len) {
                soffset -= siov->kiov_len;
                siov++;
                nsiov--;
                LASSERT (nsiov > 0);
        }

        do {
                LASSERT (ndiov > 0);
                LASSERT (nsiov > 0);
                this_nob = MIN(diov->kiov_len - doffset,
                               siov->kiov_len - soffset);
                this_nob = MIN(this_nob, nob);

                if (daddr == NULL)
                        daddr = ((char *)cfs_kmap(diov->kiov_page)) +
                                diov->kiov_offset + doffset;
                if (saddr == NULL)
                        saddr = ((char *)cfs_kmap(siov->kiov_page)) +
                                siov->kiov_offset + soffset;

                /* Vanishing risk of kmap deadlock when mapping 2 pages.
                 * However in practice at least one of the kiovs will be mapped
                 * kernel pages and the map/unmap will be NOOPs */

                memcpy (daddr, saddr, this_nob);
                nob -= this_nob;

                if (diov->kiov_len > doffset + this_nob) {
                        daddr += this_nob;
                        doffset += this_nob;
                } else {
                        cfs_kunmap(diov->kiov_page);
                        daddr = NULL;
                        diov++;
                        ndiov--;
                        doffset = 0;
                }

                if (siov->kiov_len > soffset + this_nob) {
                        saddr += this_nob;
                        soffset += this_nob;
                } else {
                        cfs_kunmap(siov->kiov_page);
                        saddr = NULL;
                        siov++;
                        nsiov--;
                        soffset = 0;
                }
        } while (nob > 0);

        if (daddr != NULL)
                cfs_kunmap(diov->kiov_page);
        if (saddr != NULL)
                cfs_kunmap(siov->kiov_page);
}

void
lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int nob)
{
        /* NB iov, kiov are READ-ONLY */
        unsigned int  this_nob;
        char         *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!cfs_in_interrupt ());

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT (nkiov > 0);
        }

        do {
                LASSERT (niov > 0);
                LASSERT (nkiov > 0);
                this_nob = MIN(iov->iov_len - iovoffset,
                               kiov->kiov_len - kiovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)cfs_kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
                nob -= this_nob;

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        cfs_kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                cfs_kunmap(kiov->kiov_page);
}

void
lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                    unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                    unsigned int nob)
{
        /* NB kiov, iov are READ-ONLY */
        unsigned int  this_nob;
        char         *addr = NULL;

        if (nob == 0)
                return;

        LASSERT (!cfs_in_interrupt ());

        LASSERT (nkiov > 0);
        while (kiovoffset >= kiov->kiov_len) {
                kiovoffset -= kiov->kiov_len;
                kiov++;
                nkiov--;
                LASSERT (nkiov > 0);
        }

        LASSERT (niov > 0);
        while (iovoffset >= iov->iov_len) {
                iovoffset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                LASSERT (nkiov > 0);
                LASSERT (niov > 0);
                this_nob = MIN(kiov->kiov_len - kiovoffset,
                               iov->iov_len - iovoffset);
                this_nob = MIN(this_nob, nob);

                if (addr == NULL)
                        addr = ((char *)cfs_kmap(kiov->kiov_page)) +
                                kiov->kiov_offset + kiovoffset;

                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
                nob -= this_nob;

                if (kiov->kiov_len > kiovoffset + this_nob) {
                        addr += this_nob;
                        kiovoffset += this_nob;
                } else {
                        cfs_kunmap(kiov->kiov_page);
                        addr = NULL;
                        kiov++;
                        nkiov--;
                        kiovoffset = 0;
                }

                if (iov->iov_len > iovoffset + this_nob) {
                        iovoffset += this_nob;
                } else {
                        iov++;
                        niov--;
                        iovoffset = 0;
                }
        } while (nob > 0);

        if (addr != NULL)
                cfs_kunmap(kiov->kiov_page);
}

int
lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                   int src_niov, lnet_kiov_t *src,
                   unsigned int offset, unsigned int len)
{
        /* Initialise 'dst' to the subset of 'src' starting at 'offset',
         * for exactly 'len' bytes, and return the number of entries.
         * NB not destructive to 'src' */
        unsigned int frag_len;
        unsigned int niov;

        if (len == 0)                           /* no data => */
                return (0);                     /* no frags */

        LASSERT (src_niov > 0);
        while (offset >= src->kiov_len) {       /* skip initial frags */
                offset -= src->kiov_len;
                src_niov--;
                src++;
                LASSERT (src_niov > 0);
        }

        niov = 1;
        for (;;) {
                LASSERT (src_niov > 0);
                LASSERT ((int)niov <= dst_niov);

                frag_len = src->kiov_len - offset;
                dst->kiov_page = src->kiov_page;
                dst->kiov_offset = src->kiov_offset + offset;

                if (len <= frag_len) {
                        dst->kiov_len = len;
                        LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
                        return (niov);
                }

                dst->kiov_len = frag_len;
                LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);

                len -= frag_len;
                dst++;
                src++;
                niov++;
                src_niov--;
                offset = 0;
        }
}
#endif /* __KERNEL__ */

void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
{
        unsigned int  niov = 0;
        struct iovec *iov = NULL;
        lnet_kiov_t  *kiov = NULL;
        int           rc;

        LASSERT (!cfs_in_interrupt ());
        LASSERT (mlen == 0 || msg != NULL);

        if (msg != NULL) {
                LASSERT(msg->msg_receiving);
                LASSERT(!msg->msg_sending);
                LASSERT(rlen == msg->msg_len);
                LASSERT(mlen <= msg->msg_len);

                msg->msg_wanted = mlen;
                msg->msg_offset = offset;
                msg->msg_receiving = 0;

                if (mlen != 0) {
                        niov = msg->msg_niov;
                        iov  = msg->msg_iov;
                        kiov = msg->msg_kiov;

                        LASSERT (niov > 0);
                        LASSERT ((iov == NULL) != (kiov == NULL));
                }
        }

        rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
                                    niov, iov, kiov, offset, mlen, rlen);
        if (rc < 0)
                lnet_finalize(ni, msg, rc);
}

static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
        lnet_peer_t *p1 = r1->lr_gateway;
        lnet_peer_t *p2 = r2->lr_gateway;

        if (r1->lr_hops < r2->lr_hops)
                return 1;

        if (r1->lr_hops > r2->lr_hops)
                return -1;

        if (p1->lp_txqnob < p2->lp_txqnob)
                return 1;

        if (p1->lp_txqnob > p2->lp_txqnob)
                return -1;

        if (p1->lp_txcredits > p2->lp_txcredits)
                return 1;

        if (p1->lp_txcredits < p2->lp_txcredits)
                return -1;

        return 0;
}
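/* Illustrative example (not in the original source): given two routes to the
 * same remote net, one via a gateway 1 hop away with 5 tx credits and one via
 * a gateway 2 hops away with 100 credits, lnet_compare_routes() prefers the
 * 1-hop route: hop count is compared first, then the bytes queued on the
 * gateway (lp_txqnob, fewer is better), and only then the available tx
 * credits (more is better). */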
static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
        lnet_libmd_t *md = msg->msg_md;

        LASSERT (msg->msg_len > 0);
        LASSERT (!msg->msg_routing);
        LASSERT (md != NULL);
        LASSERT (msg->msg_niov == 0);
        LASSERT (msg->msg_iov == NULL);
        LASSERT (msg->msg_kiov == NULL);

        msg->msg_niov = md->md_niov;
        if ((md->md_options & LNET_MD_KIOV) != 0)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
               unsigned int offset, unsigned int len)
{
        msg->msg_type = type;
        msg->msg_target = target;
        msg->msg_len = len;
        msg->msg_offset = offset;

        if (len != 0)
                lnet_setpayloadbuffer(msg);

        memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
        msg->msg_hdr.type           = cpu_to_le32(type);
        msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
        msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
        /* src_nid will be set later */
        msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
        msg->msg_hdr.payload_length = cpu_to_le32(len);
}

void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
        void *priv = msg->msg_private;
        int   rc;

        LASSERT (!cfs_in_interrupt ());
        LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
                 (msg->msg_txcredit && msg->msg_peertxcredit));

        rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
        if (rc < 0)
                lnet_finalize(ni, msg, rc);
}

int
lnet_eager_recv_locked(lnet_msg_t *msg)
{
        lnet_peer_t *peer;
        lnet_ni_t   *ni;
        int          rc = 0;

        LASSERT (!msg->msg_delayed);
        msg->msg_delayed = 1;

        LASSERT (msg->msg_receiving);
        LASSERT (!msg->msg_sending);

        peer = msg->msg_rxpeer;
        ni = peer->lp_ni;

        if (ni->ni_lnd->lnd_eager_recv != NULL) {
                LNET_UNLOCK();

                rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
                                                  &msg->msg_private);
                if (rc != 0) {
                        CERROR("recv from %s / send to %s aborted: "
                               "eager_recv failed %d\n",
                               libcfs_nid2str(peer->lp_nid),
                               libcfs_id2str(msg->msg_target), rc);
                        LASSERT (rc < 0); /* required by my callers */
                }

                LNET_LOCK();
        }

        return rc;
}

/* NB: caller shall hold a ref on 'lp' as I'd drop LNET_LOCK */
void
lnet_ni_peer_alive(lnet_peer_t *lp)
{
        cfs_time_t  last_alive = 0;
        lnet_ni_t  *ni = lp->lp_ni;

        LASSERT (lnet_peer_aliveness_enabled(lp));
        LASSERT (ni->ni_lnd->lnd_query != NULL);
        LASSERT (the_lnet.ln_routing == 1);

        LNET_UNLOCK();
        (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
        LNET_LOCK();

        lp->lp_last_query = cfs_time_current();

        if (last_alive != 0) /* NI has updated timestamp */
                lp->lp_last_alive = last_alive;
}

/* NB: always called with LNET_LOCK held */
static inline int
lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
{
        int        alive = 0;
        cfs_time_t deadline;

        LASSERT (lnet_peer_aliveness_enabled(lp));
        LASSERT (the_lnet.ln_routing == 1);

        /* Trust lnet_notify() if it has more recent aliveness news, but
         * ignore the initial assumed death (see lnet_peers_start_down()).
         */
        if (!lp->lp_alive && lp->lp_alive_count > 0 &&
            cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
                return 0;

        deadline = cfs_time_add(lp->lp_last_alive,
                                cfs_time_seconds(lp->lp_ni->ni_peertimeout));
        alive = cfs_time_after(deadline, now);

        /* Update obsolete lp_alive except for routers assumed to be dead
         * initially, because router checker would update aliveness in this
         * case, and moreover lp_last_alive at peer creation is assumed.
         */
        if (alive && !lp->lp_alive &&
            !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
                lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

        return alive;
}

/* NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the LNET_LOCK */
int
lnet_peer_alive_locked (lnet_peer_t *lp)
{
        cfs_time_t now = cfs_time_current();

        /* LU-630: only router checks peer health. */
        if (the_lnet.ln_routing == 0)
                return 1;

        if (!lnet_peer_aliveness_enabled(lp))
                return -ENODEV;

        if (lnet_peer_is_alive(lp, now))
                return 1;

        /* Peer appears dead, but we should avoid frequent NI queries (at
         * most once per lnet_queryinterval seconds). */
        if (lp->lp_last_query != 0) {
                static const int lnet_queryinterval = 1;

                cfs_time_t next_query =
                           cfs_time_add(lp->lp_last_query,
                                        cfs_time_seconds(lnet_queryinterval));

                if (cfs_time_before(now, next_query)) {
                        if (lp->lp_alive)
                                CWARN("Unexpected aliveness of peer %s: "
                                      "%d < %d (%d/%d)\n",
                                      libcfs_nid2str(lp->lp_nid),
                                      (int)now, (int)next_query,
                                      lnet_queryinterval,
                                      lp->lp_ni->ni_peertimeout);
                        return 0;
                }
        }

        /* query NI for latest aliveness news */
        lnet_ni_peer_alive(lp);

        if (lnet_peer_is_alive(lp, now))
                return 1;

        lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
        return 0;
}

int
lnet_post_send_locked (lnet_msg_t *msg, int do_send)
{
        /* lnet_send is going to LNET_UNLOCK immediately after this, so it sets
         * do_send FALSE and I don't do the unlock/send/lock bit.  I return
         * EAGAIN if msg blocked, EHOSTUNREACH if msg_txpeer appears dead, and
         * 0 if sent or OK to send */
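        /* Worked example (illustrative, not from the original source): if
         * lp_txcredits is 1, the first message through here takes the last
         * peer credit and is sent; a second message drives lp_txcredits to
         * -1, is flagged msg_delayed, parked on lp->lp_txq, and EAGAIN is
         * returned.  The queued message is resent by
         * lnet_return_credits_locked() when the first one completes. */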
        lnet_peer_t *lp = msg->msg_txpeer;
        lnet_ni_t   *ni = lp->lp_ni;

        /* non-lnet_send() callers have checked before */
        LASSERT (!do_send || msg->msg_delayed);
        LASSERT (!msg->msg_receiving);

        /* NB 'lp' is always the next hop */
        if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
            lnet_peer_alive_locked(lp) == 0) {
                the_lnet.ln_counters.drop_count++;
                the_lnet.ln_counters.drop_length += msg->msg_len;
                LNET_UNLOCK();

                CNETERR("Dropping message for %s: peer not alive\n",
                        libcfs_id2str(msg->msg_target));
                if (do_send)
                        lnet_finalize(ni, msg, -EHOSTUNREACH);

                LNET_LOCK();
                return EHOSTUNREACH;
        }

        if (!msg->msg_peertxcredit) {
                LASSERT ((lp->lp_txcredits < 0) ==
                         !cfs_list_empty(&lp->lp_txq));

                msg->msg_peertxcredit = 1;
                lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
                lp->lp_txcredits--;

                if (lp->lp_txcredits < lp->lp_mintxcredits)
                        lp->lp_mintxcredits = lp->lp_txcredits;

                if (lp->lp_txcredits < 0) {
                        msg->msg_delayed = 1;
                        cfs_list_add_tail(&msg->msg_list, &lp->lp_txq);
                        return EAGAIN;
                }
        }

        if (!msg->msg_txcredit) {
                LASSERT ((ni->ni_txcredits < 0) ==
                         !cfs_list_empty(&ni->ni_txq));

                msg->msg_txcredit = 1;
                ni->ni_txcredits--;

                if (ni->ni_txcredits < ni->ni_mintxcredits)
                        ni->ni_mintxcredits = ni->ni_txcredits;

                if (ni->ni_txcredits < 0) {
                        msg->msg_delayed = 1;
                        cfs_list_add_tail(&msg->msg_list, &ni->ni_txq);
                        return EAGAIN;
                }
        }

        if (do_send) {
                LNET_UNLOCK();
                lnet_ni_send(ni, msg);
                LNET_LOCK();
        }
        return 0;
}

static void
lnet_commit_routedmsg (lnet_msg_t *msg)
{
        /* ALWAYS called holding the LNET_LOCK */
        LASSERT (msg->msg_routing);

        the_lnet.ln_counters.msgs_alloc++;
        if (the_lnet.ln_counters.msgs_alloc >
            the_lnet.ln_counters.msgs_max)
                the_lnet.ln_counters.msgs_max =
                        the_lnet.ln_counters.msgs_alloc;

        the_lnet.ln_counters.route_count++;
        the_lnet.ln_counters.route_length += msg->msg_len;

        LASSERT (!msg->msg_onactivelist);
        msg->msg_onactivelist = 1;
        cfs_list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
}

lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
        lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];

        LASSERT (msg->msg_len <= LNET_MTU);
        while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
                rbp++;
                LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
        }

        return rbp;
}

static int
lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
{
        /* lnet_parse is going to LNET_UNLOCK immediately after this, so it
         * sets do_recv FALSE and I don't do the unlock/send/lock bit.  I
         * return EAGAIN if msg blocked and 0 if received or OK to receive */
        lnet_peer_t       *lp = msg->msg_rxpeer;
        lnet_rtrbufpool_t *rbp;
        lnet_rtrbuf_t     *rb;

        LASSERT (msg->msg_iov == NULL);
        LASSERT (msg->msg_kiov == NULL);
        LASSERT (msg->msg_niov == 0);
        LASSERT (msg->msg_routing);
        LASSERT (msg->msg_receiving);
        LASSERT (!msg->msg_sending);

        /* non-lnet_parse callers only send delayed messages */
        LASSERT (!do_recv || msg->msg_delayed);

        if (!msg->msg_peerrtrcredit) {
                LASSERT ((lp->lp_rtrcredits < 0) ==
                         !cfs_list_empty(&lp->lp_rtrq));

                msg->msg_peerrtrcredit = 1;
                lp->lp_rtrcredits--;
                if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
                        lp->lp_minrtrcredits = lp->lp_rtrcredits;

                if (lp->lp_rtrcredits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT (msg->msg_delayed);
                        cfs_list_add_tail(&msg->msg_list, &lp->lp_rtrq);
                        return EAGAIN;
                }
        }

        rbp = lnet_msg2bufpool(msg);

        if (!msg->msg_rtrcredit) {
                LASSERT ((rbp->rbp_credits < 0) ==
                         !cfs_list_empty(&rbp->rbp_msgs));

                msg->msg_rtrcredit = 1;
                rbp->rbp_credits--;
                if (rbp->rbp_credits < rbp->rbp_mincredits)
                        rbp->rbp_mincredits = rbp->rbp_credits;

                if (rbp->rbp_credits < 0) {
                        /* must have checked eager_recv before here */
                        LASSERT (msg->msg_delayed);
                        cfs_list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
                        return EAGAIN;
                }
        }

        LASSERT (!cfs_list_empty(&rbp->rbp_bufs));
        rb = cfs_list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
        cfs_list_del(&rb->rb_list);

        msg->msg_niov = rbp->rbp_npages;
        msg->msg_kiov = &rb->rb_kiov[0];

        if (do_recv) {
                LNET_UNLOCK();
                lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
                             0, msg->msg_len, msg->msg_len);
                LNET_LOCK();
        }
        return 0;
}

void
lnet_return_credits_locked (lnet_msg_t *msg)
{
        lnet_peer_t *txpeer = msg->msg_txpeer;
        lnet_peer_t *rxpeer = msg->msg_rxpeer;
        lnet_msg_t  *msg2;
        lnet_ni_t   *ni;

        if (msg->msg_txcredit) {
                /* give back NI txcredits */
                msg->msg_txcredit = 0;
                ni = txpeer->lp_ni;

                LASSERT((ni->ni_txcredits < 0) == !cfs_list_empty(&ni->ni_txq));

                ni->ni_txcredits++;
                if (ni->ni_txcredits <= 0) {
                        msg2 = cfs_list_entry(ni->ni_txq.next, lnet_msg_t,
                                              msg_list);
                        cfs_list_del(&msg2->msg_list);

                        LASSERT(msg2->msg_txpeer->lp_ni == ni);
                        LASSERT(msg2->msg_delayed);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (msg->msg_peertxcredit) {
                /* give back peer txcredits */
                msg->msg_peertxcredit = 0;

                LASSERT((txpeer->lp_txcredits < 0) ==
                        !cfs_list_empty(&txpeer->lp_txq));

                txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
                LASSERT (txpeer->lp_txqnob >= 0);

                txpeer->lp_txcredits++;
                if (txpeer->lp_txcredits <= 0) {
                        msg2 = cfs_list_entry(txpeer->lp_txq.next,
                                              lnet_msg_t, msg_list);
                        cfs_list_del(&msg2->msg_list);

                        LASSERT (msg2->msg_txpeer == txpeer);
                        LASSERT (msg2->msg_delayed);

                        (void) lnet_post_send_locked(msg2, 1);
                }
        }

        if (txpeer != NULL) {
                msg->msg_txpeer = NULL;
                lnet_peer_decref_locked(txpeer);
        }

        if (msg->msg_rtrcredit) {
                /* give back global router credits */
                lnet_rtrbuf_t     *rb;
                lnet_rtrbufpool_t *rbp;

                /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
                 * there until it gets one allocated, or aborts the wait
                 * itself */
                LASSERT (msg->msg_kiov != NULL);

                rb = cfs_list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
                rbp = rb->rb_pool;
                LASSERT (rbp == lnet_msg2bufpool(msg));

                msg->msg_kiov = NULL;
                msg->msg_rtrcredit = 0;

                LASSERT((rbp->rbp_credits < 0) ==
                        !cfs_list_empty(&rbp->rbp_msgs));
                LASSERT((rbp->rbp_credits > 0) ==
                        !cfs_list_empty(&rbp->rbp_bufs));

                cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
                rbp->rbp_credits++;
                if (rbp->rbp_credits <= 0) {
                        msg2 = cfs_list_entry(rbp->rbp_msgs.next,
                                              lnet_msg_t, msg_list);
                        cfs_list_del(&msg2->msg_list);

                        (void) lnet_post_routed_recv_locked(msg2, 1);
                }
        }

        if (msg->msg_peerrtrcredit) {
                /* give back peer router credits */
                msg->msg_peerrtrcredit = 0;

                LASSERT((rxpeer->lp_rtrcredits < 0) ==
                        !cfs_list_empty(&rxpeer->lp_rtrq));

                rxpeer->lp_rtrcredits++;
                if (rxpeer->lp_rtrcredits <= 0) {
                        msg2 = cfs_list_entry(rxpeer->lp_rtrq.next,
                                              lnet_msg_t, msg_list);
                        cfs_list_del(&msg2->msg_list);

                        (void) lnet_post_routed_recv_locked(msg2, 1);
                }
        }

        LASSERT (!msg->msg_rtrcredit);
        LASSERT (!msg->msg_peerrtrcredit);

        if (rxpeer != NULL) {
                msg->msg_rxpeer = NULL;
                lnet_peer_decref_locked(rxpeer);
        }
}

int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg)
{
        lnet_nid_t        dst_nid = msg->msg_target.nid;
        lnet_ni_t        *src_ni;
        lnet_ni_t        *local_ni;
        lnet_remotenet_t *rnet;
        lnet_route_t     *route;
        lnet_route_t     *best_route;
        cfs_list_t       *tmp;
        lnet_peer_t      *lp;
        lnet_peer_t      *lp2;
        int               rc;

        LASSERT (msg->msg_txpeer == NULL);
        LASSERT (!msg->msg_sending);
        LASSERT (!msg->msg_target_is_router);
        LASSERT (!msg->msg_receiving);

        msg->msg_sending = 1;

        /* NB! ni != NULL == interface pre-determined (ACK/REPLY) */

        LNET_LOCK();

        if (the_lnet.ln_shutdown) {
                LNET_UNLOCK();
                return -ESHUTDOWN;
        }

        if (src_nid == LNET_NID_ANY) {
                src_ni = NULL;
        } else {
                src_ni = lnet_nid2ni_locked(src_nid);
                if (src_ni == NULL) {
                        LNET_UNLOCK();
                        LCONSOLE_WARN("Can't send to %s: src %s is not a "
                                      "local nid\n", libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));
                        return -EINVAL;
                }
                LASSERT (!msg->msg_routing);
        }

        /* Is this for someone on a local network? */
        local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid));

        if (local_ni != NULL) {
                if (src_ni == NULL) {
                        src_ni = local_ni;
                        src_nid = src_ni->ni_nid;
                } else if (src_ni == local_ni) {
                        lnet_ni_decref_locked(local_ni);
                } else {
                        lnet_ni_decref_locked(local_ni);
                        lnet_ni_decref_locked(src_ni);
                        LNET_UNLOCK();
                        LCONSOLE_WARN("No route to %s from %s\n",
                                      libcfs_nid2str(dst_nid),
                                      libcfs_nid2str(src_nid));
                        return -EINVAL;
                }

                LASSERT (src_nid != LNET_NID_ANY);

                if (!msg->msg_routing)
                        msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

                if (src_ni == the_lnet.ln_loni) {
                        /* No send credit hassles with LOLND */
                        LNET_UNLOCK();
                        lnet_ni_send(src_ni, msg);
                        lnet_ni_decref(src_ni);
                        return 0;
                }

                rc = lnet_nid2peer_locked(&lp, dst_nid);
                lnet_ni_decref_locked(src_ni);  /* lp has ref on src_ni; lose mine */
                if (rc != 0) {
                        LNET_UNLOCK();
                        LCONSOLE_WARN("Error %d finding peer %s\n", rc,
                                      libcfs_nid2str(dst_nid));
                        /* ENOMEM or shutting down */
                        return rc;
                }
                LASSERT (lp->lp_ni == src_ni);
        } else {
#ifndef __KERNEL__
                LNET_UNLOCK();

                /* NB
                 * - once application finishes computation, check here to update
                 *   router states before it waits for pending IO in LNetEQPoll
                 * - recursion breaker: router checker sends no message
                 *   to remote networks */
                if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING)
                        lnet_router_checker();

                LNET_LOCK();
#endif
                /* sending to a remote network */
                rnet = lnet_find_net_locked(LNET_NIDNET(dst_nid));
                if (rnet == NULL) {
                        if (src_ni != NULL)
                                lnet_ni_decref_locked(src_ni);
                        LNET_UNLOCK();
                        LCONSOLE_WARN("No route to %s\n",
                                      libcfs_id2str(msg->msg_target));
                        return -EHOSTUNREACH;
                }

                /* Find the best gateway I can use */
                lp = NULL;
                best_route = NULL;
                cfs_list_for_each(tmp, &rnet->lrn_routes) {
                        route = cfs_list_entry(tmp, lnet_route_t, lr_list);
                        lp2 = route->lr_gateway;

                        if (lp2->lp_alive &&
                            lnet_router_down_ni(lp2, rnet->lrn_net) <= 0 &&
                            (src_ni == NULL || lp2->lp_ni == src_ni) &&
                            (best_route == NULL ||
                             lnet_compare_routes(route, best_route) > 0)) {
                                best_route = route;
                                lp = lp2;
                        }
                }

                if (best_route == NULL) {
                        if (src_ni != NULL)
                                lnet_ni_decref_locked(src_ni);
                        LNET_UNLOCK();

                        LCONSOLE_WARN("No route to %s via %s "
                                      "(all routers down)\n",
                                      libcfs_id2str(msg->msg_target),
                                      libcfs_nid2str(src_nid));
                        return -EHOSTUNREACH;
                }

                /* Place selected route at the end of the route list to ensure
                 * fairness; everything else being equal... */
                cfs_list_del(&best_route->lr_list);
                cfs_list_add_tail(&best_route->lr_list, &rnet->lrn_routes);
                CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
                       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
                       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

                if (src_ni == NULL) {
                        src_ni = lp->lp_ni;
                        src_nid = src_ni->ni_nid;
                } else {
                        LASSERT (src_ni == lp->lp_ni);
                        lnet_ni_decref_locked(src_ni);
                }

                lnet_peer_addref_locked(lp);

                LASSERT (src_nid != LNET_NID_ANY);

                if (!msg->msg_routing) {
                        /* I'm the source and now I know which NI to send on */
                        msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
                }

                msg->msg_target_is_router = 1;
                msg->msg_target.nid = lp->lp_nid;
                msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
        }

        /* 'lp' is our best choice of peer */

        LASSERT (!msg->msg_peertxcredit);
        LASSERT (!msg->msg_txcredit);
        LASSERT (msg->msg_txpeer == NULL);

        msg->msg_txpeer = lp;                   /* msg takes my ref on lp */

        rc = lnet_post_send_locked(msg, 0);
        LNET_UNLOCK();

        if (rc == EHOSTUNREACH)
                return -EHOSTUNREACH;

        if (rc == 0)
                lnet_ni_send(src_ni, msg);

        return 0;
}

static void
lnet_commit_md (lnet_libmd_t *md, lnet_msg_t *msg)
{
        /* ALWAYS called holding the LNET_LOCK */
        /* Here, we commit the MD to a network OP by marking it busy and
         * decrementing its threshold.  Come what may, the network "owns"
         * the MD until a call to lnet_finalize() signals completion. */
        LASSERT (!msg->msg_routing);

        msg->msg_md = md;

        md->md_refcount++;
        if (md->md_threshold != LNET_MD_THRESH_INF) {
                LASSERT (md->md_threshold > 0);
                md->md_threshold--;
        }

        the_lnet.ln_counters.msgs_alloc++;
        if (the_lnet.ln_counters.msgs_alloc >
            the_lnet.ln_counters.msgs_max)
                the_lnet.ln_counters.msgs_max =
                        the_lnet.ln_counters.msgs_alloc;

        LASSERT (!msg->msg_onactivelist);
        msg->msg_onactivelist = 1;
        cfs_list_add (&msg->msg_activelist, &the_lnet.ln_active_msgs);
}

void
lnet_drop_message (lnet_ni_t *ni, void *private, unsigned int nob)
{
        LNET_LOCK();
        the_lnet.ln_counters.drop_count++;
        the_lnet.ln_counters.drop_length += nob;
        LNET_UNLOCK();

        lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}

static void
lnet_drop_delayed_put(lnet_msg_t *msg, char *reason)
{
        lnet_process_id_t id = {0};

        id.nid = msg->msg_hdr.src_nid;
        id.pid = msg->msg_hdr.src_pid;

        LASSERT (msg->msg_md == NULL);
        LASSERT (msg->msg_delayed);
        LASSERT (msg->msg_rxpeer != NULL);
        LASSERT (msg->msg_hdr.type == LNET_MSG_PUT);

        CWARN("Dropping delayed PUT from %s portal %d match "LPU64
              " offset %d length %d: %s\n",
              libcfs_id2str(id),
              msg->msg_hdr.msg.put.ptl_index,
              msg->msg_hdr.msg.put.match_bits,
              msg->msg_hdr.msg.put.offset,
              msg->msg_hdr.payload_length,
              reason);

        /* NB I can't drop msg's ref on msg_rxpeer until after I've
         * called lnet_drop_message(), so I just hang onto msg as well
         * until that's done */

        lnet_drop_message(msg->msg_rxpeer->lp_ni,
                          msg->msg_private, msg->msg_len);

        LNET_LOCK();

        lnet_peer_decref_locked(msg->msg_rxpeer);
        msg->msg_rxpeer = NULL;

        lnet_msg_free_locked(msg);

        LNET_UNLOCK();
}

/**
 * Turn on the lazy portal attribute.  Use with caution!
 *
 * This portal attribute only affects incoming PUT requests to the portal,
 * and is off by default.  By default, if there's no matching MD for an
 * incoming PUT request, it is simply dropped.  With the lazy attribute on,
 * such requests are queued indefinitely until either a matching MD is
 * posted to the portal or the lazy attribute is turned off.
 *
 * While this prevents requests from being dropped, it should be regarded as
 * the last line of defense: users must keep a close watch on the number of
 * active buffers on a lazy portal and post more as soon as it runs low.
 * Delayed requests usually have detrimental effects on the underlying
 * network connections; a few delayed requests often suffice to bring an
 * underlying connection to a complete halt, due to flow control mechanisms.
 *
 * There's also a DOS attack risk.  If users don't post match-all MDs on a
 * lazy portal, a malicious peer can easily stop a service by sending some
 * PUT requests with match bits that won't match any MD.  A routed server is
 * especially vulnerable since the connections to its neighbor routers are
 * shared among all clients.
 *
 * \param portal Index of the portal to enable the lazy attribute on.
 *
 * \retval 0       On success.
 * \retval -EINVAL If \a portal is not a valid index.
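 *
 * Usage sketch (illustrative, not from the original documentation): a server
 * that posts request buffers on a portal can enable the attribute once at
 * startup, before posting its first MDs:
 *
 *      rc = LNetSetLazyPortal(RQST_PORTAL);
 *      LASSERT (rc == 0);
 *
 * so that early PUTs arriving before buffers are posted are queued rather
 * than dropped, and call LNetClearLazyPortal(RQST_PORTAL) on shutdown to
 * flush anything still blocked.  RQST_PORTAL here is a hypothetical portal
 * index.
 */
int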
LNetSetLazyPortal(int portal)
{
        lnet_portal_t *ptl = &the_lnet.ln_portals[portal];

        if (portal < 0 || portal >= the_lnet.ln_nportals)
                return -EINVAL;

        CDEBUG(D_NET, "Setting portal %d lazy\n", portal);

        LNET_LOCK();
        lnet_portal_setopt(ptl, LNET_PTL_LAZY);
        LNET_UNLOCK();

        return 0;
}

/**
 * Turn off the lazy portal attribute.  Delayed requests on the portal,
 * if any, will be all dropped when this function returns.
 *
 * \param portal Index of the portal to disable the lazy attribute on.
 *
 * \retval 0       On success.
 * \retval -EINVAL If \a portal is not a valid index.
 */
int
LNetClearLazyPortal(int portal)
{
        CFS_LIST_HEAD  (zombies);
        lnet_portal_t  *ptl = &the_lnet.ln_portals[portal];
        lnet_msg_t     *msg;

        if (portal < 0 || portal >= the_lnet.ln_nportals)
                return -EINVAL;

        LNET_LOCK();

        if (!lnet_portal_is_lazy(ptl)) {
                LNET_UNLOCK();
                return 0;
        }

        if (the_lnet.ln_shutdown)
                CWARN ("Active lazy portal %d on exit\n", portal);
        else
                CDEBUG (D_NET, "clearing portal %d lazy\n", portal);

        /* grab all the blocked messages atomically */
        cfs_list_add(&zombies, &ptl->ptl_msgq);
        cfs_list_del_init(&ptl->ptl_msgq);

        ptl->ptl_msgq_version++;
        lnet_portal_unsetopt(ptl, LNET_PTL_LAZY);

        LNET_UNLOCK();

        while (!cfs_list_empty(&zombies)) {
                msg = cfs_list_entry(zombies.next, lnet_msg_t, msg_list);
                cfs_list_del(&msg->msg_list);

                lnet_drop_delayed_put(msg, "Clearing lazy portal attr");
        }

        return 0;
}

void
lnet_recv_put(lnet_libmd_t *md, lnet_msg_t *msg, int delayed,
              unsigned int offset, unsigned int mlength)
{
        lnet_hdr_t *hdr = &msg->msg_hdr;

        LNET_LOCK();

        the_lnet.ln_counters.recv_count++;
        the_lnet.ln_counters.recv_length += mlength;

        LNET_UNLOCK();

        if (mlength != 0)
                lnet_setpayloadbuffer(msg);

        msg->msg_ev.type       = LNET_EVENT_PUT;
        msg->msg_ev.target.pid = hdr->dest_pid;
        msg->msg_ev.target.nid = hdr->dest_nid;
        msg->msg_ev.hdr_data   = hdr->msg.put.hdr_data;

        /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
         * it back into the ACK during lnet_finalize() */
        msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
                        (md->md_options & LNET_MD_ACK_DISABLE) == 0);

        lnet_ni_recv(msg->msg_rxpeer->lp_ni,
                     msg->msg_private,
                     msg, delayed, offset, mlength,
                     hdr->payload_length);
}

/* called with LNET_LOCK held */
void
lnet_match_blocked_msg(lnet_libmd_t *md)
{
        CFS_LIST_HEAD    (drops);
        CFS_LIST_HEAD    (matches);
        cfs_list_t       *tmp;
        cfs_list_t       *entry;
        lnet_msg_t       *msg;
        lnet_portal_t    *ptl;
        lnet_me_t        *me = md->md_me;

        LASSERT (me->me_portal < (unsigned int)the_lnet.ln_nportals);

        ptl = &the_lnet.ln_portals[me->me_portal];
        if (!lnet_portal_is_lazy(ptl)) {
                LASSERT (cfs_list_empty(&ptl->ptl_msgq));
                return;
        }

        LASSERT (md->md_refcount == 0); /* a brand new MD */

        cfs_list_for_each_safe (entry, tmp, &ptl->ptl_msgq) {
                int               rc;
                int               index;
                unsigned int      mlength;
                unsigned int      offset;
                lnet_hdr_t       *hdr;
                lnet_process_id_t src;

                msg = cfs_list_entry(entry, lnet_msg_t, msg_list);

                LASSERT (msg->msg_delayed);

                hdr   = &msg->msg_hdr;
                index = hdr->msg.put.ptl_index;

                src.nid = hdr->src_nid;
                src.pid = hdr->src_pid;

                rc = lnet_try_match_md(index, LNET_MD_OP_PUT, src,
                                       hdr->payload_length,
                                       hdr->msg.put.offset,
                                       hdr->msg.put.match_bits,
                                       md, msg, &mlength, &offset);

                if (rc == LNET_MATCHMD_NONE)
                        continue;

                /* Hurrah! This _is_ a match */
                cfs_list_del(&msg->msg_list);
                ptl->ptl_msgq_version++;

                if (rc == LNET_MATCHMD_OK) {
                        cfs_list_add_tail(&msg->msg_list, &matches);

                        CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
                               "match "LPU64" offset %d length %d.\n",
                               libcfs_id2str(src),
                               hdr->msg.put.ptl_index,
                               hdr->msg.put.match_bits,
                               hdr->msg.put.offset,
                               hdr->payload_length);
                } else {
                        LASSERT (rc == LNET_MATCHMD_DROP);

                        cfs_list_add_tail(&msg->msg_list, &drops);
                }

                if (lnet_md_exhausted(md))
                        break;
        }

        cfs_list_for_each_safe (entry, tmp, &drops) {
                msg = cfs_list_entry(entry, lnet_msg_t, msg_list);

                cfs_list_del(&msg->msg_list);

                lnet_drop_delayed_put(msg, "Bad match");
        }

        cfs_list_for_each_safe (entry, tmp, &matches) {
                msg = cfs_list_entry(entry, lnet_msg_t, msg_list);

                cfs_list_del(&msg->msg_list);

                /* md won't disappear under me, since each msg
                 * holds a ref on it */
                lnet_recv_put(md, msg, 1,
                              msg->msg_ev.offset,
                              msg->msg_ev.mlength);
        }
}

static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
        int               rc;
        int               index;
        __u64             version;
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        unsigned int      rlength = hdr->payload_length;
        unsigned int      mlength = 0;
        unsigned int      offset = 0;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;
        lnet_portal_t    *ptl;

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* Convert put fields to host byte order */
        hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
        hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
        hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

        index = hdr->msg.put.ptl_index;

        LNET_LOCK();

 again:
        rc = lnet_match_md(index, LNET_MD_OP_PUT, src,
                           rlength, hdr->msg.put.offset,
                           hdr->msg.put.match_bits, msg,
                           &mlength, &offset, &md);
        switch (rc) {
        default:
                LBUG();

        case LNET_MATCHMD_OK:
                LNET_UNLOCK();
                lnet_recv_put(md, msg, msg->msg_delayed, offset, mlength);
                return 0;

        case LNET_MATCHMD_NONE:
                ptl = &the_lnet.ln_portals[index];
                version = ptl->ptl_ml_version;

                rc = 0;
                if (!msg->msg_delayed)
                        rc = lnet_eager_recv_locked(msg);

                if (rc == 0 &&
                    !the_lnet.ln_shutdown &&
                    lnet_portal_is_lazy(ptl)) {
                        if (version != ptl->ptl_ml_version)
                                goto again;

                        cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
                        ptl->ptl_msgq_version++;
                        LNET_UNLOCK();

                        CDEBUG(D_NET, "Delaying PUT from %s portal %d match "
                               LPU64" offset %d length %d: no match\n",
                               libcfs_id2str(src), index,
                               hdr->msg.put.match_bits,
                               hdr->msg.put.offset, rlength);
                        return 0;
                }
                /* fall through */

        case LNET_MATCHMD_DROP:
                CNETERR("Dropping PUT from %s portal %d match "LPU64
                        " offset %d length %d: %d\n",
                        libcfs_id2str(src), index,
                        hdr->msg.put.match_bits,
                        hdr->msg.put.offset, rlength, rc);
                LNET_UNLOCK();

                return ENOENT;  /* +ve: OK but no match */
        }
}

static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
        lnet_hdr_t        *hdr = &msg->msg_hdr;
        unsigned int       mlength = 0;
        unsigned int       offset = 0;
        lnet_process_id_t  src = {0};
        lnet_handle_wire_t reply_wmd;
        lnet_libmd_t      *md;
        int                rc;

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* Convert get fields to host byte order */
        hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
        hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
        hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
        hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);

        LNET_LOCK();

        rc = lnet_match_md(hdr->msg.get.ptl_index, LNET_MD_OP_GET, src,
                           hdr->msg.get.sink_length, hdr->msg.get.src_offset,
                           hdr->msg.get.match_bits, msg,
                           &mlength, &offset, &md);
        if (rc == LNET_MATCHMD_DROP) {
                CNETERR("Dropping GET from %s portal %d match "LPU64
                        " offset %d length %d\n",
                        libcfs_id2str(src),
                        hdr->msg.get.ptl_index,
                        hdr->msg.get.match_bits,
                        hdr->msg.get.src_offset,
                        hdr->msg.get.sink_length);
                LNET_UNLOCK();
                return ENOENT;  /* +ve: OK but no match */
        }

        LASSERT (rc == LNET_MATCHMD_OK);

        the_lnet.ln_counters.send_count++;
        the_lnet.ln_counters.send_length += mlength;

        LNET_UNLOCK();

        msg->msg_ev.type = LNET_EVENT_GET;
        msg->msg_ev.target.pid = hdr->dest_pid;
        msg->msg_ev.target.nid = hdr->dest_nid;
        msg->msg_ev.hdr_data = 0;

        reply_wmd = hdr->msg.get.return_wmd;

        lnet_prep_send(msg, LNET_MSG_REPLY, src, offset, mlength);

        msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

        if (rdma_get) {
                /* The LND completes the REPLY from her recv procedure */
                lnet_ni_recv(ni, msg->msg_private, msg, 0,
                             msg->msg_offset, msg->msg_len, msg->msg_len);
                return 0;
        }

        lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
        msg->msg_receiving = 0;

        rc = lnet_send(ni->ni_nid, msg);
        if (rc < 0) {
                /* didn't get as far as lnet_ni_send() */
                CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), rc);

                lnet_finalize(ni, msg, rc);
        }

        return 0;
}

static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
        void             *private = msg->msg_private;
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;
        int               rlength;
        int               mlength;

        LNET_LOCK();

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                CNETERR("%s: Dropping REPLY from %s for %s "
                        "MD "LPX64"."LPX64"\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        (md == NULL) ? "invalid" : "inactive",
                        hdr->msg.reply.dst_wmd.wh_interface_cookie,
                        hdr->msg.reply.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                LNET_UNLOCK();
                return ENOENT;  /* +ve: OK but no match */
        }

        LASSERT (md->md_offset == 0);

        rlength = hdr->payload_length;
        mlength = MIN(rlength, (int)md->md_length);

        if (mlength < rlength &&
            (md->md_options & LNET_MD_TRUNCATE) == 0) {
                CNETERR("%s: Dropping REPLY from %s length %d "
                        "for MD "LPX64" would overflow (%d)\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
                        mlength);
                LNET_UNLOCK();
                return ENOENT;  /* +ve: OK but no match */
        }

        CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md "LPX64"\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

        lnet_commit_md(md, msg);

        if (mlength != 0)
                lnet_setpayloadbuffer(msg);

        msg->msg_ev.type = LNET_EVENT_REPLY;
        msg->msg_ev.target.pid = hdr->dest_pid;
        msg->msg_ev.target.nid = hdr->dest_nid;
        msg->msg_ev.initiator = src;
        msg->msg_ev.rlength = rlength;
        msg->msg_ev.mlength = mlength;
        msg->msg_ev.offset = 0;

        lnet_md_deconstruct(md, &msg->msg_ev.md);
        lnet_md2handle(&msg->msg_ev.md_handle, md);

        the_lnet.ln_counters.recv_count++;
        the_lnet.ln_counters.recv_length += mlength;

        LNET_UNLOCK();

        lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
        return 0;
}

static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
        lnet_hdr_t       *hdr = &msg->msg_hdr;
        lnet_process_id_t src = {0};
        lnet_libmd_t     *md;

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        /* Convert ack fields to host byte order */
        hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
        hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

        LNET_LOCK();

        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                /* Don't moan; this is expected */
                CDEBUG(D_NET,
                       "%s: Dropping ACK from %s to %s MD "LPX64"."LPX64"\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                       (md == NULL) ? "invalid" : "inactive",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);

                LNET_UNLOCK();
                return ENOENT;  /* +ve! */
        }

        CDEBUG(D_NET, "%s: ACK from %s into md "LPX64"\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
               hdr->msg.ack.dst_wmd.wh_object_cookie);

        lnet_commit_md(md, msg);

        msg->msg_ev.type = LNET_EVENT_ACK;
        msg->msg_ev.target.pid = hdr->dest_pid;
        msg->msg_ev.target.nid = hdr->dest_nid;
        msg->msg_ev.initiator = src;
        msg->msg_ev.mlength = hdr->msg.ack.mlength;
        msg->msg_ev.match_bits = hdr->msg.ack.match_bits;

        lnet_md_deconstruct(md, &msg->msg_ev.md);
        lnet_md2handle(&msg->msg_ev.md_handle, md);

        the_lnet.ln_counters.recv_count++;

        LNET_UNLOCK();

        lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
        return 0;
}

char *
lnet_msgtyp2str (int type)
{
        switch (type) {
        case LNET_MSG_ACK:
                return ("ACK");
        case LNET_MSG_PUT:
                return ("PUT");
        case LNET_MSG_GET:
                return ("GET");
        case LNET_MSG_REPLY:
                return ("REPLY");
        case LNET_MSG_HELLO:
                return ("HELLO");
        default:
                return ("<UNKNOWN>");
        }
}

void
lnet_print_hdr(lnet_hdr_t *hdr)
{
        lnet_process_id_t src = {0};
        lnet_process_id_t dst = {0};
        char *type_str = lnet_msgtyp2str (hdr->type);

        src.nid = hdr->src_nid;
        src.pid = hdr->src_pid;

        dst.nid = hdr->dest_nid;
        dst.pid = hdr->dest_pid;

        CWARN("P3 Header at %p of type %s\n", hdr, type_str);
        CWARN("    From %s\n", libcfs_id2str(src));
        CWARN("    To   %s\n", libcfs_id2str(dst));

        switch (hdr->type) {
        default:
                break;

        case LNET_MSG_PUT:
                CWARN("    Ptl index %d, ack md "LPX64"."LPX64", "
                      "match bits "LPU64"\n",
                      hdr->msg.put.ptl_index,
                      hdr->msg.put.ack_wmd.wh_interface_cookie,
                      hdr->msg.put.ack_wmd.wh_object_cookie,
                      hdr->msg.put.match_bits);
                CWARN("    Length %d, offset %d, hdr data "LPX64"\n",
                      hdr->payload_length, hdr->msg.put.offset,
                      hdr->msg.put.hdr_data);
                break;

        case LNET_MSG_GET:
                CWARN("    Ptl index %d, return md "LPX64"."LPX64", "
                      "match bits "LPU64"\n", hdr->msg.get.ptl_index,
                      hdr->msg.get.return_wmd.wh_interface_cookie,
                      hdr->msg.get.return_wmd.wh_object_cookie,
                      hdr->msg.get.match_bits);
                CWARN("    Length %d, src offset %d\n",
                      hdr->msg.get.sink_length,
                      hdr->msg.get.src_offset);
                break;

        case LNET_MSG_ACK:
                CWARN("    dst md "LPX64"."LPX64", "
                      "manipulated length %d\n",
                      hdr->msg.ack.dst_wmd.wh_interface_cookie,
                      hdr->msg.ack.dst_wmd.wh_object_cookie,
                      hdr->msg.ack.mlength);
                break;

        case LNET_MSG_REPLY:
                CWARN("    dst md "LPX64"."LPX64", "
                      "length %d\n",
                      hdr->msg.reply.dst_wmd.wh_interface_cookie,
                      hdr->msg.reply.dst_wmd.wh_object_cookie,
                      hdr->payload_length);
        }
}

int
lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
           void *private, int rdma_req)
{
        int            rc = 0;
        int            for_me;
        lnet_msg_t    *msg;
        lnet_pid_t     dest_pid;
        lnet_nid_t     dest_nid;
        lnet_nid_t     src_nid;
        __u32          payload_length;
        __u32          type;

        LASSERT (!cfs_in_interrupt ());

        type = le32_to_cpu(hdr->type);
        src_nid = le64_to_cpu(hdr->src_nid);
        dest_nid = le64_to_cpu(hdr->dest_nid);
        dest_pid = le32_to_cpu(hdr->dest_pid);
        payload_length = le32_to_cpu(hdr->payload_length);

        for_me = (ni->ni_nid == dest_nid);

        switch (type) {
        case LNET_MSG_ACK:
        case LNET_MSG_GET:
                if (payload_length > 0) {
                        CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               lnet_msgtyp2str(type), payload_length);
                        return -EPROTO;
                }
                break;

        case LNET_MSG_PUT:
        case LNET_MSG_REPLY:
                if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
                        CERROR("%s, src %s: bad %s payload %d "
                               "(%d max expected)\n",
                               libcfs_nid2str(from_nid),
                               libcfs_nid2str(src_nid),
                               lnet_msgtyp2str(type),
                               payload_length,
                               for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
                        return -EPROTO;
                }
                break;

        default:
                CERROR("%s, src %s: Bad message type 0x%x\n",
                       libcfs_nid2str(from_nid),
                       libcfs_nid2str(src_nid), type);
                return -EPROTO;
        }

        if (the_lnet.ln_routing) {
                cfs_time_t now = cfs_time_current();

                LNET_LOCK();

                ni->ni_last_alive = now;
                if (ni->ni_status != NULL &&
                    ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
                        ni->ni_status->ns_status = LNET_NI_STATUS_UP;

                LNET_UNLOCK();
        }

        /* Regard a bad destination NID as a protocol error.  Senders should
         * know what they're doing; if they don't they're misconfigured, buggy
         * or malicious so we chop them off at the knees :) */

        if (!for_me) {
                if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
                        /* should have gone direct */
                        CERROR ("%s, src %s: Bad dest nid %s "
                                "(should have been sent direct)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (lnet_islocalnid(dest_nid)) {
                        /* dest is another local NI; sender should have used
                         * this node's NID on its own network */
                        CERROR ("%s, src %s: Bad dest nid %s "
                                "(it's my nid but on a different network)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (rdma_req && type == LNET_MSG_GET) {
                        CERROR ("%s, src %s: Bad optimized GET for %s "
                                "(final destination must be me)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        return -EPROTO;
                }

                if (!the_lnet.ln_routing) {
                        CERROR ("%s, src %s: Dropping message for %s "
                                "(routing not enabled)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                libcfs_nid2str(dest_nid));
                        goto drop;
                }
        }

        /* Message looks OK; we're not going to return an error, so we MUST
         * call back lnd_recv() come what may... */

        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
            fail_peer (src_nid, 0))                      /* shall we now? */
        {
                CERROR("%s, src %s: Dropping %s to simulate failure\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));
                goto drop;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("%s, src %s: Dropping %s (out of memory)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type));
                goto drop;
        }

        /* msg zeroed in lnet_msg_alloc; i.e. flags all clear, pointers NULL etc */

        msg->msg_type = type;
        msg->msg_private = private;
        msg->msg_receiving = 1;
        msg->msg_len = msg->msg_wanted = payload_length;
        msg->msg_offset = 0;
        msg->msg_hdr = *hdr;

        LNET_LOCK();
        rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid);
        if (rc != 0) {
                LNET_UNLOCK();
                CERROR("%s, src %s: Dropping %s "
                       "(error %d looking up sender)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
                       lnet_msgtyp2str(type), rc);
                lnet_msg_free(msg);
                goto drop;
        }

        if (!for_me) {
                msg->msg_target.pid = dest_pid;
                msg->msg_target.nid = dest_nid;
                msg->msg_routing = 1;
                msg->msg_offset = 0;

                if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
                    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
                        rc = lnet_eager_recv_locked(msg);
                        if (rc != 0) {
                                LNET_UNLOCK();
                                goto free_drop;
                        }
                }

                lnet_commit_routedmsg(msg);
                rc = lnet_post_routed_recv_locked(msg, 0);
                LNET_UNLOCK();

                if (rc == 0)
                        lnet_ni_recv(ni, msg->msg_private, msg, 0,
                                     0, payload_length, payload_length);
                return 0;
        }
        LNET_UNLOCK();

        /* convert common msg->hdr fields to host byteorder */
        msg->msg_hdr.type = type;
        msg->msg_hdr.src_nid = src_nid;
        msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
        msg->msg_hdr.dest_nid = dest_nid;
        msg->msg_hdr.dest_pid = dest_pid;
        msg->msg_hdr.payload_length = payload_length;

        msg->msg_ev.sender = from_nid;

        switch (type) {
        case LNET_MSG_ACK:
                rc = lnet_parse_ack(ni, msg);
                break;
        case LNET_MSG_PUT:
                rc = lnet_parse_put(ni, msg);
                break;
        case LNET_MSG_GET:
                rc = lnet_parse_get(ni, msg, rdma_req);
                break;
        case LNET_MSG_REPLY:
                rc = lnet_parse_reply(ni, msg);
                break;
        default:
                LASSERT(0);
                goto free_drop;  /* prevent an unused label if !kernel */
        }

        if (rc == 0)
                return 0;

        LASSERT (rc == ENOENT);

 free_drop:
        LASSERT (msg->msg_md == NULL);

        LNET_LOCK();
        if (msg->msg_rxpeer != NULL) {
                lnet_peer_decref_locked(msg->msg_rxpeer);
                msg->msg_rxpeer = NULL;
        }
        lnet_msg_free_locked(msg);      /* expects LNET_LOCK held */
        LNET_UNLOCK();

 drop:
        lnet_drop_message(ni, private, payload_length);
        return 0;
}

/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed to
 * by \a mdh handle. Using a MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval 0       Success, and only in this case events will be generated
 * and logged to EQ (if it exists).
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
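 *
 * Usage sketch (illustrative; handle names are hypothetical): send the
 * buffer described by a previously bound MD to portal 4 of a peer,
 * requesting an ACK:
 *
 *      rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target_id,
 *                   4, 0x0eadbeef, 0, 0);
 *
 * LNET_EVENT_SEND and (if requested) LNET_EVENT_ACK then arrive on the EQ
 * attached to the MD, in no guaranteed order.
 */
int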
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset,
        __u64 hdr_data)
{
        lnet_msg_t   *msg;
        lnet_libmd_t *md;
        int           rc;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
            fail_peer (target.nid, 1))                   /* shall we now? */
        {
                CERROR("Dropping PUT to %s: simulated failure\n",
                       libcfs_id2str(target));
                return -EIO;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));
                return -ENOMEM;
        }
        msg->msg_vmflush = !!cfs_memory_pressure_get();

        LNET_LOCK();

        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                lnet_msg_free_locked(msg);

                CERROR("Dropping PUT ("LPU64":%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("Source MD also attached to portal %d\n",
                               md->md_me->me_portal);

                LNET_UNLOCK();
                return -ENOENT;
        }

        CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

        lnet_commit_md(md, msg);

        lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

        msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.put.hdr_data = hdr_data;

        /* NB handles only looked up by creator (no flips) */
        if (ack == LNET_ACK_REQ) {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        the_lnet.ln_interface_cookie;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        md->md_lh.lh_cookie;
        } else {
                msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
                msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
                        LNET_WIRE_HANDLE_COOKIE_NONE;
        }

        msg->msg_ev.type = LNET_EVENT_SEND;
        msg->msg_ev.initiator.nid = LNET_NID_ANY;
        msg->msg_ev.initiator.pid = the_lnet.ln_pid;
        msg->msg_ev.target = target;
        msg->msg_ev.sender = LNET_NID_ANY;
        msg->msg_ev.pt_index = portal;
        msg->msg_ev.match_bits = match_bits;
        msg->msg_ev.rlength = md->md_length;
        msg->msg_ev.mlength = md->md_length;
        msg->msg_ev.offset = offset;
        msg->msg_ev.hdr_data = hdr_data;

        lnet_md_deconstruct(md, &msg->msg_ev.md);
        lnet_md2handle(&msg->msg_ev.md_handle, md);

        the_lnet.ln_counters.send_count++;
        the_lnet.ln_counters.send_length += md->md_length;

        LNET_UNLOCK();

        rc = lnet_send(self, msg);
        if (rc != 0) {
                CNETERR("Error sending PUT to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize (NULL, msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}

lnet_msg_t *
lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
{
        /* The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
         * returns a msg for the LND to pass to lnet_finalize() when the sink
         * data has been received.
         *
         * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
         * lnet_finalize() is called on it, so the LND must call this first */
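        /* Typical LND-side usage (illustrative sketch, not from the original
         * file): on receiving an RDMA completion for an optimized GET, the
         * LND does roughly
         *
         *      reply = lnet_create_reply_msg(ni, getmsg);
         *      ...RDMA the sink data...
         *      lnet_set_reply_msg_len(ni, reply, nob_received);
         *      lnet_finalize(ni, reply, status);
         *
         * and finalizes 'getmsg' separately, after this call. */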
2566 lnet_msg_t *msg = lnet_msg_alloc();
2567 lnet_libmd_t *getmd = getmsg->msg_md;
2568 lnet_process_id_t peer_id = getmsg->msg_target;
2570 LASSERT (!getmsg->msg_target_is_router);
2571 LASSERT (!getmsg->msg_routing);
2575 LASSERT (getmd->md_refcount > 0);
2578 CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
2579 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
        if (getmd->md_threshold == 0) {
                CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
                        getmd);
                goto drop_msg;
        }

        LASSERT (getmd->md_offset == 0);

        CDEBUG(D_NET, "%s: Reply from %s md %p\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

        lnet_commit_md (getmd, msg);

        msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */

        msg->msg_ev.type = LNET_EVENT_REPLY;
        msg->msg_ev.initiator = peer_id;
        msg->msg_ev.sender = peer_id.nid;  /* optimized GETs can't be routed */
        msg->msg_ev.rlength = msg->msg_ev.mlength = getmd->md_length;
        msg->msg_ev.offset = 0;

        lnet_md_deconstruct(getmd, &msg->msg_ev.md);
        lnet_md2handle(&msg->msg_ev.md_handle, getmd);

        the_lnet.ln_counters.recv_count++;
        the_lnet.ln_counters.recv_length += getmd->md_length;

        LNET_UNLOCK();

        return msg;
 drop_msg:
        lnet_msg_free_locked(msg);
 drop:
        the_lnet.ln_counters.drop_count++;
        the_lnet.ln_counters.drop_length += getmd->md_length;

        LNET_UNLOCK();

        return NULL;
}
void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
        /* Set the REPLY length, now the RDMA that elides the REPLY message
         * has completed and I know it. */
        LASSERT (reply != NULL);
        LASSERT (reply->msg_type == LNET_MSG_GET);
        LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);

        /* NB I trusted my peer to RDMA.  If she tells me she's written beyond
         * the end of my buffer, I might as well be dead. */
        LASSERT (len <= reply->msg_ev.mlength);

        reply->msg_ev.mlength = len;
}
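
/*
 * Sketch of the intended calling pattern for the two helpers above, as an
 * LND might use them when it RDMAs GET data directly and elides the REPLY
 * message ('ni', 'getmsg', 'nob' and 'status' are hypothetical LND state;
 * see the individual LND sources for real uses):
 *
 *      lnet_msg_t *reply;
 *
 *      reply = lnet_create_reply_msg(ni, getmsg);
 *      lnet_finalize(ni, getmsg, 0);   (frees 'getmsg', so create first)
 *
 *      ... later, when the RDMA completes with 'nob' bytes received ...
 *
 *      if (reply != NULL) {
 *              lnet_set_reply_msg_len(ni, reply, nob);
 *              lnet_finalize(ni, reply, status);
 *      }
 */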
/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating" (See
 * LNetMDBind()).
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if it exists) of the MD.
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
        lnet_process_id_t target, unsigned int portal,
        __u64 match_bits, unsigned int offset)
{
        lnet_msg_t       *msg;
        lnet_libmd_t     *md;
        int               rc;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
            fail_peer (target.nid, 1))           /* shall we now? */
        {
                CERROR("Dropping GET to %s: simulated failure\n",
                       libcfs_id2str(target));
                return -EIO;
        }

        msg = lnet_msg_alloc();
        if (msg == NULL) {
                CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
                       libcfs_id2str(target));
                return -ENOMEM;
        }

        LNET_LOCK();
        md = lnet_handle2md(&mdh);
        if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
                lnet_msg_free_locked(msg);

                CERROR("Dropping GET ("LPU64":%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       md == NULL ? -1 : md->md_threshold);
                if (md != NULL && md->md_me != NULL)
                        CERROR("REPLY MD also attached to portal %d\n",
                               md->md_me->me_portal);

                LNET_UNLOCK();
                return -ENOENT;
        }

        CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
        lnet_commit_md(md, msg);

        lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

        msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
        msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
        msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
        msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

        /* NB handles only looked up by creator (no flips) */
        msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
                the_lnet.ln_interface_cookie;
        msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
                md->md_lh.lh_cookie;
        msg->msg_ev.type = LNET_EVENT_SEND;
        msg->msg_ev.initiator.nid = LNET_NID_ANY;
        msg->msg_ev.initiator.pid = the_lnet.ln_pid;
        msg->msg_ev.target = target;
        msg->msg_ev.sender = LNET_NID_ANY;
        msg->msg_ev.pt_index = portal;
        msg->msg_ev.match_bits = match_bits;
        msg->msg_ev.rlength = md->md_length;
        msg->msg_ev.mlength = md->md_length;
        msg->msg_ev.offset = offset;
        msg->msg_ev.hdr_data = 0;

        lnet_md_deconstruct(md, &msg->msg_ev.md);
        lnet_md2handle(&msg->msg_ev.md_handle, md);

        the_lnet.ln_counters.send_count++;
        LNET_UNLOCK();

        rc = lnet_send(self, msg);
        if (rc != 0) {
                CNETERR("Error sending GET to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize(NULL, msg, rc);
        }

        /* completion will be signalled by an event */
        return 0;
}
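
/*
 * Usage sketch for LNetGet() (illustrative only): 'sink_md_handle' is a
 * hypothetical free-floating MD from LNetMDBind() describing where the
 * fetched data should land; 'server_id', 'my_portal' and 'my_match_bits'
 * are likewise caller state:
 *
 *      int rc;
 *
 *      rc = LNetGet(LNET_NID_ANY, sink_md_handle,
 *                   server_id, my_portal, my_match_bits, 0);
 *      if (rc != 0)
 *              CERROR("GET from %s failed: %d\n",
 *                     libcfs_id2str(server_id), rc);
 *
 * On success the caller sees LNET_EVENT_SEND when the request goes out,
 * then LNET_EVENT_REPLY once the remote data has been written into the
 * sink MD.
 */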
/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface, and the module option
 * local_nid_dist_zero is set, which is the default.
 * \retval positives Distance to the target NID, i.e. the number of hops plus
 * one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist (lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
        cfs_list_t       *e;
        lnet_ni_t        *ni;
        lnet_remotenet_t *rnet;
        __u32             dstnet = LNET_NIDNET(dstnid);
        int               hops;
        __u32             order = 2;

        /* if !local_nid_dist_zero, I don't return a distance of 0 ever
         * (when lustre sees a distance of 0, it substitutes 0@lo), so I
         * keep order 0 free for 0@lo and order 1 free for a local NID
         * match */

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        LNET_LOCK();

        cfs_list_for_each (e, &the_lnet.ln_nis) {
                ni = cfs_list_entry(e, lnet_ni_t, ni_list);

                if (ni->ni_nid == dstnid) {
                        if (srcnidp != NULL)
                                *srcnidp = dstnid;
                        if (orderp != NULL) {
                                if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
                                        *orderp = 0;
                                else
                                        *orderp = 1;
                        }
                        LNET_UNLOCK();

                        return local_nid_dist_zero ? 0 : 1;
                }

                if (LNET_NIDNET(ni->ni_nid) == dstnet) {
                        if (srcnidp != NULL)
                                *srcnidp = ni->ni_nid;
                        if (orderp != NULL)
                                *orderp = order;
                        LNET_UNLOCK();
                        return 1;
                }

                order++;
        }
        cfs_list_for_each (e, &the_lnet.ln_remote_nets) {
                rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == dstnet) {
                        lnet_route_t *route;
                        lnet_route_t *shortest = NULL;

                        LASSERT (!cfs_list_empty(&rnet->lrn_routes));

                        cfs_list_for_each_entry(route, &rnet->lrn_routes,
                                                lr_list) {
                                if (shortest == NULL ||
                                    route->lr_hops < shortest->lr_hops)
                                        shortest = route;
                        }

                        LASSERT (shortest != NULL);
                        hops = shortest->lr_hops;
                        if (srcnidp != NULL)
                                *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
                        if (orderp != NULL)
                                *orderp = order;
                        LNET_UNLOCK();
                        return hops + 1;
                }
                order++;
        }

        LNET_UNLOCK();
        return -EHOSTUNREACH;
}
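
/*
 * Usage sketch for LNetDist() (illustrative only; 'dstnid' is a
 * hypothetical NID the caller wants to evaluate):
 *
 *      lnet_nid_t src = LNET_NID_ANY;
 *      __u32      order = 0;
 *      int        dist;
 *
 *      dist = LNetDist(dstnid, &src, &order);
 *      if (dist < 0)
 *              CERROR("%s unreachable: %d\n",
 *                     libcfs_nid2str(dstnid), dist);
 *      else
 *              CDEBUG(D_NET, "%s: distance %d via %s, order %u\n",
 *                     libcfs_nid2str(dstnid), dist,
 *                     libcfs_nid2str(src), order);
 *
 * Note that 0 is only returned for local NIDs while local_nid_dist_zero
 * holds; order 0 is reserved for 0@lo and order 1 for a local NID match.
 */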
/**
 * Set the number of asynchronous messages expected from a target process.
 *
 * This function is only meaningful for userspace callers. It's a no-op when
 * called from the kernel.
 *
 * Asynchronous messages are those that can come from a target when the
 * userspace process is not waiting for IO to complete; e.g., AST callbacks
 * from Lustre servers. Specifying the expected number of such messages
 * allows them to be eagerly received when the user process is not running in
 * LNet; otherwise network errors may occur.
 *
 * \param id Process ID of the target process.
 * \param nasync Number of asynchronous messages expected from the target.
 *
 * \return 0 on success, and an error code otherwise.
 */
int
LNetSetAsync(lnet_process_id_t id, int nasync)
{
#ifdef __KERNEL__
        return 0;
#else
        lnet_ni_t        *ni;
        lnet_remotenet_t *rnet;
        cfs_list_t       *tmp;
        lnet_route_t     *route;
        lnet_nid_t       *nids;
        int               nnids;
        int               maxnids = 256;
        int               rc = 0;
        int               rc2;

        /* Target on a local network? */
        ni = lnet_net2ni(LNET_NIDNET(id.nid));
        if (ni != NULL) {
                if (ni->ni_lnd->lnd_setasync != NULL)
                        rc = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
                lnet_ni_decref(ni);
                return rc;
        }
        /* Target on a remote network: apply to routers */
 again:
        LIBCFS_ALLOC(nids, maxnids * sizeof(*nids));
        if (nids == NULL)
                return -ENOMEM;
        nnids = 0;

        /* Snapshot all the router NIDs */
        LNET_LOCK();
        rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
        if (rnet != NULL) {
                cfs_list_for_each(tmp, &rnet->lrn_routes) {
                        if (nnids == maxnids) {
                                LNET_UNLOCK();
                                LIBCFS_FREE(nids, maxnids * sizeof(*nids));
                                maxnids *= 2;
                                goto again;
                        }

                        route = cfs_list_entry(tmp, lnet_route_t, lr_list);
                        nids[nnids++] = route->lr_gateway->lp_nid;
                }
        }
        LNET_UNLOCK();
        /* set async on all the routers */
        while (nnids-- > 0) {
                id.pid = LUSTRE_SRV_LNET_PID;
                id.nid = nids[nnids];

                ni = lnet_net2ni(LNET_NIDNET(id.nid));
                if (ni == NULL)
                        continue;

                if (ni->ni_lnd->lnd_setasync != NULL) {
                        rc2 = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
                        if (rc2 != 0)
                                rc = rc2;
                }
                lnet_ni_decref(ni);
        }

        LIBCFS_FREE(nids, maxnids * sizeof(*nids));
        return rc;
#endif
}
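
/*
 * Usage sketch for LNetSetAsync() (illustrative only, userspace): a client
 * expecting up to eight unsolicited callbacks (e.g. ASTs) from a server
 * could announce that ahead of time ('server_nid' is hypothetical):
 *
 *      lnet_process_id_t srv;
 *      int               rc;
 *
 *      srv.nid = server_nid;
 *      srv.pid = LUSTRE_SRV_LNET_PID;
 *
 *      rc = LNetSetAsync(srv, 8);
 *      if (rc != 0)
 *              CERROR("Can't set async on %s: %d\n",
 *                     libcfs_id2str(srv), rc);
 */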