1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Data movement routines
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Copyright (c) 2001-2002 Sandia National Laboratories
10 * This file is part of Lustre, http://www.sf.net/projects/lustre/
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 # define DEBUG_SUBSYSTEM S_PORTALS
30 # include <linux/kp30.h>
32 #include <portals/p30.h>
33 #include <portals/lib-p30.h>
34 #include <portals/arg-blocks.h>
37 static void lib_commit_md (nal_cb_t *nal, lib_md_t *md, lib_msg_t *msg);
/* Match an incoming PUT/GET against the MEs attached to portal 'index':
 * walk the portal's match list, find the first ME/MD whose nid/pid/match-bits
 * and op accept this request, commit the MD and fill in *mlength_out,
 * *offset_out and the event fields of 'msg'.
 * NOTE(review): this extract has gaps (missing source lines); local
 * declarations, the 'continue' paths and the drop/return tail are not
 * fully visible — do not assume this fragment compiles as-is. */
40 lib_match_md(nal_cb_t *nal, int index, int op_mask,
41 ptl_nid_t src_nid, ptl_pid_t src_pid,
42 ptl_size_t rlength, ptl_size_t roffset,
43 ptl_match_bits_t match_bits, lib_msg_t *msg,
44 ptl_size_t *mlength_out, ptl_size_t *offset_out)
46 lib_ni_t *ni = &nal->ni;
47 struct list_head *match_list = &ni->tbl.tbl[index];
48 struct list_head *tmp;
55 CDEBUG (D_NET, "Request from "LPU64".%d of length %d into portal %d "
56 "MB="LPX64"\n", src_nid, src_pid, rlength, index, match_bits);
/* reject requests addressed to a portal outside the match table */
58 if (index < 0 || index >= ni->tbl.size) {
59 CERROR("Invalid portal %d not in [0-%d]\n",
64 list_for_each (tmp, match_list) {
65 me = list_entry(tmp, lib_me_t, me_list);
68 /* ME attached but MD not attached yet */
72 LASSERT (me == md->me);
74 /* mismatched MD op */
75 if ((md->options & op_mask) == 0)
79 if (lib_md_exhausted(md))
82 /* mismatched ME nid/pid? */
83 if (me->match_id.nid != PTL_NID_ANY &&
84 me->match_id.nid != src_nid)
87 if (me->match_id.pid != PTL_PID_ANY &&
88 me->match_id.pid != src_pid)
91 /* mismatched ME matchbits? */
92 if (((me->match_bits ^ match_bits) & ~me->ignore_bits) != 0)
95 /* Hurrah! This _is_ a match; check it out... */
/* local (manage-local) MDs use their own offset; remote-managed use roffset
 * — presumably set on the missing lines around here; TODO confirm */
97 if ((md->options & PTL_MD_MANAGE_REMOTE) == 0)
102 if ((md->options & PTL_MD_MAX_SIZE) != 0) {
103 mlength = md->max_size;
104 LASSERT (md->offset + mlength <= md->length);
106 mlength = md->length - offset;
109 if (rlength <= mlength) { /* fits in allowed space */
111 } else if ((md->options & PTL_MD_TRUNCATE) == 0) {
112 /* this packet _really_ is too big */
113 CERROR("Matching packet %d too big: %d left, "
114 "%d allowed\n", rlength, md->length - offset,
119 /* Commit to this ME/MD */
120 CDEBUG(D_NET, "Incoming %s index %x from "LPU64"/%u of "
121 "length %d/%d into md "LPX64" [%d] + %d\n",
122 (op_mask == PTL_MD_OP_PUT) ? "put" : "get",
123 index, src_nid, src_pid, mlength, rlength,
124 md->md_lh.lh_cookie, md->md_niov, offset);
126 lib_commit_md(nal, md, msg);
127 md->offset = offset + mlength;
129 /* NB Caller sets ev.type and ev.hdr_data */
130 msg->ev.initiator.nid = src_nid;
131 msg->ev.initiator.pid = src_pid;
132 msg->ev.portal = index;
133 msg->ev.match_bits = match_bits;
134 msg->ev.rlength = rlength;
135 msg->ev.mlength = mlength;
136 msg->ev.offset = offset;
138 lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
140 *offset_out = offset;
141 *mlength_out = mlength;
143 /* Auto-unlink NOW, so the ME gets unlinked if required.
144 * We bumped md->pending above so the MD just gets flagged
145 * for unlink when it is finalized. */
146 if ((md->md_flags & PTL_MD_FLAG_AUTO_UNLINK) != 0 &&
147 lib_md_exhausted(md))
148 lib_md_unlink(nal, md);
/* no ME matched: log and (on the missing lines) drop the request */
154 CERROR (LPU64": Dropping %s from "LPU64".%d portal %d match "LPX64
155 " offset %d length %d: no match\n",
156 ni->nid, (op_mask == PTL_MD_OP_GET) ? "GET" : "PUT",
157 src_nid, src_pid, index, match_bits, roffset, rlength);
/* PtlFailNid dispatcher: threshold != 0 adds a "test peer" entry that makes
 * traffic to args->nid fail 'threshold' times (see fail_peer()); threshold
 * == 0 removes matching entries (PTL_NID_ANY removes all) plus any zombies.
 * Entries are moved to a private 'cull' list under the lock and freed after
 * the lock is dropped, so cb_free is never called at lock/interrupt context.
 * NOTE(review): fragmentary extract — 'tp'/'flags' declarations and some
 * braces are on missing lines. */
161 int do_PtlFailNid (nal_cb_t *nal, void *private, void *v_args, void *v_ret)
163 PtlFailNid_in *args = v_args;
164 PtlFailNid_out *ret = v_ret;
167 struct list_head *el;
168 struct list_head *next;
169 struct list_head cull;
171 if (args->threshold != 0) {
172 /* Adding a new entry */
173 tp = (lib_test_peer_t *)nal->cb_malloc (nal, sizeof (*tp));
175 return (ret->rc = PTL_FAIL);
177 tp->tp_nid = args->nid;
178 tp->tp_threshold = args->threshold;
180 state_lock (nal, &flags);
181 list_add (&tp->tp_list, &nal->ni.ni_test_peers);
182 state_unlock (nal, &flags);
183 return (ret->rc = PTL_OK);
186 /* removing entries */
187 INIT_LIST_HEAD (&cull);
189 state_lock (nal, &flags);
191 list_for_each_safe (el, next, &nal->ni.ni_test_peers) {
192 tp = list_entry (el, lib_test_peer_t, tp_list);
194 if (tp->tp_threshold == 0 || /* needs culling anyway */
195 args->nid == PTL_NID_ANY || /* removing all entries */
196 tp->tp_nid == args->nid) /* matched this one */
198 list_del (&tp->tp_list);
199 list_add (&tp->tp_list, &cull);
203 state_unlock (nal, &flags);
/* free culled entries outside the state lock */
205 while (!list_empty (&cull)) {
206 tp = list_entry (cull.next, lib_test_peer_t, tp_list);
208 list_del (&tp->tp_list);
209 nal->cb_free (nal, tp, sizeof (*tp));
211 return (ret->rc = PTL_OK);
/* Decide whether traffic to/from 'nid' should be failed for test purposes.
 * Scans ni_test_peers; decrements finite thresholds on a match and culls
 * exhausted (threshold == 0) entries — but only on outgoing tests, since
 * incoming messages may arrive at interrupt priority.  Culled entries are
 * freed after the state lock is dropped.
 * NOTE(review): fragmentary extract — return value handling and 'tp'/'flags'
 * declarations are on missing lines; presumably returns nonzero to fail. */
215 fail_peer (nal_cb_t *nal, ptl_nid_t nid, int outgoing)
218 struct list_head *el;
219 struct list_head *next;
221 struct list_head cull;
224 INIT_LIST_HEAD (&cull);
226 state_lock (nal, &flags);
228 list_for_each_safe (el, next, &nal->ni.ni_test_peers) {
229 tp = list_entry (el, lib_test_peer_t, tp_list);
231 if (tp->tp_threshold == 0) {
234 /* only cull zombies on outgoing tests,
235 * since we may be at interrupt priority on
236 * incoming messages. */
237 list_del (&tp->tp_list);
238 list_add (&tp->tp_list, &cull);
243 if (tp->tp_nid == PTL_NID_ANY || /* fail every peer */
244 nid == tp->tp_nid) { /* fail this peer */
247 if (tp->tp_threshold != PTL_MD_THRESH_INF) {
250 tp->tp_threshold == 0) {
252 list_del (&tp->tp_list);
253 list_add (&tp->tp_list, &cull);
260 state_unlock (nal, &flags);
/* free culled entries outside the state lock */
262 while (!list_empty (&cull)) {
263 tp = list_entry (cull.next, lib_test_peer_t, tp_list);
264 list_del (&tp->tp_list);
266 nal->cb_free (nal, tp, sizeof (*tp));
/* Sum the byte count (number-of-bytes) over an iovec array.
 * NOTE(review): accumulator declaration / loop header are on missing lines. */
273 lib_iov_nob (int niov, struct iovec *iov)
278 nob += (iov++)->iov_len;
/* Gather 'len' bytes starting at byte 'offset' within the iovec array
 * into the flat buffer 'dest'.
 * NOTE(review): fragmentary — niov bookkeeping and the per-fragment copy
 * loop around the memcpy are on missing lines. */
284 lib_copy_iov2buf (char *dest, int niov, struct iovec *iov,
285 ptl_size_t offset, ptl_size_t len)
292 /* skip complete frags before 'offset' */
294 while (offset >= iov->iov_len) {
295 offset -= iov->iov_len;
303 nob = MIN (iov->iov_len - offset, len);
304 memcpy (dest, iov->iov_base + offset, nob);
/* Scatter 'len' bytes from the flat buffer 'src' into the iovec array,
 * starting at byte 'offset' within it.  Mirror of lib_copy_iov2buf().
 * NOTE(review): fragmentary — the per-fragment loop is on missing lines. */
315 lib_copy_buf2iov (int niov, struct iovec *iov, ptl_size_t offset,
316 char *src, ptl_size_t len)
323 /* skip complete frags before 'offset' */
325 while (offset >= iov->iov_len) {
326 offset -= iov->iov_len;
334 nob = MIN (iov->iov_len - offset, len);
335 memcpy (iov->iov_base + offset, src, nob);
/* Build in 'dst' the iovec subset of 'src' covering [offset, offset+len)
 * and return the number of dst entries used; 'src' is left untouched.
 * NOTE(review): fragmentary — src advancing, niov counting and the
 * truncated-last-fragment branch are on missing lines. */
346 lib_extract_iov (int dst_niov, struct iovec *dst,
347 int src_niov, struct iovec *src,
348 ptl_size_t offset, ptl_size_t len)
350 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
351 * for exactly 'len' bytes, and return the number of entries.
352 * NB not destructive to 'src' */
356 if (len == 0) /* no data => */
357 return (0); /* no frags */
359 LASSERT (src_niov > 0);
360 while (offset >= src->iov_len) { /* skip initial frags */
361 offset -= src->iov_len;
364 LASSERT (src_niov > 0);
369 LASSERT (src_niov > 0);
370 LASSERT (niov <= dst_niov);
372 frag_len = src->iov_len - offset;
373 dst->iov_base = ((char *)src->iov_base) + offset;
375 if (len <= frag_len) {
380 dst->iov_len = frag_len;
/* Userspace (non-kernel) kiov entry points.  kiov buffers are kernel page
 * vectors, so these presumably LASSERT/abort when built without __KERNEL__
 * — the stub bodies are on missing lines; TODO confirm. */
393 lib_kiov_nob (int niov, ptl_kiov_t *kiov)
400 lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov,
401 ptl_size_t offset, ptl_size_t len)
407 lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, ptl_size_t offset,
408 char *src, ptl_size_t len)
414 lib_extract_kiov (int dst_niov, ptl_kiov_t *dst,
415 int src_niov, ptl_kiov_t *src,
416 ptl_size_t offset, ptl_size_t len)
/* Sum the byte count over a kiov (page-fragment) array; kernel version.
 * NOTE(review): accumulator declaration / loop header are on missing lines. */
424 lib_kiov_nob (int niov, ptl_kiov_t *kiov)
429 nob += (kiov++)->kiov_len;
/* Gather 'len' bytes starting at byte 'offset' within the kiov (page
 * fragment) array into the flat buffer 'dest'.  Uses kmap/kunmap on each
 * page, hence must not run in interrupt context.
 * FIX: the frag-skipping loop used 'offset > kiov->kiov_len'; every sibling
 * (lib_copy_buf2kiov and the three iovec variants) uses '>='.  With '>',
 * an offset exactly equal to a fragment's length is not skipped and the
 * copy would start one-past-the-end of that fragment.
 * NOTE(review): fragmentary extract — niov bookkeeping and the per-fragment
 * copy loop around the memcpy are on missing lines. */
435 lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov,
436 ptl_size_t offset, ptl_size_t len)
444 LASSERT (!in_interrupt ());
447 while (offset >= kiov->kiov_len) {
448 offset -= kiov->kiov_len;
456 nob = MIN (kiov->kiov_len - offset, len);
458 addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
459 memcpy (dest, addr, nob);
460 kunmap (kiov->kiov_page);
/* Scatter 'len' bytes from the flat buffer 'src' into the kiov array,
 * starting at byte 'offset'.  kmap/kunmap per page => no interrupt context.
 * NOTE(review): fragmentary — the per-fragment loop is on missing lines. */
471 lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, ptl_size_t offset,
472 char *src, ptl_size_t len)
480 LASSERT (!in_interrupt ());
483 while (offset >= kiov->kiov_len) {
484 offset -= kiov->kiov_len;
492 nob = MIN (kiov->kiov_len - offset, len);
494 addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
495 memcpy (addr, src, nob);
496 kunmap (kiov->kiov_page);
/* Build in 'dst' the kiov subset of 'src' covering [offset, offset+len)
 * and return the number of dst entries used; 'src' is left untouched.
 * Page-vector analogue of lib_extract_iov(); each resulting fragment must
 * still fit within its page (asserted below).
 * NOTE(review): fragmentary — src advancing, niov counting and the
 * kiov_len assignments on the 'fits' path are on missing lines. */
507 lib_extract_kiov (int dst_niov, ptl_kiov_t *dst,
508 int src_niov, ptl_kiov_t *src,
509 ptl_size_t offset, ptl_size_t len)
511 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
512 * for exactly 'len' bytes, and return the number of entries.
513 * NB not destructive to 'src' */
517 if (len == 0) /* no data => */
518 return (0); /* no frags */
520 LASSERT (src_niov > 0);
521 while (offset >= src->kiov_len) { /* skip initial frags */
522 offset -= src->kiov_len;
525 LASSERT (src_niov > 0);
530 LASSERT (src_niov > 0);
531 LASSERT (niov <= dst_niov);
533 frag_len = src->kiov_len - offset;
534 dst->kiov_page = src->kiov_page;
535 dst->kiov_offset = src->kiov_offset + offset;
537 if (len <= frag_len) {
539 LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
543 dst->kiov_len = frag_len;
544 LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
/* Dispatch an incoming payload to the NAL receive callback appropriate for
 * the MD's buffer type: no MD (discard path), iovec MD, or kiov (page) MD.
 * NOTE(review): fragmentary — the md == NULL guard before the first
 * cb_recv call is on missing lines. */
557 lib_recv (nal_cb_t *nal, void *private, lib_msg_t *msg, lib_md_t *md,
558 ptl_size_t offset, ptl_size_t mlen, ptl_size_t rlen)
561 return (nal->cb_recv(nal, private, msg,
563 offset, mlen, rlen));
565 if ((md->options & PTL_MD_KIOV) == 0)
566 return (nal->cb_recv(nal, private, msg,
567 md->md_niov, md->md_iov.iov,
568 offset, mlen, rlen));
570 return (nal->cb_recv_pages(nal, private, msg,
571 md->md_niov, md->md_iov.kiov,
572 offset, mlen, rlen));
/* Dispatch an outgoing message to the NAL send callback appropriate for
 * the MD's buffer type: no MD (header-only), iovec MD, or kiov (page) MD.
 * NOTE(review): fragmentary — the md == NULL guard and the trailing
 * argument lists of each cb_send call are on missing lines. */
576 lib_send (nal_cb_t *nal, void *private, lib_msg_t *msg,
577 ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
578 lib_md_t *md, ptl_size_t offset, ptl_size_t len)
581 return (nal->cb_send(nal, private, msg,
586 if ((md->options & PTL_MD_KIOV) == 0)
587 return (nal->cb_send(nal, private, msg,
589 md->md_niov, md->md_iov.iov,
592 return (nal->cb_send_pages(nal, private, msg,
594 md->md_niov, md->md_iov.kiov,
/* Bind 'msg' to 'md' for the duration of a network operation: decrement a
 * finite threshold, bump message counters and queue the msg on the active
 * list.  Caller MUST hold the state_lock (see comment below).
 * NOTE(review): fragmentary — md->pending / msg->md assignments and the
 * threshold decrement itself are on missing lines. */
599 lib_commit_md (nal_cb_t *nal, lib_md_t *md, lib_msg_t *msg)
601 /* ALWAYS called holding the state_lock */
602 lib_counters_t *counters = &nal->ni.counters;
604 /* Here, we commit the MD to a network OP by marking it busy and
605 * decrementing its threshold. Come what may, the network "owns"
606 * the MD until a call to lib_finalize() signals completion. */
610 if (md->threshold != PTL_MD_THRESH_INF) {
611 LASSERT (md->threshold > 0);
615 counters->msgs_alloc++;
616 if (counters->msgs_alloc > counters->msgs_max)
617 counters->msgs_max = counters->msgs_alloc;
619 list_add (&msg->msg_list, &nal->ni.ni_active_msgs);
/* Account for and discard an uncommitted incoming message: bump the drop
 * counters under the lock, then tell the NAL to sink the payload with a
 * NULL msg/MD so any eventual lib_finalize() is a no-op. */
623 lib_drop_message (nal_cb_t *nal, void *private, ptl_hdr_t *hdr)
627 /* CAVEAT EMPTOR: this only drops messages that we've not committed
628 * to receive (init_msg() not called) and therefore can't cause an
631 state_lock(nal, &flags);
632 nal->ni.counters.drop_count++;
633 nal->ni.counters.drop_length += hdr->payload_length;
634 state_unlock(nal, &flags);
636 /* NULL msg => if NAL calls lib_finalize it will be a noop */
637 (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
641 * Incoming messages have a ptl_msg_t object associated with them
642 * by the library. This object encapsulates the state of the
643 * message and allows the NAL to do non-blocking receives or sends
/* Handle an incoming PUT: byte-swap the put-specific header fields, match
 * against the target portal under the state lock, set up the PUT_END event
 * (and the ack cookie if the sender requested an ACK and the MD allows it),
 * then hand the payload to the NAL via lib_recv().
 * NOTE(review): fragmentary — the md == NULL drop path after matching and
 * the final return are on missing lines. */
648 parse_put(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
650 lib_ni_t *ni = &nal->ni;
651 ptl_size_t mlength = 0;
652 ptl_size_t offset = 0;
657 /* Convert put fields to host byte order */
658 hdr->msg.put.match_bits = NTOH__u64 (hdr->msg.put.match_bits);
659 hdr->msg.put.ptl_index = NTOH__u32 (hdr->msg.put.ptl_index);
660 hdr->msg.put.offset = NTOH__u32 (hdr->msg.put.offset);
662 state_lock(nal, &flags);
664 md = lib_match_md(nal, hdr->msg.put.ptl_index, PTL_MD_OP_PUT,
665 hdr->src_nid, hdr->src_pid,
666 hdr->payload_length, hdr->msg.put.offset,
667 hdr->msg.put.match_bits, msg,
670 state_unlock(nal, &flags);
674 msg->ev.type = PTL_EVENT_PUT_END;
675 msg->ev.hdr_data = hdr->msg.put.hdr_data;
/* remember the ack cookie only when requested and not disabled on the MD */
677 if (!ptl_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
678 !(md->options & PTL_MD_ACK_DISABLE)) {
679 msg->ack_wmd = hdr->msg.put.ack_wmd;
682 ni->counters.recv_count++;
683 ni->counters.recv_length += mlength;
685 state_unlock(nal, &flags);
687 rc = lib_recv(nal, private, msg, md, offset, mlength,
688 hdr->payload_length);
690 CERROR(LPU64": error on receiving PUT from "LPU64": %d\n",
691 ni->nid, hdr->src_nid, rc);
/* Handle an incoming GET: byte-swap the get-specific header fields, match
 * the sink against the target portal, build a REPLY header addressed back
 * to the requester and send it via lib_send() BEFORE completing the
 * (payload-less) incoming message — some NALs need this ordering for
 * optimized GET.
 * NOTE(review): fragmentary — the md == NULL drop path and the reply.length
 * field setup (if any) are on missing lines. */
697 parse_get(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
699 lib_ni_t *ni = &nal->ni;
700 ptl_size_t mlength = 0;
701 ptl_size_t offset = 0;
707 /* Convert get fields to host byte order */
708 hdr->msg.get.match_bits = NTOH__u64 (hdr->msg.get.match_bits);
709 hdr->msg.get.ptl_index = NTOH__u32 (hdr->msg.get.ptl_index);
710 hdr->msg.get.sink_length = NTOH__u32 (hdr->msg.get.sink_length);
711 hdr->msg.get.src_offset = NTOH__u32 (hdr->msg.get.src_offset);
713 state_lock(nal, &flags);
715 md = lib_match_md(nal, hdr->msg.get.ptl_index, PTL_MD_OP_GET,
716 hdr->src_nid, hdr->src_pid,
717 hdr->msg.get.sink_length, hdr->msg.get.src_offset,
718 hdr->msg.get.match_bits, msg,
721 state_unlock(nal, &flags);
725 msg->ev.type = PTL_EVENT_GET_END;
726 msg->ev.hdr_data = 0;
728 ni->counters.send_count++;
729 ni->counters.send_length += mlength;
731 state_unlock(nal, &flags);
/* build the REPLY carrying the requested data back to the requester */
733 memset (&reply, 0, sizeof (reply));
734 reply.type = HTON__u32 (PTL_MSG_REPLY);
735 reply.dest_nid = HTON__u64 (hdr->src_nid);
736 reply.src_nid = HTON__u64 (ni->nid);
737 reply.dest_pid = HTON__u32 (hdr->src_pid);
738 reply.src_pid = HTON__u32 (ni->pid);
739 reply.payload_length = HTON__u32 (mlength);
741 reply.msg.reply.dst_wmd = hdr->msg.get.return_wmd;
743 /* NB call lib_send() _BEFORE_ lib_recv() completes the incoming
744 * message. Some NALs _require_ this to implement optimized GET */
746 rc = lib_send (nal, private, msg, &reply, PTL_MSG_REPLY,
747 hdr->src_nid, hdr->src_pid, md, offset, mlength);
749 CERROR(LPU64": Unable to send REPLY for GET from "LPU64": %d\n",
750 ni->nid, hdr->src_nid, rc);
752 /* Discard any junk after the hdr */
753 (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
/* Handle an incoming REPLY (the data leg of our earlier GET): look up the
 * destination MD directly by wire handle (no byte-flip — cookies are only
 * interpreted by their creator), validate the MD is live and large enough
 * (or truncatable), commit it, fill in the REPLY_END event and receive the
 * payload at offset 0.
 * NOTE(review): fragmentary — the drop/return statements after the error
 * CERRORs and the truncation assignment are on missing lines. */
759 parse_reply(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
761 lib_ni_t *ni = &nal->ni;
768 state_lock(nal, &flags);
770 /* NB handles only looked up by creator (no flips) */
771 md = ptl_wire_handle2md(&hdr->msg.reply.dst_wmd, nal);
772 if (md == NULL || md->threshold == 0) {
773 CERROR (LPU64": Dropping REPLY from "LPU64" for %s MD "LPX64"."LPX64"\n",
774 ni->nid, hdr->src_nid,
775 md == NULL ? "invalid" : "inactive",
776 hdr->msg.reply.dst_wmd.wh_interface_cookie,
777 hdr->msg.reply.dst_wmd.wh_object_cookie);
779 state_unlock(nal, &flags);
783 LASSERT (md->offset == 0);
785 length = rlength = hdr->payload_length;
787 if (length > md->length) {
788 if ((md->options & PTL_MD_TRUNCATE) == 0) {
789 CERROR (LPU64": Dropping REPLY from "LPU64
790 " length %d for MD "LPX64" would overflow (%d)\n",
791 ni->nid, hdr->src_nid, length,
792 hdr->msg.reply.dst_wmd.wh_object_cookie,
794 state_unlock(nal, &flags);
800 CDEBUG(D_NET, "Reply from "LPU64" of length %d/%d into md "LPX64"\n",
801 hdr->src_nid, length, rlength,
802 hdr->msg.reply.dst_wmd.wh_object_cookie);
804 lib_commit_md(nal, md, msg);
806 msg->ev.type = PTL_EVENT_REPLY_END;
807 msg->ev.initiator.nid = hdr->src_nid;
808 msg->ev.initiator.pid = hdr->src_pid;
809 msg->ev.rlength = rlength;
810 msg->ev.mlength = length;
813 lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
815 ni->counters.recv_count++;
816 ni->counters.recv_length += length;
818 state_unlock(nal, &flags);
820 rc = lib_recv(nal, private, msg, md, 0, length, rlength);
822 CERROR(LPU64": error on receiving REPLY from "LPU64": %d\n",
823 ni->nid, hdr->src_nid, rc);
/* Handle an incoming ACK for one of our PUTs: byte-swap the ack fields,
 * look up the acked MD by wire handle (no flips — creator-only cookies),
 * commit it, build the PTL_EVENT_ACK event, finalize immediately (an ACK
 * carries no payload) and sink any junk bytes after the header.
 * NOTE(review): fragmentary — the drop/return after the inactive-MD branch
 * is on missing lines. */
829 parse_ack(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
831 lib_ni_t *ni = &nal->ni;
835 /* Convert ack fields to host byte order */
836 hdr->msg.ack.match_bits = NTOH__u64 (hdr->msg.ack.match_bits);
837 hdr->msg.ack.mlength = NTOH__u32 (hdr->msg.ack.mlength);
839 state_lock(nal, &flags);
841 /* NB handles only looked up by creator (no flips) */
842 md = ptl_wire_handle2md(&hdr->msg.ack.dst_wmd, nal);
843 if (md == NULL || md->threshold == 0) {
844 CDEBUG(D_INFO, LPU64": Dropping ACK from "LPU64" to %s MD "
845 LPX64"."LPX64"\n", ni->nid, hdr->src_nid,
846 (md == NULL) ? "invalid" : "inactive",
847 hdr->msg.ack.dst_wmd.wh_interface_cookie,
848 hdr->msg.ack.dst_wmd.wh_object_cookie);
850 state_unlock(nal, &flags);
854 CDEBUG(D_NET, LPU64": ACK from "LPU64" into md "LPX64"\n",
855 ni->nid, hdr->src_nid,
856 hdr->msg.ack.dst_wmd.wh_object_cookie);
858 lib_commit_md(nal, md, msg);
860 msg->ev.type = PTL_EVENT_ACK;
861 msg->ev.initiator.nid = hdr->src_nid;
862 msg->ev.initiator.pid = hdr->src_pid;
863 msg->ev.mlength = hdr->msg.ack.mlength;
864 msg->ev.match_bits = hdr->msg.ack.match_bits;
866 lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
868 ni->counters.recv_count++;
870 state_unlock(nal, &flags);
872 /* We have received and matched up the ack OK, create the
873 * completion event now... */
874 lib_finalize(nal, private, msg, PTL_OK);
876 /* ...and now discard any junk after the hdr */
877 (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
/* Map hdr->type to a printable name for log messages; unknown types yield
 * "<UNKNOWN>".  NOTE(review): the per-type switch cases are on missing
 * lines of this extract. */
883 hdr_type_string (ptl_hdr_t *hdr)
897 return ("<UNKNOWN>");
/* Debug dump of a ptl_hdr_t through the NAL's printf callback: common
 * fields first, then the type-specific union members (PUT/GET/ACK/REPLY).
 * NOTE(review): fragmentary — the switch statement and several cb_printf
 * call heads are on missing lines. */
901 void print_hdr(nal_cb_t * nal, ptl_hdr_t * hdr)
903 char *type_str = hdr_type_string (hdr);
905 nal->cb_printf(nal, "P3 Header at %p of type %s\n", hdr, type_str);
906 nal->cb_printf(nal, " From nid/pid %Lu/%Lu", hdr->src_nid,
908 nal->cb_printf(nal, " To nid/pid %Lu/%Lu\n", hdr->dest_nid,
/* PUT-specific fields */
917 " Ptl index %d, ack md "LPX64"."LPX64", "
918 "match bits "LPX64"\n",
919 hdr->msg.put.ptl_index,
920 hdr->msg.put.ack_wmd.wh_interface_cookie,
921 hdr->msg.put.ack_wmd.wh_object_cookie,
922 hdr->msg.put.match_bits);
924 " Length %d, offset %d, hdr data "LPX64"\n",
925 hdr->payload_length, hdr->msg.put.offset,
926 hdr->msg.put.hdr_data);
/* GET-specific fields */
931 " Ptl index %d, return md "LPX64"."LPX64", "
932 "match bits "LPX64"\n", hdr->msg.get.ptl_index,
933 hdr->msg.get.return_wmd.wh_interface_cookie,
934 hdr->msg.get.return_wmd.wh_object_cookie,
935 hdr->msg.get.match_bits);
937 " Length %d, src offset %d\n",
938 hdr->msg.get.sink_length,
939 hdr->msg.get.src_offset);
/* ACK-specific fields */
943 nal->cb_printf(nal, " dst md "LPX64"."LPX64", "
944 "manipulated length %d\n",
945 hdr->msg.ack.dst_wmd.wh_interface_cookie,
946 hdr->msg.ack.dst_wmd.wh_object_cookie,
947 hdr->msg.ack.mlength);
/* REPLY-specific fields */
951 nal->cb_printf(nal, " dst md "LPX64"."LPX64", "
953 hdr->msg.reply.dst_wmd.wh_interface_cookie,
954 hdr->msg.reply.dst_wmd.wh_object_cookie,
955 hdr->payload_length);
958 } /* end of print_hdr() */
/* Entry point for every incoming message: byte-swap the common header
 * fields, reject HELLOs and misaddressed messages, apply simulated peer
 * failures, allocate a lib_msg_t and dispatch on hdr->type to the
 * parse_{ack,put,get,reply} handlers.  On handler failure, finalize a
 * committed msg or free an uncommitted one and drop the payload.
 * NOTE(review): fragmentary — returns after each drop, the msg == NULL
 * check and the switch case labels are on missing lines. */
962 lib_parse(nal_cb_t *nal, ptl_hdr_t *hdr, void *private)
968 /* convert common fields to host byte order */
969 hdr->dest_nid = NTOH__u64 (hdr->dest_nid);
970 hdr->src_nid = NTOH__u64 (hdr->src_nid);
971 hdr->dest_pid = NTOH__u32 (hdr->dest_pid);
972 hdr->src_pid = NTOH__u32 (hdr->src_pid);
973 hdr->type = NTOH__u32 (hdr->type);
974 hdr->payload_length = NTOH__u32(hdr->payload_length);
976 nal->cb_printf(nal, "%d: lib_parse: nal=%p hdr=%p type=%d\n",
977 nal->ni.nid, nal, hdr, hdr->type);
980 if (hdr->type == PTL_MSG_HELLO) {
981 /* dest_nid is really ptl_magicversion_t */
982 ptl_magicversion_t *mv = (ptl_magicversion_t *)&hdr->dest_nid;
984 CERROR (LPU64": Dropping unexpected HELLO message: "
985 "magic %d, version %d.%d from "LPD64"\n",
986 nal->ni.nid, mv->magic,
987 mv->version_major, mv->version_minor,
989 lib_drop_message(nal, private, hdr);
993 if (hdr->dest_nid != nal->ni.nid) {
994 CERROR(LPU64": Dropping %s message from "LPU64" to "LPU64
995 " (not me)\n", nal->ni.nid, hdr_type_string (hdr),
996 hdr->src_nid, hdr->dest_nid);
997 lib_drop_message(nal, private, hdr);
1001 if (!list_empty (&nal->ni.ni_test_peers) && /* normally we don't */
1002 fail_peer (nal, hdr->src_nid, 0)) /* shall we now? */
1004 CERROR(LPU64": Dropping incoming %s from "LPU64
1005 ": simulated failure\n",
1006 nal->ni.nid, hdr_type_string (hdr),
1008 lib_drop_message(nal, private, hdr);
1012 msg = lib_msg_alloc(nal);
1014 CERROR(LPU64": Dropping incoming %s from "LPU64
1015 ": can't allocate a lib_msg_t\n",
1016 nal->ni.nid, hdr_type_string (hdr),
1018 lib_drop_message(nal, private, hdr);
1022 switch (hdr->type) {
1024 rc = parse_ack(nal, hdr, private, msg);
1027 rc = parse_put(nal, hdr, private, msg);
1030 rc = parse_get(nal, hdr, private, msg);
1033 rc = parse_reply(nal, hdr, private, msg);
1036 CERROR(LPU64": Dropping <unknown> message from "LPU64
1037 ": Bad type=0x%x\n", nal->ni.nid, hdr->src_nid,
/* error path: committed msgs get finalized; uncommitted ones are freed
 * (lib_msg_free expects the state_lock held) and the payload dropped */
1044 if (msg->md != NULL) {
1046 lib_finalize(nal, private, msg, rc);
1048 state_lock(nal, &flags);
1049 lib_msg_free(nal, msg); /* expects state_lock held */
1050 state_unlock(nal, &flags);
1052 lib_drop_message(nal, private, hdr);
/* PtlPut API dispatcher: validate the local MD, build the PUT wire header
 * (requesting an ACK cookie when ack_req_in == PTL_ACK_REQ), commit the MD,
 * pre-fill the SEND_END event and hand the message to lib_send().
 * Completion is signalled later by the event; errors are finalized here.
 * NOTE(review): fragmentary — 'msg'/'md'/'hdr'/'rc' declarations, the
 * msg == NULL check and some closing braces are on missing lines. */
1058 do_PtlPut(nal_cb_t *nal, void *private, void *v_args, void *v_ret)
1062 * ptl_handle_md_t md_in
1063 * ptl_ack_req_t ack_req_in
1064 * ptl_process_id_t target_in
1065 * ptl_pt_index_t portal_in
1066 * ptl_ac_index_t cookie_in
1067 * ptl_match_bits_t match_bits_in
1068 * ptl_size_t offset_in
1073 PtlPut_in *args = v_args;
1074 ptl_process_id_t *id = &args->target_in;
1075 PtlPut_out *ret = v_ret;
1076 lib_ni_t *ni = &nal->ni;
1080 unsigned long flags;
1083 if (!list_empty (&nal->ni.ni_test_peers) && /* normally we don't */
1084 fail_peer (nal, id->nid, 1)) /* shall we now? */
1086 CERROR(LPU64": Dropping PUT to "LPU64": simulated failure\n",
1087 nal->ni.nid, id->nid);
1088 return (ret->rc = PTL_PROCESS_INVALID);
1091 msg = lib_msg_alloc(nal);
1093 CERROR(LPU64": Dropping PUT to "LPU64": ENOMEM on lib_msg_t\n",
1095 return (ret->rc = PTL_NO_SPACE);
1098 state_lock(nal, &flags);
1100 md = ptl_handle2md(&args->md_in, nal);
1101 if (md == NULL || md->threshold == 0) {
1102 lib_msg_free(nal, msg);
1103 state_unlock(nal, &flags);
1105 return (ret->rc = PTL_MD_INVALID);
1108 CDEBUG(D_NET, "PtlPut -> %Lu: %lu\n", (unsigned long long)id->nid,
1109 (unsigned long)id->pid);
1111 memset (&hdr, 0, sizeof (hdr));
1112 hdr.type = HTON__u32 (PTL_MSG_PUT);
1113 hdr.dest_nid = HTON__u64 (id->nid);
1114 hdr.src_nid = HTON__u64 (ni->nid);
1115 hdr.dest_pid = HTON__u32 (id->pid);
1116 hdr.src_pid = HTON__u32 (ni->pid);
1117 hdr.payload_length = HTON__u32 (md->length);
1119 /* NB handles only looked up by creator (no flips) */
1120 if (args->ack_req_in == PTL_ACK_REQ) {
1121 hdr.msg.put.ack_wmd.wh_interface_cookie = ni->ni_interface_cookie;
1122 hdr.msg.put.ack_wmd.wh_object_cookie = md->md_lh.lh_cookie;
1124 hdr.msg.put.ack_wmd = PTL_WIRE_HANDLE_NONE;
1127 hdr.msg.put.match_bits = HTON__u64 (args->match_bits_in);
1128 hdr.msg.put.ptl_index = HTON__u32 (args->portal_in);
1129 hdr.msg.put.offset = HTON__u32 (args->offset_in);
1130 hdr.msg.put.hdr_data = args->hdr_data_in;
1132 lib_commit_md(nal, md, msg);
1134 msg->ev.type = PTL_EVENT_SEND_END;
1135 msg->ev.initiator.nid = ni->nid;
1136 msg->ev.initiator.pid = ni->pid;
1137 msg->ev.portal = args->portal_in;
1138 msg->ev.match_bits = args->match_bits_in;
1139 msg->ev.rlength = md->length;
1140 msg->ev.mlength = md->length;
1141 msg->ev.offset = args->offset_in;
1142 msg->ev.hdr_data = args->hdr_data_in;
1144 lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
1146 ni->counters.send_count++;
1147 ni->counters.send_length += md->length;
1149 state_unlock(nal, &flags);
1151 rc = lib_send (nal, private, msg, &hdr, PTL_MSG_PUT,
1152 id->nid, id->pid, md, 0, md->length);
1154 CERROR(LPU64": error sending PUT to "LPU64": %d\n",
1155 ni->nid, id->nid, rc);
1156 lib_finalize (nal, private, msg, rc);
1159 /* completion will be signalled by an event */
1160 return ret->rc = PTL_OK;
/* Create a msg the NAL will pass to lib_finalize() when it DMAs GET sink
 * data directly into the local MD (optimized GET — no explicit REPLY
 * message is received).  Must be called before lib_finalize(getmsg), since
 * that frees the original GET.  Commits getmd and pre-fills a REPLY_END
 * event mirroring what parse_reply() would have produced.
 * NOTE(review): fragmentary — the msg == NULL / inactive-MD drop paths and
 * the final return are on missing lines. */
1164 lib_create_reply_msg (nal_cb_t *nal, ptl_nid_t peer_nid, lib_msg_t *getmsg)
1166 /* The NAL can DMA direct to the GET md (i.e. no REPLY msg). This
1167 * returns a msg for the NAL to pass to lib_finalize() when the sink
1168 * data has been received.
1170 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
1171 * lib_finalize() is called on it, so the NAL must call this first */
1173 lib_ni_t *ni = &nal->ni;
1174 lib_msg_t *msg = lib_msg_alloc(nal);
1175 lib_md_t *getmd = getmsg->md;
1176 unsigned long flags;
1178 state_lock(nal, &flags);
1180 LASSERT (getmd->pending > 0);
1183 CERROR ("Dropping REPLY from "LPU64": can't allocate msg\n",
1188 if (getmd->threshold == 0) {
1189 CERROR ("Dropping REPLY from "LPU64" for inactive MD %p\n",
1194 LASSERT (getmd->offset == 0);
1196 CDEBUG(D_NET, "Reply from "LPU64" md %p\n", peer_nid, getmd);
1198 lib_commit_md (nal, getmd, msg);
1200 msg->ev.type = PTL_EVENT_REPLY_END;
1201 msg->ev.initiator.nid = peer_nid;
1202 msg->ev.initiator.pid = 0; /* XXX FIXME!!! */
1203 msg->ev.rlength = msg->ev.mlength = getmd->length;
1206 lib_md_deconstruct(nal, getmd, &msg->ev.mem_desc);
1208 ni->counters.recv_count++;
1209 ni->counters.recv_length += getmd->length;
1211 state_unlock(nal, &flags);
/* shared failure path: free the msg (if any) and account the drop */
1216 lib_msg_free(nal, msg);
1218 nal->ni.counters.drop_count++;
1219 nal->ni.counters.drop_length += getmd->length;
1221 state_unlock (nal, &flags);
/* PtlGet API dispatcher: validate the local sink MD, build the GET wire
 * header (return_wmd cookie identifies the MD for the eventual REPLY),
 * commit the MD, pre-fill the SEND_END event and hand the header-only
 * message to lib_send().  Completion is signalled later by an event.
 * FIX: the simulated-failure CERROR said "Dropping PUT" — this is the GET
 * path (cf. the ENOMEM message below and do_PtlPut's own PUT message).
 * NOTE(review): fragmentary extract — 'msg'/'md'/'hdr'/'rc' declarations,
 * the msg == NULL check and some closing braces are on missing lines. */
1227 do_PtlGet(nal_cb_t *nal, void *private, void *v_args, void *v_ret)
1231 * ptl_handle_md_t md_in
1232 * ptl_process_id_t target_in
1233 * ptl_pt_index_t portal_in
1234 * ptl_ac_index_t cookie_in
1235 * ptl_match_bits_t match_bits_in
1236 * ptl_size_t offset_in
1241 PtlGet_in *args = v_args;
1242 ptl_process_id_t *id = &args->target_in;
1243 PtlGet_out *ret = v_ret;
1244 lib_ni_t *ni = &nal->ni;
1248 unsigned long flags;
1251 if (!list_empty (&nal->ni.ni_test_peers) && /* normally we don't */
1252 fail_peer (nal, id->nid, 1)) /* shall we now? */
1254 CERROR(LPU64": Dropping GET to "LPU64": simulated failure\n",
1255 nal->ni.nid, id->nid);
1256 return (ret->rc = PTL_PROCESS_INVALID);
1259 msg = lib_msg_alloc(nal);
1261 CERROR(LPU64": Dropping GET to "LPU64": ENOMEM on lib_msg_t\n",
1263 return (ret->rc = PTL_NO_SPACE);
1266 state_lock(nal, &flags);
1268 md = ptl_handle2md(&args->md_in, nal);
1269 if (md == NULL || !md->threshold) {
1270 lib_msg_free(nal, msg);
1271 state_unlock(nal, &flags);
1273 return ret->rc = PTL_MD_INVALID;
1276 CDEBUG(D_NET, "PtlGet -> %Lu: %lu\n", (unsigned long long)id->nid,
1277 (unsigned long)id->pid);
1279 memset (&hdr, 0, sizeof (hdr));
1280 hdr.type = HTON__u32 (PTL_MSG_GET);
1281 hdr.dest_nid = HTON__u64 (id->nid);
1282 hdr.src_nid = HTON__u64 (ni->nid);
1283 hdr.dest_pid = HTON__u32 (id->pid);
1284 hdr.src_pid = HTON__u32 (ni->pid);
1285 hdr.payload_length = 0;
1287 /* NB handles only looked up by creator (no flips) */
1288 hdr.msg.get.return_wmd.wh_interface_cookie = ni->ni_interface_cookie;
1289 hdr.msg.get.return_wmd.wh_object_cookie = md->md_lh.lh_cookie;
1291 hdr.msg.get.match_bits = HTON__u64 (args->match_bits_in);
1292 hdr.msg.get.ptl_index = HTON__u32 (args->portal_in);
1293 hdr.msg.get.src_offset = HTON__u32 (args->offset_in);
1294 hdr.msg.get.sink_length = HTON__u32 (md->length);
1296 lib_commit_md(nal, md, msg);
1298 msg->ev.type = PTL_EVENT_SEND_END;
1299 msg->ev.initiator.nid = ni->nid;
1300 msg->ev.initiator.pid = ni->pid;
1301 msg->ev.portal = args->portal_in;
1302 msg->ev.match_bits = args->match_bits_in;
1303 msg->ev.rlength = md->length;
1304 msg->ev.mlength = md->length;
1305 msg->ev.offset = args->offset_in;
1306 msg->ev.hdr_data = 0;
1308 lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
1310 ni->counters.send_count++;
1312 state_unlock(nal, &flags);
1314 rc = lib_send (nal, private, msg, &hdr, PTL_MSG_GET,
1315 id->nid, id->pid, NULL, 0, 0);
1317 CERROR(LPU64": error sending GET to "LPU64": %d\n",
1318 ni->nid, id->nid, rc);
1319 lib_finalize (nal, private, msg, rc);
1322 /* completion will be signalled by an event */
1323 return ret->rc = PTL_OK;
/* Compile/run-time verification that this build's enum values and the
 * layout (size + offset of every field) of the on-the-wire structs match
 * the generated wire protocol — catches any compiler/arch that would lay
 * ptl_hdr_t out differently and silently break interoperability. */
1326 void lib_assert_wire_constants (void)
1328 /* Wire protocol assertions generated by 'wirecheck'
1329 * running on Linux robert.bartonsoftware.com 2.4.20-18.9 #1 Thu May 29 06:54:41 EDT 2003 i68
1330 * with gcc version 3.2.2 20030222 (Red Hat Linux 3.2.2-5) */
/* protocol magic, version, and message-type enum values */
1334 LASSERT (PORTALS_PROTO_MAGIC == 0xeebc0ded);
1335 LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0);
1336 LASSERT (PORTALS_PROTO_VERSION_MINOR == 3);
1337 LASSERT (PTL_MSG_ACK == 0);
1338 LASSERT (PTL_MSG_PUT == 1);
1339 LASSERT (PTL_MSG_GET == 2);
1340 LASSERT (PTL_MSG_REPLY == 3);
1341 LASSERT (PTL_MSG_HELLO == 4);
1343 /* Checks for struct ptl_handle_wire_t */
1344 LASSERT ((int)sizeof(ptl_handle_wire_t) == 16);
1345 LASSERT (offsetof(ptl_handle_wire_t, wh_interface_cookie) == 0);
1346 LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8);
1347 LASSERT (offsetof(ptl_handle_wire_t, wh_object_cookie) == 8);
1348 LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_object_cookie) == 8);
1350 /* Checks for struct ptl_magicversion_t */
1351 LASSERT ((int)sizeof(ptl_magicversion_t) == 8);
1352 LASSERT (offsetof(ptl_magicversion_t, magic) == 0);
1353 LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->magic) == 4);
1354 LASSERT (offsetof(ptl_magicversion_t, version_major) == 4);
1355 LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_major) == 2);
1356 LASSERT (offsetof(ptl_magicversion_t, version_minor) == 6);
1357 LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_minor) == 2);
1359 /* Checks for struct ptl_hdr_t */
1360 LASSERT ((int)sizeof(ptl_hdr_t) == 72);
1361 LASSERT (offsetof(ptl_hdr_t, dest_nid) == 0);
1362 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_nid) == 8);
1363 LASSERT (offsetof(ptl_hdr_t, src_nid) == 8);
1364 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_nid) == 8);
1365 LASSERT (offsetof(ptl_hdr_t, dest_pid) == 16);
1366 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_pid) == 4);
1367 LASSERT (offsetof(ptl_hdr_t, src_pid) == 20);
1368 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_pid) == 4);
1369 LASSERT (offsetof(ptl_hdr_t, type) == 24);
1370 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->type) == 4);
1371 LASSERT (offsetof(ptl_hdr_t, payload_length) == 28);
1372 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->payload_length) == 4);
1373 LASSERT (offsetof(ptl_hdr_t, msg) == 32);
1374 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg) == 40);
/* ACK member of the msg union */
1377 LASSERT (offsetof(ptl_hdr_t, msg.ack.dst_wmd) == 32);
1378 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16);
1379 LASSERT (offsetof(ptl_hdr_t, msg.ack.match_bits) == 48);
1380 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.match_bits) == 8);
1381 LASSERT (offsetof(ptl_hdr_t, msg.ack.mlength) == 56);
1382 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.mlength) == 4);
/* PUT member of the msg union */
1385 LASSERT (offsetof(ptl_hdr_t, msg.put.ack_wmd) == 32);
1386 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16);
1387 LASSERT (offsetof(ptl_hdr_t, msg.put.match_bits) == 48);
1388 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.match_bits) == 8);
1389 LASSERT (offsetof(ptl_hdr_t, msg.put.hdr_data) == 56);
1390 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.hdr_data) == 8);
1391 LASSERT (offsetof(ptl_hdr_t, msg.put.ptl_index) == 64);
1392 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ptl_index) == 4);
1393 LASSERT (offsetof(ptl_hdr_t, msg.put.offset) == 68);
1394 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.offset) == 4);
/* GET member of the msg union */
1397 LASSERT (offsetof(ptl_hdr_t, msg.get.return_wmd) == 32);
1398 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.return_wmd) == 16);
1399 LASSERT (offsetof(ptl_hdr_t, msg.get.match_bits) == 48);
1400 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.match_bits) == 8);
1401 LASSERT (offsetof(ptl_hdr_t, msg.get.ptl_index) == 56);
1402 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.ptl_index) == 4);
1403 LASSERT (offsetof(ptl_hdr_t, msg.get.src_offset) == 60);
1404 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.src_offset) == 4);
1405 LASSERT (offsetof(ptl_hdr_t, msg.get.sink_length) == 64);
1406 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.sink_length) == 4);
/* REPLY member of the msg union */
1409 LASSERT (offsetof(ptl_hdr_t, msg.reply.dst_wmd) == 32);
1410 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16);
/* HELLO member of the msg union */
1413 LASSERT (offsetof(ptl_hdr_t, msg.hello.incarnation) == 32);
1414 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.incarnation) == 8);
1415 LASSERT (offsetof(ptl_hdr_t, msg.hello.type) == 40);
1416 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.type) == 4);