1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Data movement routines
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Copyright (c) 2001-2002 Sandia National Laboratories
10 * This file is part of Lustre, http://www.sf.net/projects/lustre/
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 # define DEBUG_SUBSYSTEM S_PORTALS
30 # include <linux/kp30.h>
32 #include <portals/p30.h>
33 #include <portals/lib-p30.h>
36 static void lib_commit_md (lib_nal_t *nal, lib_md_t *md, lib_msg_t *msg);
/* Walk the match list of portal 'index' looking for an ME/MD pair that
 * accepts this incoming PUT/GET ('op_mask') from src_nid/src_pid with
 * 'match_bits'.  On a match the MD is committed to the message (busy,
 * threshold decremented), the event fields in 'msg' are filled in, and
 * the accepted length/offset are returned via *mlength_out/*offset_out.
 * NOTE(review): caller must hold LIB_LOCK — lib_commit_md below requires
 * it; confirm against callers (parse_put/parse_get take it). */
39 lib_match_md(lib_nal_t *nal, int index, int op_mask,
40 ptl_nid_t src_nid, ptl_pid_t src_pid,
41 ptl_size_t rlength, ptl_size_t roffset,
42 ptl_match_bits_t match_bits, lib_msg_t *msg,
43 ptl_size_t *mlength_out, ptl_size_t *offset_out)
45 lib_ni_t *ni = &nal->libnal_ni;
46 struct list_head *match_list = &ni->ni_portals.tbl[index];
47 struct list_head *tmp;
54 CDEBUG (D_NET, "Request from "LPU64".%d of length %d into portal %d "
55 "MB="LPX64"\n", src_nid, src_pid, rlength, index, match_bits);
/* Reject out-of-range portal indices before touching the table.
 * NOTE(review): 'match_list' above is computed from 'index' before this
 * check — harmless as long as it is not dereferenced first, but the
 * check would be safer before the table lookup. */
57 if (index < 0 || index >= ni->ni_portals.size) {
58 CERROR("Invalid portal %d not in [0-%d]\n",
59 index, ni->ni_portals.size);
63 list_for_each (tmp, match_list) {
64 me = list_entry(tmp, lib_me_t, me_list);
67 /* ME attached but MD not attached yet */
71 LASSERT (me == md->me);
73 /* mismatched MD op */
74 if ((md->options & op_mask) == 0)
/* MD has no capacity left (threshold/space used up) */
78 if (lib_md_exhausted(md))
81 /* mismatched ME nid/pid? */
82 if (me->match_id.nid != PTL_NID_ANY &&
83 me->match_id.nid != src_nid)
86 CDEBUG(D_NET, "match_id.pid [%x], src_pid [%x]\n",
87 me->match_id.pid, src_pid);
89 if (me->match_id.pid != PTL_PID_ANY &&
90 me->match_id.pid != src_pid)
93 /* mismatched ME matchbits? (bits not covered by ignore_bits
 * must agree exactly) */
94 if (((me->match_bits ^ match_bits) & ~me->ignore_bits) != 0)
97 /* Hurrah! This _is_ a match; check it out... */
/* Locally-managed offset: use the MD's own offset, ignoring the
 * sender-requested 'roffset' */
99 if ((md->options & PTL_MD_MANAGE_REMOTE) == 0)
104 if ((md->options & PTL_MD_MAX_SIZE) != 0) {
105 mlength = md->max_size;
106 LASSERT (md->offset + mlength <= md->length);
108 mlength = md->length - offset;
111 if (rlength <= mlength) { /* fits in allowed space */
113 } else if ((md->options & PTL_MD_TRUNCATE) == 0) {
114 /* this packet _really_ is too big */
115 CERROR("Matching packet %d too big: %d left, "
116 "%d allowed\n", rlength, md->length - offset,
121 /* Commit to this ME/MD */
122 CDEBUG(D_NET, "Incoming %s index %x from "LPU64"/%u of "
123 "length %d/%d into md "LPX64" [%d] + %d\n",
124 (op_mask == PTL_MD_OP_PUT) ? "put" : "get",
125 index, src_nid, src_pid, mlength, rlength,
126 md->md_lh.lh_cookie, md->md_niov, offset);
128 lib_commit_md(nal, md, msg);
/* advance the local offset past the bytes we just accepted */
129 md->offset = offset + mlength;
131 /* NB Caller sets ev.type and ev.hdr_data */
132 msg->ev.initiator.nid = src_nid;
133 msg->ev.initiator.pid = src_pid;
134 msg->ev.pt_index = index;
135 msg->ev.match_bits = match_bits;
136 msg->ev.rlength = rlength;
137 msg->ev.mlength = mlength;
138 msg->ev.offset = offset;
140 lib_md_deconstruct(nal, md, &msg->ev.md);
141 ptl_md2handle(&msg->ev.md_handle, nal, md);
143 *offset_out = offset;
144 *mlength_out = mlength;
146 /* Auto-unlink NOW, so the ME gets unlinked if required.
147 * We bumped md->pending above so the MD just gets flagged
148 * for unlink when it is finalized. */
149 if ((md->md_flags & PTL_MD_FLAG_AUTO_UNLINK) != 0 &&
150 lib_md_exhausted(md))
151 lib_md_unlink(nal, md);
/* Fell off the match list: no ME/MD wanted this message */
157 CERROR (LPU64": Dropping %s from "LPU64".%d portal %d match "LPX64
158 " offset %d length %d: no match\n",
159 ni->ni_pid.nid, (op_mask == PTL_MD_OP_GET) ? "GET" : "PUT",
160 src_nid, src_pid, index, match_bits, roffset, rlength);
/* Test hook: configure simulated failures for a peer NID.
 * threshold != 0 adds an entry making messages to/from 'nid' fail
 * 'threshold' times (PTL_MD_THRESH_INF presumably means "forever" —
 * see fail_peer); threshold == 0 removes matching entries.  Entries
 * are unlinked under LIB_LOCK and freed afterwards from a private
 * 'cull' list so PORTAL_FREE never runs with the lock held. */
164 int lib_api_fail_nid (nal_t *apinal, ptl_nid_t nid, unsigned int threshold)
166 lib_nal_t *nal = apinal->nal_data;
169 struct list_head *el;
170 struct list_head *next;
171 struct list_head cull;
173 if (threshold != 0) {
174 /* Adding a new entry */
175 PORTAL_ALLOC(tp, sizeof(*tp));
180 tp->tp_threshold = threshold;
182 LIB_LOCK(nal, flags);
183 list_add_tail (&tp->tp_list, &nal->libnal_ni.ni_test_peers);
184 LIB_UNLOCK(nal, flags);
188 /* removing entries */
189 INIT_LIST_HEAD (&cull);
191 LIB_LOCK(nal, flags);
193 list_for_each_safe (el, next, &nal->libnal_ni.ni_test_peers) {
194 tp = list_entry (el, lib_test_peer_t, tp_list);
196 if (tp->tp_threshold == 0 || /* needs culling anyway */
197 nid == PTL_NID_ANY || /* removing all entries */
198 tp->tp_nid == nid) /* matched this one */
200 list_del (&tp->tp_list);
201 list_add (&tp->tp_list, &cull);
205 LIB_UNLOCK(nal, flags);
/* free culled entries outside the lock */
207 while (!list_empty (&cull)) {
208 tp = list_entry (cull.next, lib_test_peer_t, tp_list);
210 list_del (&tp->tp_list);
211 PORTAL_FREE(tp, sizeof (*tp));
/* Decide whether a message involving 'nid' should be dropped to
 * simulate a failure (returns nonzero to fail, judging from callers).
 * 'outgoing' gates cleanup: zombie entries (threshold 0) are only
 * culled on the send path, since the receive path may run at
 * interrupt priority where freeing is unsafe. */
217 fail_peer (lib_nal_t *nal, ptl_nid_t nid, int outgoing)
220 struct list_head *el;
221 struct list_head *next;
223 struct list_head cull;
226 INIT_LIST_HEAD (&cull);
228 LIB_LOCK (nal, flags);
230 list_for_each_safe (el, next, &nal->libnal_ni.ni_test_peers) {
231 tp = list_entry (el, lib_test_peer_t, tp_list);
233 if (tp->tp_threshold == 0) {
236 /* only cull zombies on outgoing tests,
237 * since we may be at interrupt priority on
238 * incoming messages. */
239 list_del (&tp->tp_list);
240 list_add (&tp->tp_list, &cull);
245 if (tp->tp_nid == PTL_NID_ANY || /* fail every peer */
246 nid == tp->tp_nid) { /* fail this peer */
/* finite threshold counts down; reaching 0 makes the
 * entry a zombie to be culled later */
249 if (tp->tp_threshold != PTL_MD_THRESH_INF) {
252 tp->tp_threshold == 0) {
254 list_del (&tp->tp_list);
255 list_add (&tp->tp_list, &cull);
262 LIB_UNLOCK (nal, flags);
/* free culled entries outside the lock */
264 while (!list_empty (&cull)) {
265 tp = list_entry (cull.next, lib_test_peer_t, tp_list);
266 list_del (&tp->tp_list);
268 PORTAL_FREE(tp, sizeof (*tp));
/* Sum the byte counts of an iovec array: total number of bytes the
 * 'niov' fragments describe. */
275 lib_iov_nob (int niov, struct iovec *iov)
280 nob += (iov++)->iov_len;
/* Gather 'len' bytes from the iovec, starting 'offset' bytes in, into
 * the flat buffer 'dest'.  First skips any fragments wholly before
 * 'offset', then copies fragment by fragment. */
286 lib_copy_iov2buf (char *dest, int niov, struct iovec *iov,
287 ptl_size_t offset, ptl_size_t len)
294 /* skip complete frags before 'offset' */
296 while (offset >= iov->iov_len) {
297 offset -= iov->iov_len;
/* copy what remains of this fragment, bounded by 'len' */
305 nob = MIN (iov->iov_len - offset, len);
306 memcpy (dest, iov->iov_base + offset, nob);
/* Scatter 'len' bytes from the flat buffer 'src' into the iovec,
 * starting 'offset' bytes in.  Mirror image of lib_copy_iov2buf. */
317 lib_copy_buf2iov (int niov, struct iovec *iov, ptl_size_t offset,
318 char *src, ptl_size_t len)
325 /* skip complete frags before 'offset' */
327 while (offset >= iov->iov_len) {
328 offset -= iov->iov_len;
/* fill what remains of this fragment, bounded by 'len' */
336 nob = MIN (iov->iov_len - offset, len);
337 memcpy (iov->iov_base + offset, src, nob);
/* Build in 'dst' the iovec describing the [offset, offset+len) window
 * of 'src', returning the number of dst entries used.  'src' is not
 * modified; dst entries alias the src buffers. */
348 lib_extract_iov (int dst_niov, struct iovec *dst,
349 int src_niov, struct iovec *src,
350 ptl_size_t offset, ptl_size_t len)
352 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
353 * for exactly 'len' bytes, and return the number of entries.
354 * NB not destructive to 'src' */
358 if (len == 0) /* no data => */
359 return (0); /* no frags */
361 LASSERT (src_niov > 0);
362 while (offset >= src->iov_len) { /* skip initial frags */
363 offset -= src->iov_len;
366 LASSERT (src_niov > 0);
371 LASSERT (src_niov > 0);
372 LASSERT (niov <= dst_niov);
/* first (possibly partial) fragment starts 'offset' bytes in */
374 frag_len = src->iov_len - offset;
375 dst->iov_base = ((char *)src->iov_base) + offset;
377 if (len <= frag_len) {
/* 'len' spans past this fragment: take it whole and continue */
382 dst->iov_len = frag_len;
/* NOTE(review): these four declarations appear to be the non-kernel
 * (userspace) stubs for the page-based kiov routines — the real
 * kmap-based implementations follow below; presumably these bodies
 * just assert, since kiovs need kernel page mapping.  Confirm against
 * the surrounding #ifdef __KERNEL__ structure (not visible here). */
395 lib_kiov_nob (int niov, ptl_kiov_t *kiov)
402 lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov,
403 ptl_size_t offset, ptl_size_t len)
409 lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, ptl_size_t offset,
410 char *src, ptl_size_t len)
416 lib_extract_kiov (int dst_niov, ptl_kiov_t *dst,
417 int src_niov, ptl_kiov_t *src,
418 ptl_size_t offset, ptl_size_t len)
/* Sum the byte counts of a kiov (page-fragment) array: total number of
 * bytes the 'niov' fragments describe. */
426 lib_kiov_nob (int niov, ptl_kiov_t *kiov)
431 nob += (kiov++)->kiov_len;
/* Gather 'len' bytes from a kiov (page-based fragment list), starting
 * 'offset' bytes in, into the flat buffer 'dest'.  Pages are kmapped
 * one at a time, so this must not run in interrupt context. */
437 lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov,
438 ptl_size_t offset, ptl_size_t len)
446 LASSERT (!in_interrupt ());
/* Skip complete frags before 'offset'.  BUGFIX: this used
 * 'offset > kiov->kiov_len', which fails to skip a fragment that
 * 'offset' exactly exhausts, yielding a pointless zero-byte first
 * copy; '>=' matches lib_copy_buf2kiov and the iov variants. */
449 while (offset >= kiov->kiov_len) {
450 offset -= kiov->kiov_len;
/* copy what remains of this page fragment, bounded by 'len' */
458 nob = MIN (kiov->kiov_len - offset, len);
460 addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
461 memcpy (dest, addr, nob);
462 kunmap (kiov->kiov_page);
/* Scatter 'len' bytes from the flat buffer 'src' into a kiov
 * (page-based fragment list), starting 'offset' bytes in.  Pages are
 * kmapped one at a time, so this must not run in interrupt context. */
473 lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, ptl_size_t offset,
474 char *src, ptl_size_t len)
482 LASSERT (!in_interrupt ());
/* skip complete frags before 'offset' */
485 while (offset >= kiov->kiov_len) {
486 offset -= kiov->kiov_len;
/* fill what remains of this page fragment, bounded by 'len' */
494 nob = MIN (kiov->kiov_len - offset, len);
496 addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
497 memcpy (addr, src, nob);
498 kunmap (kiov->kiov_page);
/* Build in 'dst' the kiov describing the [offset, offset+len) window
 * of 'src', returning the number of dst entries used.  'src' is not
 * modified; dst entries alias the src pages. */
509 lib_extract_kiov (int dst_niov, ptl_kiov_t *dst,
510 int src_niov, ptl_kiov_t *src,
511 ptl_size_t offset, ptl_size_t len)
513 /* Initialise 'dst' to the subset of 'src' starting at 'offset',
514 * for exactly 'len' bytes, and return the number of entries.
515 * NB not destructive to 'src' */
519 if (len == 0) /* no data => */
520 return (0); /* no frags */
522 LASSERT (src_niov > 0);
523 while (offset >= src->kiov_len) { /* skip initial frags */
524 offset -= src->kiov_len;
527 LASSERT (src_niov > 0);
532 LASSERT (src_niov > 0);
533 LASSERT (niov <= dst_niov);
/* first (possibly partial) fragment starts 'offset' bytes into
 * the source page fragment */
535 frag_len = src->kiov_len - offset;
536 dst->kiov_page = src->kiov_page;
537 dst->kiov_offset = src->kiov_offset + offset;
539 if (len <= frag_len) {
/* window must stay within the page */
541 LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
545 dst->kiov_len = frag_len;
546 LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
/* Dispatch an incoming payload to the NAL's receive callback, choosing
 * the iovec or page (kiov) variant from the MD's options.  A NULL MD
 * (judging from the md->options test being skipped first) means
 * "discard": receive with no fragments. */
559 lib_recv (lib_nal_t *nal, void *private, lib_msg_t *msg, lib_md_t *md,
560 ptl_size_t offset, ptl_size_t mlen, ptl_size_t rlen)
563 return (nal->libnal_recv(nal, private, msg,
565 offset, mlen, rlen));
567 if ((md->options & PTL_MD_KIOV) == 0)
568 return (nal->libnal_recv(nal, private, msg,
569 md->md_niov, md->md_iov.iov,
570 offset, mlen, rlen));
572 return (nal->libnal_recv_pages(nal, private, msg,
573 md->md_niov, md->md_iov.kiov,
574 offset, mlen, rlen));
/* Dispatch an outgoing message to the NAL's send callback, choosing
 * the iovec or page (kiov) variant from the MD's options; mirrors
 * lib_recv above. */
578 lib_send (lib_nal_t *nal, void *private, lib_msg_t *msg,
579 ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
580 lib_md_t *md, ptl_size_t offset, ptl_size_t len)
583 return (nal->libnal_send(nal, private, msg,
588 if ((md->options & PTL_MD_KIOV) == 0)
589 return (nal->libnal_send(nal, private, msg,
591 md->md_niov, md->md_iov.iov,
594 return (nal->libnal_send_pages(nal, private, msg,
596 md->md_niov, md->md_iov.kiov,
/* Commit 'md' to a network operation carried by 'msg': decrement its
 * threshold (unless infinite), bump the message counters, and queue
 * the message on the NI's active list.  Must be called with LIB_LOCK
 * held (see comment below). */
601 lib_commit_md (lib_nal_t *nal, lib_md_t *md, lib_msg_t *msg)
603 /* ALWAYS called holding the LIB_LOCK */
604 lib_counters_t *counters = &nal->libnal_ni.ni_counters;
606 /* Here, we commit the MD to a network OP by marking it busy and
607 * decrementing its threshold. Come what may, the network "owns"
608 * the MD until a call to lib_finalize() signals completion. */
612 if (md->threshold != PTL_MD_THRESH_INF) {
613 LASSERT (md->threshold > 0);
/* track peak concurrent messages */
617 counters->msgs_alloc++;
618 if (counters->msgs_alloc > counters->msgs_max)
619 counters->msgs_max = counters->msgs_alloc;
621 list_add (&msg->msg_list, &nal->libnal_ni.ni_active_msgs);
/* Account for and discard an uncommitted incoming message: bump the
 * drop counters under the lock, then tell the NAL to receive (and
 * throw away) the payload with a NULL msg/MD. */
625 lib_drop_message (lib_nal_t *nal, void *private, ptl_hdr_t *hdr)
629 /* CAVEAT EMPTOR: this only drops messages that we've not committed
630 * to receive (init_msg() not called) and therefore can't cause an
633 LIB_LOCK(nal, flags);
634 nal->libnal_ni.ni_counters.drop_count++;
635 nal->libnal_ni.ni_counters.drop_length += hdr->payload_length;
636 LIB_UNLOCK(nal, flags);
638 /* NULL msg => if NAL calls lib_finalize it will be a noop */
639 (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
643 * Incoming messages have a ptl_msg_t object associated with them
644 * by the library. This object encapsulates the state of the
645 * message and allows the NAL to do non-blocking receives or sends
/* Handle an incoming PUT: byte-swap the put-specific header fields,
 * match it against a local MD under LIB_LOCK, record whether an ACK is
 * wanted, bump receive counters, and kick off the NAL receive of the
 * payload. */
650 parse_put(lib_nal_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
652 lib_ni_t *ni = &nal->libnal_ni;
653 ptl_size_t mlength = 0;
654 ptl_size_t offset = 0;
659 /* Convert put fields to host byte order */
660 hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
661 hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
662 hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);
664 LIB_LOCK(nal, flags);
666 md = lib_match_md(nal, hdr->msg.put.ptl_index, PTL_MD_OP_PUT,
667 hdr->src_nid, hdr->src_pid,
668 hdr->payload_length, hdr->msg.put.offset,
669 hdr->msg.put.match_bits, msg,
672 LIB_UNLOCK(nal, flags);
676 msg->ev.type = PTL_EVENT_PUT_END;
677 msg->ev.hdr_data = hdr->msg.put.hdr_data;
/* remember the sender's ack MD unless acks are disabled locally */
679 if (!ptl_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
680 !(md->options & PTL_MD_ACK_DISABLE)) {
681 msg->ack_wmd = hdr->msg.put.ack_wmd;
684 ni->ni_counters.recv_count++;
685 ni->ni_counters.recv_length += mlength;
687 LIB_UNLOCK(nal, flags);
689 rc = lib_recv(nal, private, msg, md, offset, mlength,
690 hdr->payload_length);
691 CERROR(LPU64": error on receiving PUT from "LPU64": %d\n",
693 ni->ni_pid.nid, hdr->src_nid, rc);
/* Handle an incoming GET: byte-swap the get-specific header fields,
 * match against a local MD, build a REPLY header carrying the matched
 * data, send the REPLY, then drain any (unexpected) payload after the
 * GET header. */
699 parse_get(lib_nal_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
701 lib_ni_t *ni = &nal->libnal_ni;
702 ptl_size_t mlength = 0;
703 ptl_size_t offset = 0;
709 /* Convert get fields to host byte order */
710 hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
711 hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
712 hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
713 hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);
715 LIB_LOCK(nal, flags);
717 md = lib_match_md(nal, hdr->msg.get.ptl_index, PTL_MD_OP_GET,
718 hdr->src_nid, hdr->src_pid,
719 hdr->msg.get.sink_length, hdr->msg.get.src_offset,
720 hdr->msg.get.match_bits, msg,
723 LIB_UNLOCK(nal, flags);
727 msg->ev.type = PTL_EVENT_GET_END;
728 msg->ev.hdr_data = 0;
/* a GET is outbound data from our side: count it as a send */
730 ni->ni_counters.send_count++;
731 ni->ni_counters.send_length += mlength;
733 LIB_UNLOCK(nal, flags);
/* Build the REPLY header in wire (little-endian) byte order */
735 memset (&reply, 0, sizeof (reply));
736 reply.type = cpu_to_le32(PTL_MSG_REPLY);
737 reply.dest_nid = cpu_to_le64(hdr->src_nid);
738 reply.dest_pid = cpu_to_le32(hdr->src_pid);
739 reply.src_nid = cpu_to_le64(ni->ni_pid.nid);
740 reply.src_pid = cpu_to_le32(ni->ni_pid.pid);
741 reply.payload_length = cpu_to_le32(mlength);
/* wmd travels back verbatim: only its creator interprets it */
743 reply.msg.reply.dst_wmd = hdr->msg.get.return_wmd;
745 /* NB call lib_send() _BEFORE_ lib_recv() completes the incoming
746 * message. Some NALs _require_ this to implement optimized GET */
748 rc = lib_send (nal, private, msg, &reply, PTL_MSG_REPLY,
749 hdr->src_nid, hdr->src_pid, md, offset, mlength);
751 CERROR(LPU64": Unable to send REPLY for GET from "LPU64": %d\n",
752 ni->ni_pid.nid, hdr->src_nid, rc);
754 /* Discard any junk after the hdr */
755 (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
/* Handle an incoming REPLY (data returning for a GET we sent): look up
 * the sink MD directly by wire handle (no matching), validate it is
 * active and large enough (or truncatable), commit it, fill in the
 * REPLY_END event and receive the payload. */
761 parse_reply(lib_nal_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
763 lib_ni_t *ni = &nal->libnal_ni;
770 LIB_LOCK(nal, flags);
772 /* NB handles only looked up by creator (no flips) */
773 md = ptl_wire_handle2md(&hdr->msg.reply.dst_wmd, nal);
774 if (md == NULL || md->threshold == 0) {
775 CERROR (LPU64": Dropping REPLY from "LPU64" for %s MD "LPX64"."LPX64"\n",
776 ni->ni_pid.nid, hdr->src_nid,
777 md == NULL ? "invalid" : "inactive",
778 hdr->msg.reply.dst_wmd.wh_interface_cookie,
779 hdr->msg.reply.dst_wmd.wh_object_cookie);
781 LIB_UNLOCK(nal, flags);
/* GET sink MDs always start at offset 0 */
785 LASSERT (md->offset == 0);
787 length = rlength = hdr->payload_length;
/* oversize reply: only acceptable if the MD allows truncation */
789 if (length > md->length) {
790 if ((md->options & PTL_MD_TRUNCATE) == 0) {
791 CERROR (LPU64": Dropping REPLY from "LPU64
792 " length %d for MD "LPX64" would overflow (%d)\n",
793 ni->ni_pid.nid, hdr->src_nid, length,
794 hdr->msg.reply.dst_wmd.wh_object_cookie,
796 LIB_UNLOCK(nal, flags);
802 CDEBUG(D_NET, "Reply from "LPU64" of length %d/%d into md "LPX64"\n",
803 hdr->src_nid, length, rlength,
804 hdr->msg.reply.dst_wmd.wh_object_cookie);
806 lib_commit_md(nal, md, msg);
808 msg->ev.type = PTL_EVENT_REPLY_END;
809 msg->ev.initiator.nid = hdr->src_nid;
810 msg->ev.initiator.pid = hdr->src_pid;
811 msg->ev.rlength = rlength;
812 msg->ev.mlength = length;
815 lib_md_deconstruct(nal, md, &msg->ev.md);
816 ptl_md2handle(&msg->ev.md_handle, nal, md);
818 ni->ni_counters.recv_count++;
819 ni->ni_counters.recv_length += length;
821 LIB_UNLOCK(nal, flags);
823 rc = lib_recv(nal, private, msg, md, 0, length, rlength);
825 CERROR(LPU64": error on receiving REPLY from "LPU64": %d\n",
826 ni->ni_pid.nid, hdr->src_nid, rc);
/* Handle an incoming ACK (sent by a peer that received our PUT): look
 * up the acked MD by wire handle, commit it, deliver a PTL_EVENT_ACK
 * via lib_finalize, then drain any payload after the header. */
832 parse_ack(lib_nal_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
834 lib_ni_t *ni = &nal->libnal_ni;
838 /* Convert ack fields to host byte order */
839 hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
840 hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
842 LIB_LOCK(nal, flags);
844 /* NB handles only looked up by creator (no flips) */
845 md = ptl_wire_handle2md(&hdr->msg.ack.dst_wmd, nal);
/* stale/unknown MD: only D_INFO, since late ACKs are expected */
846 if (md == NULL || md->threshold == 0) {
847 CDEBUG(D_INFO, LPU64": Dropping ACK from "LPU64" to %s MD "
848 LPX64"."LPX64"\n", ni->ni_pid.nid, hdr->src_nid,
849 (md == NULL) ? "invalid" : "inactive",
850 hdr->msg.ack.dst_wmd.wh_interface_cookie,
851 hdr->msg.ack.dst_wmd.wh_object_cookie);
853 LIB_UNLOCK(nal, flags);
857 CDEBUG(D_NET, LPU64": ACK from "LPU64" into md "LPX64"\n",
858 ni->ni_pid.nid, hdr->src_nid,
859 hdr->msg.ack.dst_wmd.wh_object_cookie);
861 lib_commit_md(nal, md, msg);
863 msg->ev.type = PTL_EVENT_ACK;
864 msg->ev.initiator.nid = hdr->src_nid;
865 msg->ev.initiator.pid = hdr->src_pid;
866 msg->ev.mlength = hdr->msg.ack.mlength;
867 msg->ev.match_bits = hdr->msg.ack.match_bits;
869 lib_md_deconstruct(nal, md, &msg->ev.md);
870 ptl_md2handle(&msg->ev.md_handle, nal, md);
872 ni->ni_counters.recv_count++;
874 LIB_UNLOCK(nal, flags);
876 /* We have received and matched up the ack OK, create the
877 * completion event now... */
878 lib_finalize(nal, private, msg, PTL_OK);
880 /* ...and now discard any junk after the hdr */
881 (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
/* Map hdr->type to a human-readable name for log messages; unknown
 * types come back as "<UNKNOWN>". */
887 hdr_type_string (ptl_hdr_t *hdr)
901 return ("<UNKNOWN>");
/* Debug dump of a portals header: common fields first, then the
 * type-specific union member (put/get/ack/reply). */
905 void print_hdr(lib_nal_t *nal, ptl_hdr_t * hdr)
907 char *type_str = hdr_type_string (hdr);
909 CWARN("P3 Header at %p of type %s\n", hdr, type_str);
910 CWARN(" From nid/pid "LPX64"/%u", hdr->src_nid, hdr->src_pid);
911 CWARN(" To nid/pid "LPX64"/%u\n", hdr->dest_nid, hdr->dest_pid);
/* PUT-specific fields */
918 CWARN(" Ptl index %d, ack md "LPX64"."LPX64", "
919 "match bits "LPX64"\n",
920 hdr->msg.put.ptl_index,
921 hdr->msg.put.ack_wmd.wh_interface_cookie,
922 hdr->msg.put.ack_wmd.wh_object_cookie,
923 hdr->msg.put.match_bits);
924 CWARN(" Length %d, offset %d, hdr data "LPX64"\n",
925 hdr->payload_length, hdr->msg.put.offset,
926 hdr->msg.put.hdr_data);
/* GET-specific fields */
930 CWARN(" Ptl index %d, return md "LPX64"."LPX64", "
931 "match bits "LPX64"\n", hdr->msg.get.ptl_index,
932 hdr->msg.get.return_wmd.wh_interface_cookie,
933 hdr->msg.get.return_wmd.wh_object_cookie,
934 hdr->msg.get.match_bits);
935 CWARN(" Length %d, src offset %d\n",
936 hdr->msg.get.sink_length,
937 hdr->msg.get.src_offset);
/* ACK-specific fields */
941 CWARN(" dst md "LPX64"."LPX64", "
942 "manipulated length %d\n",
943 hdr->msg.ack.dst_wmd.wh_interface_cookie,
944 hdr->msg.ack.dst_wmd.wh_object_cookie,
945 hdr->msg.ack.mlength);
/* REPLY-specific fields */
949 CWARN(" dst md "LPX64"."LPX64", "
951 hdr->msg.reply.dst_wmd.wh_interface_cookie,
952 hdr->msg.reply.dst_wmd.wh_object_cookie,
953 hdr->payload_length);
956 } /* end of print_hdr() */
/* Entry point for every incoming message: byte-swap the common header
 * fields, validate type and destination, apply simulated-failure test
 * hooks, allocate a lib_msg_t and dispatch to the per-type parser.
 * Returns PTL_OK whenever the header itself parsed, regardless of
 * later receive errors (those are the NAL's responsibility). */
960 lib_parse(lib_nal_t *nal, ptl_hdr_t *hdr, void *private)
966 /* NB we return PTL_OK if we manage to parse the header and believe
967 * it looks OK. Anything that goes wrong with receiving the
968 * message after that point is the responsibility of the NAL */
970 /* convert common fields to host byte order */
971 hdr->type = le32_to_cpu(hdr->type);
972 hdr->src_nid = le64_to_cpu(hdr->src_nid);
973 hdr->src_pid = le32_to_cpu(hdr->src_pid);
974 hdr->dest_pid = le32_to_cpu(hdr->dest_pid);
975 hdr->payload_length = le32_to_cpu(hdr->payload_length);
978 case PTL_MSG_HELLO: {
979 /* dest_nid is really ptl_magicversion_t */
980 ptl_magicversion_t *mv = (ptl_magicversion_t *)&hdr->dest_nid;
982 mv->magic = le32_to_cpu(mv->magic);
983 mv->version_major = le16_to_cpu(mv->version_major);
984 mv->version_minor = le16_to_cpu(mv->version_minor);
/* well-formed HELLO: drop politely (we don't handle them here) */
986 if (mv->magic == PORTALS_PROTO_MAGIC &&
987 mv->version_major == PORTALS_PROTO_VERSION_MAJOR &&
988 mv->version_minor == PORTALS_PROTO_VERSION_MINOR) {
989 CWARN (LPU64": Dropping unexpected HELLO message: "
990 "magic %d, version %d.%d from "LPD64"\n",
991 nal->libnal_ni.ni_pid.nid, mv->magic,
992 mv->version_major, mv->version_minor,
995 /* it's good but we don't want it */
996 lib_drop_message(nal, private, hdr);
1000 /* we got garbage */
1001 CERROR (LPU64": Bad HELLO message: "
1002 "magic %d, version %d.%d from "LPD64"\n",
1003 nal->libnal_ni.ni_pid.nid, mv->magic,
1004 mv->version_major, mv->version_minor,
/* non-HELLO: dest_nid is a real NID — swap it and check it's us */
1013 hdr->dest_nid = le64_to_cpu(hdr->dest_nid);
1014 if (hdr->dest_nid != nal->libnal_ni.ni_pid.nid) {
1015 CERROR(LPU64": BAD dest NID in %s message from"
1016 LPU64" to "LPU64" (not me)\n",
1017 nal->libnal_ni.ni_pid.nid, hdr_type_string (hdr),
1018 hdr->src_nid, hdr->dest_nid);
1024 CERROR(LPU64": Bad message type 0x%x from "LPU64"\n",
1025 nal->libnal_ni.ni_pid.nid, hdr->type, hdr->src_nid);
1029 /* We've decided we're not receiving garbage since we can parse the
1030 * header. We will return PTL_OK come what may... */
1032 if (!list_empty (&nal->libnal_ni.ni_test_peers) && /* normally we don't */
1033 fail_peer (nal, hdr->src_nid, 0)) /* shall we now? */
1035 CERROR(LPU64": Dropping incoming %s from "LPU64
1036 ": simulated failure\n",
1037 nal->libnal_ni.ni_pid.nid, hdr_type_string (hdr),
1039 lib_drop_message(nal, private, hdr);
1043 msg = lib_msg_alloc(nal);
1045 CERROR(LPU64": Dropping incoming %s from "LPU64
1046 ": can't allocate a lib_msg_t\n",
1047 nal->libnal_ni.ni_pid.nid, hdr_type_string (hdr),
1049 lib_drop_message(nal, private, hdr);
/* dispatch on message type */
1053 switch (hdr->type) {
1055 rc = parse_ack(nal, hdr, private, msg);
1058 rc = parse_put(nal, hdr, private, msg);
1061 rc = parse_get(nal, hdr, private, msg);
1064 rc = parse_reply(nal, hdr, private, msg);
1068 rc = PTL_FAIL; /* no compiler warning please */
/* parser failed: if the msg got committed to an MD, finalize it
 * with the error; otherwise free it and drop the message */
1073 if (msg->md != NULL) {
1075 lib_finalize(nal, private, msg, rc);
1077 LIB_LOCK(nal, flags);
1078 lib_msg_free(nal, msg); /* expects LIB_LOCK held */
1079 LIB_UNLOCK(nal, flags);
1081 lib_drop_message(nal, private, hdr);
1086 /* That's "OK I can parse it", not "OK I like it" :) */
/* PtlPut implementation: validate the local MD, build the PUT wire
 * header (little-endian on the wire, ack cookies in creator byte
 * order), commit the MD, fill in the SEND_END event and hand the
 * message to the NAL.  Completion is signalled asynchronously via an
 * event from lib_finalize. */
1090 lib_api_put(nal_t *apinal, ptl_handle_md_t *mdh,
1091 ptl_ack_req_t ack, ptl_process_id_t *id,
1092 ptl_pt_index_t portal, ptl_ac_index_t ac,
1093 ptl_match_bits_t match_bits,
1094 ptl_size_t offset, ptl_hdr_data_t hdr_data)
1096 lib_nal_t *nal = apinal->nal_data;
1097 lib_ni_t *ni = &nal->libnal_ni;
1101 unsigned long flags;
/* test hook: simulated outgoing failures */
1104 if (!list_empty (&ni->ni_test_peers) && /* normally we don't */
1105 fail_peer (nal, id->nid, 1)) /* shall we now? */
1107 CERROR("Dropping PUT to "LPU64": simulated failure\n",
1109 return PTL_PROCESS_INVALID;
1112 msg = lib_msg_alloc(nal);
1114 CERROR(LPU64": Dropping PUT to "LPU64": ENOMEM on lib_msg_t\n",
1115 ni->ni_pid.nid, id->nid);
1116 return PTL_NO_SPACE;
1119 LIB_LOCK(nal, flags);
1121 md = ptl_handle2md(mdh, nal);
1122 if (md == NULL || md->threshold == 0) {
1123 lib_msg_free(nal, msg);
1124 LIB_UNLOCK(nal, flags);
1126 return PTL_MD_INVALID;
1129 CDEBUG(D_NET, "PtlPut -> "LPX64"\n", id->nid);
/* Build the PUT header in wire (little-endian) byte order */
1131 memset (&hdr, 0, sizeof (hdr));
1132 hdr.type = cpu_to_le32(PTL_MSG_PUT);
1133 hdr.dest_nid = cpu_to_le64(id->nid);
1134 hdr.dest_pid = cpu_to_le32(id->pid);
1135 hdr.src_nid = cpu_to_le64(ni->ni_pid.nid);
1136 hdr.src_pid = cpu_to_le32(ni->ni_pid.pid);
1137 hdr.payload_length = cpu_to_le32(md->length);
1139 /* NB handles only looked up by creator (no flips) */
1140 if (ack == PTL_ACK_REQ) {
1141 hdr.msg.put.ack_wmd.wh_interface_cookie = ni->ni_interface_cookie;
1142 hdr.msg.put.ack_wmd.wh_object_cookie = md->md_lh.lh_cookie;
1144 hdr.msg.put.ack_wmd = PTL_WIRE_HANDLE_NONE;
1147 hdr.msg.put.match_bits = cpu_to_le64(match_bits);
1148 hdr.msg.put.ptl_index = cpu_to_le32(portal);
1149 hdr.msg.put.offset = cpu_to_le32(offset);
1150 hdr.msg.put.hdr_data = hdr_data;
1152 lib_commit_md(nal, md, msg);
1154 msg->ev.type = PTL_EVENT_SEND_END;
1155 msg->ev.initiator.nid = ni->ni_pid.nid;
1156 msg->ev.initiator.pid = ni->ni_pid.pid;
1157 msg->ev.pt_index = portal;
1158 msg->ev.match_bits = match_bits;
1159 msg->ev.rlength = md->length;
1160 msg->ev.mlength = md->length;
1161 msg->ev.offset = offset;
1162 msg->ev.hdr_data = hdr_data;
1164 lib_md_deconstruct(nal, md, &msg->ev.md);
1165 ptl_md2handle(&msg->ev.md_handle, nal, md);
1167 ni->ni_counters.send_count++;
1168 ni->ni_counters.send_length += md->length;
1170 LIB_UNLOCK(nal, flags);
1172 rc = lib_send (nal, NULL, msg, &hdr, PTL_MSG_PUT,
1173 id->nid, id->pid, md, 0, md->length);
/* send failed immediately: finalize with the error so the caller
 * still gets its event */
1175 CERROR("Error sending PUT to "LPX64": %d\n",
1177 lib_finalize (nal, NULL, msg, rc);
1180 /* completion will be signalled by an event */
/* For NALs that DMA GET data straight into the sink MD (no REPLY
 * message on the wire): allocate and commit a msg the NAL will pass to
 * lib_finalize() once the sink data has arrived.  Must be called
 * before finalizing 'getmsg', which frees the original GET state. */
1185 lib_create_reply_msg (lib_nal_t *nal, ptl_nid_t peer_nid, lib_msg_t *getmsg)
1187 /* The NAL can DMA direct to the GET md (i.e. no REPLY msg). This
1188 * returns a msg for the NAL to pass to lib_finalize() when the sink
1189 * data has been received.
1191 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
1192 * lib_finalize() is called on it, so the NAL must call this first */
1194 lib_ni_t *ni = &nal->libnal_ni;
1195 lib_msg_t *msg = lib_msg_alloc(nal);
1196 lib_md_t *getmd = getmsg->md;
1197 unsigned long flags;
1199 LIB_LOCK(nal, flags);
1201 LASSERT (getmd->pending > 0);
/* msg allocation failed */
1204 CERROR ("Dropping REPLY from "LPU64": can't allocate msg\n",
/* MD already used up between the GET and the data arriving */
1209 if (getmd->threshold == 0) {
1210 CERROR ("Dropping REPLY from "LPU64" for inactive MD %p\n",
1215 LASSERT (getmd->offset == 0);
1217 CDEBUG(D_NET, "Reply from "LPU64" md %p\n", peer_nid, getmd);
1219 lib_commit_md (nal, getmd, msg);
1221 msg->ev.type = PTL_EVENT_REPLY_END;
1222 msg->ev.initiator.nid = peer_nid;
1223 msg->ev.initiator.pid = 0; /* XXX FIXME!!! */
1224 msg->ev.rlength = msg->ev.mlength = getmd->length;
1227 lib_md_deconstruct(nal, getmd, &msg->ev.md);
1228 ptl_md2handle(&msg->ev.md_handle, nal, getmd);
1230 ni->ni_counters.recv_count++;
1231 ni->ni_counters.recv_length += getmd->length;
1233 LIB_UNLOCK(nal, flags);
/* failure path: free the msg (if any) and account the drop */
1238 lib_msg_free(nal, msg);
1240 nal->libnal_ni.ni_counters.drop_count++;
1241 nal->libnal_ni.ni_counters.drop_length += getmd->length;
1243 LIB_UNLOCK (nal, flags);
/* PtlGet implementation: validate the local sink MD, build the GET
 * wire header (return cookies in creator byte order), commit the MD,
 * fill in the SEND_END event and hand the (payload-less) request to
 * the NAL.  Completion is signalled asynchronously via an event. */
1249 lib_api_get(nal_t *apinal, ptl_handle_md_t *mdh, ptl_process_id_t *id,
1250 ptl_pt_index_t portal, ptl_ac_index_t ac,
1251 ptl_match_bits_t match_bits, ptl_size_t offset)
1253 lib_nal_t *nal = apinal->nal_data;
1254 lib_ni_t *ni = &nal->libnal_ni;
1258 unsigned long flags;
/* test hook: simulated outgoing failures */
1261 if (!list_empty (&ni->ni_test_peers) && /* normally we don't */
1262 fail_peer (nal, id->nid, 1)) /* shall we now? */
/* BUGFIX: message said "Dropping PUT" in the GET path
 * (copy/paste from lib_api_put) */
1264 CERROR("Dropping GET to "LPX64": simulated failure\n",
1266 return PTL_PROCESS_INVALID;
1269 msg = lib_msg_alloc(nal);
1271 CERROR("Dropping GET to "LPU64": ENOMEM on lib_msg_t\n",
1273 return PTL_NO_SPACE;
1276 LIB_LOCK(nal, flags);
1278 md = ptl_handle2md(mdh, nal);
1279 if (md == NULL || !md->threshold) {
1280 lib_msg_free(nal, msg);
1281 LIB_UNLOCK(nal, flags);
1283 return PTL_MD_INVALID;
1286 CDEBUG(D_NET, "PtlGet -> %Lu: %lu\n", (unsigned long long)id->nid,
1287 (unsigned long)id->pid);
/* Build the GET header in wire (little-endian) byte order */
1289 memset (&hdr, 0, sizeof (hdr));
1290 hdr.type = cpu_to_le32(PTL_MSG_GET);
1291 hdr.dest_nid = cpu_to_le64(id->nid);
1292 hdr.dest_pid = cpu_to_le32(id->pid);
1293 hdr.src_nid = cpu_to_le64(ni->ni_pid.nid);
1294 hdr.src_pid = cpu_to_le32(ni->ni_pid.pid);
1295 hdr.payload_length = 0;
1297 /* NB handles only looked up by creator (no flips) */
1298 hdr.msg.get.return_wmd.wh_interface_cookie = ni->ni_interface_cookie;
1299 hdr.msg.get.return_wmd.wh_object_cookie = md->md_lh.lh_cookie;
1301 hdr.msg.get.match_bits = cpu_to_le64(match_bits);
1302 hdr.msg.get.ptl_index = cpu_to_le32(portal);
1303 hdr.msg.get.src_offset = cpu_to_le32(offset);
1304 hdr.msg.get.sink_length = cpu_to_le32(md->length);
1306 lib_commit_md(nal, md, msg);
1308 msg->ev.type = PTL_EVENT_SEND_END;
1309 msg->ev.initiator = ni->ni_pid;
1310 msg->ev.pt_index = portal;
1311 msg->ev.match_bits = match_bits;
1312 msg->ev.rlength = md->length;
1313 msg->ev.mlength = md->length;
1314 msg->ev.offset = offset;
1315 msg->ev.hdr_data = 0;
1317 lib_md_deconstruct(nal, md, &msg->ev.md);
1318 ptl_md2handle(&msg->ev.md_handle, nal, md);
1320 ni->ni_counters.send_count++;
1322 LIB_UNLOCK(nal, flags);
/* NB NULL md: a GET request carries no payload of ours */
1324 rc = lib_send (nal, NULL, msg, &hdr, PTL_MSG_GET,
1325 id->nid, id->pid, NULL, 0, 0);
1327 CERROR(LPU64": error sending GET to "LPU64": %d\n",
1328 ni->ni_pid.nid, id->nid, rc);
1329 lib_finalize (nal, NULL, msg, rc);
1332 /* completion will be signalled by an event */
1336 void lib_assert_wire_constants (void)
1338 /* Wire protocol assertions generated by 'wirecheck'
1339 * running on Linux mdevi 2.4.21-p4smp-55chaos #1 SMP Tue Jun 8 14:38:44 PDT 2004 i686 i686 i
1340 * with gcc version 3.2.3 20030502 (Red Hat Linux 3.2.3-34) */
1344 LASSERT (PORTALS_PROTO_MAGIC == 0xeebc0ded);
1345 LASSERT (PORTALS_PROTO_VERSION_MAJOR == 1);
1346 LASSERT (PORTALS_PROTO_VERSION_MINOR == 0);
1347 LASSERT (PTL_MSG_ACK == 0);
1348 LASSERT (PTL_MSG_PUT == 1);
1349 LASSERT (PTL_MSG_GET == 2);
1350 LASSERT (PTL_MSG_REPLY == 3);
1351 LASSERT (PTL_MSG_HELLO == 4);
1353 /* Checks for struct ptl_handle_wire_t */
1354 LASSERT ((int)sizeof(ptl_handle_wire_t) == 16);
1355 LASSERT ((int)offsetof(ptl_handle_wire_t, wh_interface_cookie) == 0);
1356 LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8);
1357 LASSERT ((int)offsetof(ptl_handle_wire_t, wh_object_cookie) == 8);
1358 LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_object_cookie) == 8);
1360 /* Checks for struct ptl_magicversion_t */
1361 LASSERT ((int)sizeof(ptl_magicversion_t) == 8);
1362 LASSERT ((int)offsetof(ptl_magicversion_t, magic) == 0);
1363 LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->magic) == 4);
1364 LASSERT ((int)offsetof(ptl_magicversion_t, version_major) == 4);
1365 LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_major) == 2);
1366 LASSERT ((int)offsetof(ptl_magicversion_t, version_minor) == 6);
1367 LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_minor) == 2);
1369 /* Checks for struct ptl_hdr_t */
1370 LASSERT ((int)sizeof(ptl_hdr_t) == 72);
1371 LASSERT ((int)offsetof(ptl_hdr_t, dest_nid) == 0);
1372 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_nid) == 8);
1373 LASSERT ((int)offsetof(ptl_hdr_t, src_nid) == 8);
1374 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_nid) == 8);
1375 LASSERT ((int)offsetof(ptl_hdr_t, dest_pid) == 16);
1376 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_pid) == 4);
1377 LASSERT ((int)offsetof(ptl_hdr_t, src_pid) == 20);
1378 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_pid) == 4);
1379 LASSERT ((int)offsetof(ptl_hdr_t, type) == 24);
1380 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->type) == 4);
1381 LASSERT ((int)offsetof(ptl_hdr_t, payload_length) == 28);
1382 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->payload_length) == 4);
1383 LASSERT ((int)offsetof(ptl_hdr_t, msg) == 32);
1384 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg) == 40);
1387 LASSERT ((int)offsetof(ptl_hdr_t, msg.ack.dst_wmd) == 32);
1388 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16);
1389 LASSERT ((int)offsetof(ptl_hdr_t, msg.ack.match_bits) == 48);
1390 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.match_bits) == 8);
1391 LASSERT ((int)offsetof(ptl_hdr_t, msg.ack.mlength) == 56);
1392 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.mlength) == 4);
1395 LASSERT ((int)offsetof(ptl_hdr_t, msg.put.ack_wmd) == 32);
1396 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16);
1397 LASSERT ((int)offsetof(ptl_hdr_t, msg.put.match_bits) == 48);
1398 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.match_bits) == 8);
1399 LASSERT ((int)offsetof(ptl_hdr_t, msg.put.hdr_data) == 56);
1400 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.hdr_data) == 8);
1401 LASSERT ((int)offsetof(ptl_hdr_t, msg.put.ptl_index) == 64);
1402 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ptl_index) == 4);
1403 LASSERT ((int)offsetof(ptl_hdr_t, msg.put.offset) == 68);
1404 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.offset) == 4);
1407 LASSERT ((int)offsetof(ptl_hdr_t, msg.get.return_wmd) == 32);
1408 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.return_wmd) == 16);
1409 LASSERT ((int)offsetof(ptl_hdr_t, msg.get.match_bits) == 48);
1410 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.match_bits) == 8);
1411 LASSERT ((int)offsetof(ptl_hdr_t, msg.get.ptl_index) == 56);
1412 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.ptl_index) == 4);
1413 LASSERT ((int)offsetof(ptl_hdr_t, msg.get.src_offset) == 60);
1414 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.src_offset) == 4);
1415 LASSERT ((int)offsetof(ptl_hdr_t, msg.get.sink_length) == 64);
1416 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.sink_length) == 4);
1419 LASSERT ((int)offsetof(ptl_hdr_t, msg.reply.dst_wmd) == 32);
1420 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16);
1423 LASSERT ((int)offsetof(ptl_hdr_t, msg.hello.incarnation) == 32);
1424 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.incarnation) == 8);
1425 LASSERT ((int)offsetof(ptl_hdr_t, msg.hello.type) == 40);
1426 LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.type) == 4);