#include <portals/lib-p30.h>
#include <portals/arg-blocks.h>
-/*
- * Right now it does not check access control lists.
- *
- * We only support one MD per ME, which is how the Portals 3.1 spec is written.
- * All previous complication is removed.
- */
-
-static lib_me_t *
-lib_find_me(nal_cb_t *nal, int index, int op_mask, ptl_nid_t src_nid,
- ptl_pid_t src_pid, ptl_size_t rlength, ptl_size_t roffset,
- ptl_match_bits_t match_bits, ptl_size_t *mlength_out,
- ptl_size_t *offset_out, int *unlink_out)
+/* forward ref */
+static void lib_commit_md (nal_cb_t *nal, lib_md_t *md, lib_msg_t *msg);
+
+static lib_md_t *
+lib_match_md(nal_cb_t *nal, int index, int op_mask,
+ ptl_nid_t src_nid, ptl_pid_t src_pid,
+ ptl_size_t rlength, ptl_size_t roffset,
+ ptl_match_bits_t match_bits, lib_msg_t *msg,
+ ptl_size_t *mlength_out, ptl_size_t *offset_out)
{
lib_ni_t *ni = &nal->ni;
struct list_head *match_list = &ni->tbl.tbl[index];
lib_md_t *md;
ptl_size_t mlength;
ptl_size_t offset;
-
ENTRY;
CDEBUG (D_NET, "Request from "LPU64".%d of length %d into portal %d "
LASSERT (me == md->me);
- /* MD deactivated */
- if (md->threshold == 0)
- continue;
-
/* mismatched MD op */
if ((md->options & op_mask) == 0)
continue;
+ /* MD exhausted */
+ if (lib_md_exhausted(md))
+ continue;
+
/* mismatched ME nid/pid? */
if (me->match_id.nid != PTL_NID_ANY &&
me->match_id.nid != src_nid)
else
offset = roffset;
- mlength = md->length - offset;
- if ((md->options & PTL_MD_MAX_SIZE) != 0 &&
- mlength > md->max_size)
+ if ((md->options & PTL_MD_MAX_SIZE) != 0) {
mlength = md->max_size;
+ LASSERT (md->offset + mlength <= md->length);
+ } else {
+ mlength = md->length - offset;
+ }
if (rlength <= mlength) { /* fits in allowed space */
mlength = rlength;
goto failed;
}
+ /* Commit to this ME/MD */
+ CDEBUG(D_NET, "Incoming %s index %x from "LPU64"/%u of "
+ "length %d/%d into md "LPX64" [%d] + %d\n",
+ (op_mask == PTL_MD_OP_PUT) ? "put" : "get",
+ index, src_nid, src_pid, mlength, rlength,
+ md->md_lh.lh_cookie, md->md_niov, offset);
+
+ lib_commit_md(nal, md, msg);
md->offset = offset + mlength;
+ /* NB Caller sets ev.type and ev.hdr_data */
+ msg->ev.initiator.nid = src_nid;
+ msg->ev.initiator.pid = src_pid;
+ msg->ev.portal = index;
+ msg->ev.match_bits = match_bits;
+ msg->ev.rlength = rlength;
+ msg->ev.mlength = mlength;
+ msg->ev.offset = offset;
+
+ lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
+
*offset_out = offset;
*mlength_out = mlength;
- *unlink_out = ((md->options & PTL_MD_AUTO_UNLINK) != 0 &&
- md->offset >= (md->length - md->max_size));
- RETURN (me);
+
+ /* Auto-unlink NOW, so the ME gets unlinked if required.
+ * We bumped md->pending above so the MD just gets flagged
+ * for unlink when it is finalized. */
+ if ((md->md_flags & PTL_MD_FLAG_AUTO_UNLINK) != 0 &&
+ lib_md_exhausted(md))
+ lib_md_unlink(nal, md);
+
+ RETURN (md);
}
failed:
}
void
-lib_copy_iov2buf (char *dest, int niov, struct iovec *iov, ptl_size_t len)
+lib_copy_iov2buf (char *dest, int niov, struct iovec *iov,
+ ptl_size_t offset, ptl_size_t len)
{
ptl_size_t nob;
- while (len > 0)
- {
+ if (len == 0)
+ return;
+
+ /* skip complete frags before 'offset' */
+ LASSERT (niov > 0);
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ iov++;
+ niov--;
LASSERT (niov > 0);
- nob = MIN (iov->iov_len, len);
- memcpy (dest, iov->iov_base, nob);
+ }
+
+ do {
+ LASSERT (niov > 0);
+ nob = MIN (iov->iov_len - offset, len);
+ memcpy (dest, iov->iov_base + offset, nob);
len -= nob;
dest += nob;
niov--;
iov++;
- }
+ offset = 0;
+ } while (len > 0);
}
void
-lib_copy_buf2iov (int niov, struct iovec *iov, char *src, ptl_size_t len)
+lib_copy_buf2iov (int niov, struct iovec *iov, ptl_size_t offset,
+ char *src, ptl_size_t len)
{
ptl_size_t nob;
- while (len > 0)
- {
+ if (len == 0)
+ return;
+
+ /* skip complete frags before 'offset' */
+ LASSERT (niov > 0);
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ iov++;
+ niov--;
LASSERT (niov > 0);
- nob = MIN (iov->iov_len, len);
- memcpy (iov->iov_base, src, nob);
+ }
+
+ do {
+ LASSERT (niov > 0);
+ nob = MIN (iov->iov_len - offset, len);
+ memcpy (iov->iov_base + offset, src, nob);
len -= nob;
src += nob;
niov--;
iov++;
- }
+ offset = 0;
+ } while (len > 0);
}
-static int
-lib_extract_iov (struct iovec *dst, lib_md_t *md,
+int
+lib_extract_iov (int dst_niov, struct iovec *dst,
+ int src_niov, struct iovec *src,
ptl_size_t offset, ptl_size_t len)
{
/* Initialise 'dst' to the subset of 'src' starting at 'offset',
* for exactly 'len' bytes, and return the number of entries.
* NB not destructive to 'src' */
- int src_niov = md->md_niov;
- struct iovec *src = md->md_iov.iov;
ptl_size_t frag_len;
- int dst_niov;
+ int niov;
- LASSERT (len >= 0);
- LASSERT (offset >= 0);
- LASSERT (offset + len <= md->length);
-
if (len == 0) /* no data => */
return (0); /* no frags */
LASSERT (src_niov > 0);
}
- dst_niov = 1;
+ niov = 1;
for (;;) {
LASSERT (src_niov > 0);
- LASSERT (dst_niov <= PTL_MD_MAX_IOV);
+ LASSERT (niov <= dst_niov);
frag_len = src->iov_len - offset;
dst->iov_base = ((char *)src->iov_base) + offset;
if (len <= frag_len) {
dst->iov_len = len;
- return (dst_niov);
+ return (niov);
}
dst->iov_len = frag_len;
len -= frag_len;
dst++;
src++;
- dst_niov++;
+ niov++;
src_niov--;
offset = 0;
}
}
void
-lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov, ptl_size_t len)
+lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov,
+ ptl_size_t offset, ptl_size_t len)
{
LASSERT (0);
}
void
-lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, char *dest, ptl_size_t len)
+lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, ptl_size_t offset,
+ char *src, ptl_size_t len)
{
LASSERT (0);
}
-static int
-lib_extract_kiov (ptl_kiov_t *dst, lib_md_t *md,
+int
+lib_extract_kiov (int dst_niov, ptl_kiov_t *dst,
+ int src_niov, ptl_kiov_t *src,
ptl_size_t offset, ptl_size_t len)
{
LASSERT (0);
}
void
-lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov, ptl_size_t len)
+lib_copy_kiov2buf (char *dest, int niov, ptl_kiov_t *kiov,
+ ptl_size_t offset, ptl_size_t len)
{
ptl_size_t nob;
char *addr;
+
+ if (len == 0)
+ return;
LASSERT (!in_interrupt ());
- while (len > 0)
- {
+
+ LASSERT (niov > 0);
+ while (offset >= kiov->kiov_len) {
+ offset -= kiov->kiov_len;
+ kiov++;
+ niov--;
LASSERT (niov > 0);
- nob = MIN (kiov->kiov_len, len);
+ }
+
+ do {
+ LASSERT (niov > 0);
+ nob = MIN (kiov->kiov_len - offset, len);
- addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
+ addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
memcpy (dest, addr, nob);
kunmap (kiov->kiov_page);
dest += nob;
niov--;
kiov++;
- }
+ offset = 0;
+ } while (len > 0);
}
void
-lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, char *src, ptl_size_t len)
+lib_copy_buf2kiov (int niov, ptl_kiov_t *kiov, ptl_size_t offset,
+ char *src, ptl_size_t len)
{
ptl_size_t nob;
char *addr;
+ if (len == 0)
+ return;
+
LASSERT (!in_interrupt ());
- while (len > 0)
- {
+
+ LASSERT (niov > 0);
+ while (offset >= kiov->kiov_len) {
+ offset -= kiov->kiov_len;
+ kiov++;
+ niov--;
+ LASSERT (niov > 0);
+ }
+
+ do {
LASSERT (niov > 0);
- nob = MIN (kiov->kiov_len, len);
+ nob = MIN (kiov->kiov_len - offset, len);
- addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
+ addr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
memcpy (addr, src, nob);
kunmap (kiov->kiov_page);
src += nob;
niov--;
kiov++;
- }
+ offset = 0;
+ } while (len > 0);
}
-static int
-lib_extract_kiov (ptl_kiov_t *dst, lib_md_t *md,
+int
+lib_extract_kiov (int dst_niov, ptl_kiov_t *dst,
+ int src_niov, ptl_kiov_t *src,
ptl_size_t offset, ptl_size_t len)
{
/* Initialise 'dst' to the subset of 'src' starting at 'offset',
* for exactly 'len' bytes, and return the number of entries.
* NB not destructive to 'src' */
- int src_niov = md->md_niov;
- ptl_kiov_t *src = md->md_iov.kiov;
ptl_size_t frag_len;
- int dst_niov;
+ int niov;
- LASSERT (len >= 0);
- LASSERT (offset >= 0);
- LASSERT (offset + len <= md->length);
-
if (len == 0) /* no data => */
return (0); /* no frags */
LASSERT (src_niov > 0);
}
- dst_niov = 1;
+ niov = 1;
for (;;) {
LASSERT (src_niov > 0);
- LASSERT (dst_niov <= PTL_MD_MAX_IOV);
+ LASSERT (niov <= dst_niov);
frag_len = src->kiov_len - offset;
dst->kiov_page = src->kiov_page;
if (len <= frag_len) {
dst->kiov_len = len;
LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
- return (dst_niov);
+ return (niov);
}
dst->kiov_len = frag_len;
len -= frag_len;
dst++;
src++;
- dst_niov++;
+ niov++;
src_niov--;
offset = 0;
}
}
#endif
-void
+ptl_err_t
lib_recv (nal_cb_t *nal, void *private, lib_msg_t *msg, lib_md_t *md,
ptl_size_t offset, ptl_size_t mlen, ptl_size_t rlen)
{
- int niov;
-
if (mlen == 0)
- nal->cb_recv (nal, private, msg, 0, NULL, 0, rlen);
- else if ((md->options & PTL_MD_KIOV) == 0) {
- niov = lib_extract_iov (msg->msg_iov.iov, md, offset, mlen);
- nal->cb_recv (nal, private, msg,
- niov, msg->msg_iov.iov, mlen, rlen);
- } else {
- niov = lib_extract_kiov (msg->msg_iov.kiov, md, offset, mlen);
- nal->cb_recv_pages (nal, private, msg,
- niov, msg->msg_iov.kiov, mlen, rlen);
- }
+ return (nal->cb_recv(nal, private, msg,
+ 0, NULL,
+ offset, mlen, rlen));
+
+ if ((md->options & PTL_MD_KIOV) == 0)
+ return (nal->cb_recv(nal, private, msg,
+ md->md_niov, md->md_iov.iov,
+ offset, mlen, rlen));
+
+ return (nal->cb_recv_pages(nal, private, msg,
+ md->md_niov, md->md_iov.kiov,
+ offset, mlen, rlen));
}
-int
+ptl_err_t
lib_send (nal_cb_t *nal, void *private, lib_msg_t *msg,
ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
lib_md_t *md, ptl_size_t offset, ptl_size_t len)
{
- int niov;
-
if (len == 0)
- return (nal->cb_send (nal, private, msg,
- hdr, type, nid, pid,
- 0, NULL, 0));
+ return (nal->cb_send(nal, private, msg,
+ hdr, type, nid, pid,
+ 0, NULL,
+ offset, len));
- if ((md->options & PTL_MD_KIOV) == 0) {
- niov = lib_extract_iov (msg->msg_iov.iov, md, offset, len);
- return (nal->cb_send (nal, private, msg,
- hdr, type, nid, pid,
- niov, msg->msg_iov.iov, len));
- }
-
- niov = lib_extract_kiov (msg->msg_iov.kiov, md, offset, len);
- return (nal->cb_send_pages (nal, private, msg,
- hdr, type, nid, pid,
- niov, msg->msg_iov.kiov, len));
+ if ((md->options & PTL_MD_KIOV) == 0)
+ return (nal->cb_send(nal, private, msg,
+ hdr, type, nid, pid,
+ md->md_niov, md->md_iov.iov,
+ offset, len));
+
+ return (nal->cb_send_pages(nal, private, msg,
+ hdr, type, nid, pid,
+ md->md_niov, md->md_iov.kiov,
+ offset, len));
}
-static lib_msg_t *
-get_new_msg (nal_cb_t *nal, lib_md_t *md)
+static void
+lib_commit_md (nal_cb_t *nal, lib_md_t *md, lib_msg_t *msg)
{
/* ALWAYS called holding the state_lock */
lib_counters_t *counters = &nal->ni.counters;
- lib_msg_t *msg = lib_msg_alloc (nal);
-
- if (msg == NULL)
- return (NULL);
-
- memset (msg, 0, sizeof (*msg));
-
- msg->send_ack = 0;
+ /* Here, we commit the MD to a network OP by marking it busy and
+ * decrementing its threshold. Come what may, the network "owns"
+ * the MD until a call to lib_finalize() signals completion. */
msg->md = md;
- do_gettimeofday(&msg->ev.arrival_time);
+
md->pending++;
if (md->threshold != PTL_MD_THRESH_INF) {
LASSERT (md->threshold > 0);
counters->msgs_max = counters->msgs_alloc;
list_add (&msg->msg_list, &nal->ni.ni_active_msgs);
-
- return (msg);
}
+static void
+lib_drop_message (nal_cb_t *nal, void *private, ptl_hdr_t *hdr)
+{
+ unsigned long flags;
+
+ /* CAVEAT EMPTOR: this only drops messages that we've not committed
+ * to receive (init_msg() not called) and therefore can't cause an
+ * event. */
+
+ state_lock(nal, &flags);
+ nal->ni.counters.drop_count++;
+ nal->ni.counters.drop_length += hdr->payload_length;
+ state_unlock(nal, &flags);
+
+ /* NULL msg => if NAL calls lib_finalize it will be a noop */
+ (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
+}
/*
* Incoming messages have a ptl_msg_t object associated with them
* of long messages.
*
*/
-static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
+static ptl_err_t
+parse_put(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
{
lib_ni_t *ni = &nal->ni;
ptl_size_t mlength = 0;
ptl_size_t offset = 0;
- int unlink = 0;
- lib_me_t *me;
+ ptl_err_t rc;
lib_md_t *md;
- lib_msg_t *msg;
unsigned long flags;
-
+
/* Convert put fields to host byte order */
hdr->msg.put.match_bits = NTOH__u64 (hdr->msg.put.match_bits);
hdr->msg.put.ptl_index = NTOH__u32 (hdr->msg.put.ptl_index);
state_lock(nal, &flags);
- me = lib_find_me(nal, hdr->msg.put.ptl_index, PTL_MD_OP_PUT,
- hdr->src_nid, hdr->src_pid,
- PTL_HDR_LENGTH (hdr), hdr->msg.put.offset,
- hdr->msg.put.match_bits,
- &mlength, &offset, &unlink);
- if (me == NULL)
- goto drop;
-
- md = me->md;
- CDEBUG(D_NET, "Incoming put index %x from "LPU64"/%u of length %d/%d "
- "into md "LPX64" [%d] + %d\n", hdr->msg.put.ptl_index,
- hdr->src_nid, hdr->src_pid, mlength, PTL_HDR_LENGTH(hdr),
- md->md_lh.lh_cookie, md->md_niov, offset);
-
- msg = get_new_msg (nal, md);
- if (msg == NULL) {
- CERROR(LPU64": Dropping PUT from "LPU64": can't allocate msg\n",
- ni->nid, hdr->src_nid);
- goto drop;
+ md = lib_match_md(nal, hdr->msg.put.ptl_index, PTL_MD_OP_PUT,
+ hdr->src_nid, hdr->src_pid,
+ hdr->payload_length, hdr->msg.put.offset,
+ hdr->msg.put.match_bits, msg,
+ &mlength, &offset);
+ if (md == NULL) {
+ state_unlock(nal, &flags);
+ return (PTL_FAIL);
}
+ msg->ev.type = PTL_EVENT_PUT_END;
+ msg->ev.hdr_data = hdr->msg.put.hdr_data;
+
if (!ptl_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
!(md->options & PTL_MD_ACK_DISABLE)) {
- msg->send_ack = 1;
msg->ack_wmd = hdr->msg.put.ack_wmd;
- msg->nid = hdr->src_nid;
- msg->pid = hdr->src_pid;
- msg->ev.match_bits = hdr->msg.put.match_bits;
- }
-
- if (md->eq) {
- msg->ev.type = PTL_EVENT_PUT;
- msg->ev.initiator.nid = hdr->src_nid;
- msg->ev.initiator.pid = hdr->src_pid;
- msg->ev.portal = hdr->msg.put.ptl_index;
- msg->ev.match_bits = hdr->msg.put.match_bits;
- msg->ev.rlength = PTL_HDR_LENGTH(hdr);
- msg->ev.mlength = mlength;
- msg->ev.offset = offset;
- msg->ev.hdr_data = hdr->msg.put.hdr_data;
-
- /* NB if this match has exhausted the MD, we can't be sure
- * that this event will the the last one associated with
- * this MD in the event queue (another message already
- * matching this ME/MD could end up being last). So we
- * remember the ME handle anyway and check again when we're
- * allocating our slot in the event queue.
- */
- ptl_me2handle (&msg->ev.unlinked_me, me);
-
- lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
}
ni->counters.recv_count++;
ni->counters.recv_length += mlength;
- /* only unlink after MD's pending count has been bumped
- * in get_new_msg() otherwise lib_me_unlink() will nuke it */
- if (unlink) {
- md->md_flags |= PTL_MD_FLAG_AUTO_UNLINKED;
- lib_me_unlink (nal, me);
- }
-
state_unlock(nal, &flags);
- lib_recv (nal, private, msg, md, offset, mlength, PTL_HDR_LENGTH (hdr));
- return 0;
+ rc = lib_recv(nal, private, msg, md, offset, mlength,
+ hdr->payload_length);
+ if (rc != PTL_OK)
+ CERROR(LPU64": error on receiving PUT from "LPU64": %d\n",
+ ni->nid, hdr->src_nid, rc);
- drop:
- nal->ni.counters.drop_count++;
- nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr);
- state_unlock (nal, &flags);
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return -1;
+ return (rc);
}
-static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
+static ptl_err_t
+parse_get(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
{
lib_ni_t *ni = &nal->ni;
ptl_size_t mlength = 0;
ptl_size_t offset = 0;
- int unlink = 0;
- lib_me_t *me;
lib_md_t *md;
- lib_msg_t *msg;
ptl_hdr_t reply;
unsigned long flags;
int rc;
hdr->msg.get.sink_length = NTOH__u32 (hdr->msg.get.sink_length);
hdr->msg.get.src_offset = NTOH__u32 (hdr->msg.get.src_offset);
- /* compatibility check until field is deleted */
- if (hdr->msg.get.return_offset != 0)
- CERROR("Unexpected non-zero get.return_offset %x from "
- LPU64"\n", hdr->msg.get.return_offset, hdr->src_nid);
-
state_lock(nal, &flags);
- me = lib_find_me(nal, hdr->msg.get.ptl_index, PTL_MD_OP_GET,
- hdr->src_nid, hdr->src_pid,
- hdr->msg.get.sink_length, hdr->msg.get.src_offset,
- hdr->msg.get.match_bits,
- &mlength, &offset, &unlink);
- if (me == NULL)
- goto drop;
-
- md = me->md;
- CDEBUG(D_NET, "Incoming get index %d from "LPU64".%u of length %d/%d "
- "from md "LPX64" [%d] + %d\n", hdr->msg.get.ptl_index,
- hdr->src_nid, hdr->src_pid, mlength, PTL_HDR_LENGTH(hdr),
- md->md_lh.lh_cookie, md->md_niov, offset);
-
- msg = get_new_msg (nal, md);
- if (msg == NULL) {
- CERROR(LPU64": Dropping GET from "LPU64": can't allocate msg\n",
- ni->nid, hdr->src_nid);
- goto drop;
+ md = lib_match_md(nal, hdr->msg.get.ptl_index, PTL_MD_OP_GET,
+ hdr->src_nid, hdr->src_pid,
+ hdr->msg.get.sink_length, hdr->msg.get.src_offset,
+ hdr->msg.get.match_bits, msg,
+ &mlength, &offset);
+ if (md == NULL) {
+ state_unlock(nal, &flags);
+ return (PTL_FAIL);
}
- if (md->eq) {
- msg->ev.type = PTL_EVENT_GET;
- msg->ev.initiator.nid = hdr->src_nid;
- msg->ev.initiator.pid = hdr->src_pid;
- msg->ev.portal = hdr->msg.get.ptl_index;
- msg->ev.match_bits = hdr->msg.get.match_bits;
- msg->ev.rlength = PTL_HDR_LENGTH(hdr);
- msg->ev.mlength = mlength;
- msg->ev.offset = offset;
- msg->ev.hdr_data = 0;
-
- /* NB if this match has exhausted the MD, we can't be sure
- * that this event will the the last one associated with
- * this MD in the event queue (another message already
- * matching this ME/MD could end up being last). So we
- * remember the ME handle anyway and check again when we're
- * allocating our slot in the event queue.
- */
- ptl_me2handle (&msg->ev.unlinked_me, me);
-
- lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
- }
+ msg->ev.type = PTL_EVENT_GET_END;
+ msg->ev.hdr_data = 0;
ni->counters.send_count++;
ni->counters.send_length += mlength;
- /* only unlink after MD's refcount has been bumped
- * in get_new_msg() otherwise lib_me_unlink() will nuke it */
- if (unlink) {
- md->md_flags |= PTL_MD_FLAG_AUTO_UNLINKED;
- lib_me_unlink (nal, me);
- }
-
state_unlock(nal, &flags);
memset (&reply, 0, sizeof (reply));
reply.src_nid = HTON__u64 (ni->nid);
reply.dest_pid = HTON__u32 (hdr->src_pid);
reply.src_pid = HTON__u32 (ni->pid);
- PTL_HDR_LENGTH(&reply) = HTON__u32 (mlength);
+ reply.payload_length = HTON__u32 (mlength);
reply.msg.reply.dst_wmd = hdr->msg.get.return_wmd;
+ /* NB call lib_send() _BEFORE_ lib_recv() completes the incoming
+ * message. Some NALs _require_ this to implement optimized GET */
+
rc = lib_send (nal, private, msg, &reply, PTL_MSG_REPLY,
hdr->src_nid, hdr->src_pid, md, offset, mlength);
- if (rc != 0) {
- CERROR(LPU64": Dropping GET from "LPU64": send REPLY failed\n",
- ni->nid, hdr->src_nid);
- state_lock (nal, &flags);
- goto drop;
- }
+ if (rc != PTL_OK)
+ CERROR(LPU64": Unable to send REPLY for GET from "LPU64": %d\n",
+ ni->nid, hdr->src_nid, rc);
+
+ /* Discard any junk after the hdr */
+ (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
- /* Complete the incoming message */
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
return (rc);
- drop:
- ni->counters.drop_count++;
- ni->counters.drop_length += hdr->msg.get.sink_length;
- state_unlock(nal, &flags);
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return -1;
}
-static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
+static ptl_err_t
+parse_reply(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
{
lib_ni_t *ni = &nal->ni;
lib_md_t *md;
int rlength;
int length;
- lib_msg_t *msg;
unsigned long flags;
-
- /* compatibility check until field is deleted */
- if (hdr->msg.reply.dst_offset != 0)
- CERROR("Unexpected non-zero reply.dst_offset %x from "LPU64"\n",
- hdr->msg.reply.dst_offset, hdr->src_nid);
+ ptl_err_t rc;
state_lock(nal, &flags);
md == NULL ? "invalid" : "inactive",
hdr->msg.reply.dst_wmd.wh_interface_cookie,
hdr->msg.reply.dst_wmd.wh_object_cookie);
- goto drop;
+
+ state_unlock(nal, &flags);
+ return (PTL_FAIL);
}
LASSERT (md->offset == 0);
- length = rlength = PTL_HDR_LENGTH(hdr);
+ length = rlength = hdr->payload_length;
if (length > md->length) {
if ((md->options & PTL_MD_TRUNCATE) == 0) {
ni->nid, hdr->src_nid, length,
hdr->msg.reply.dst_wmd.wh_object_cookie,
md->length);
- goto drop;
+ state_unlock(nal, &flags);
+ return (PTL_FAIL);
}
length = md->length;
}
hdr->src_nid, length, rlength,
hdr->msg.reply.dst_wmd.wh_object_cookie);
- msg = get_new_msg (nal, md);
- if (msg == NULL) {
- CERROR(LPU64": Dropping REPLY from "LPU64": can't "
- "allocate msg\n", ni->nid, hdr->src_nid);
- goto drop;
- }
+ lib_commit_md(nal, md, msg);
- if (md->eq) {
- msg->ev.type = PTL_EVENT_REPLY;
- msg->ev.initiator.nid = hdr->src_nid;
- msg->ev.initiator.pid = hdr->src_pid;
- msg->ev.rlength = rlength;
- msg->ev.mlength = length;
- msg->ev.offset = 0;
+ msg->ev.type = PTL_EVENT_REPLY_END;
+ msg->ev.initiator.nid = hdr->src_nid;
+ msg->ev.initiator.pid = hdr->src_pid;
+ msg->ev.rlength = rlength;
+ msg->ev.mlength = length;
+ msg->ev.offset = 0;
- lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
- }
+ lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
ni->counters.recv_count++;
ni->counters.recv_length += length;
state_unlock(nal, &flags);
- lib_recv (nal, private, msg, md, 0, length, rlength);
- return 0;
+ rc = lib_recv(nal, private, msg, md, 0, length, rlength);
+ if (rc != PTL_OK)
+ CERROR(LPU64": error on receiving REPLY from "LPU64": %d\n",
+ ni->nid, hdr->src_nid, rc);
- drop:
- nal->ni.counters.drop_count++;
- nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr);
- state_unlock (nal, &flags);
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return -1;
+ return (rc);
}
-static int parse_ack(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
+static ptl_err_t
+parse_ack(nal_cb_t *nal, ptl_hdr_t *hdr, void *private, lib_msg_t *msg)
{
- lib_ni_t *ni = &nal->ni;
- lib_md_t *md;
- lib_msg_t *msg = NULL;
- unsigned long flags;
+ lib_ni_t *ni = &nal->ni;
+ lib_md_t *md;
+ unsigned long flags;
/* Convert ack fields to host byte order */
hdr->msg.ack.match_bits = NTOH__u64 (hdr->msg.ack.match_bits);
(md == NULL) ? "invalid" : "inactive",
hdr->msg.ack.dst_wmd.wh_interface_cookie,
hdr->msg.ack.dst_wmd.wh_object_cookie);
- goto drop;
+
+ state_unlock(nal, &flags);
+ return (PTL_FAIL);
}
CDEBUG(D_NET, LPU64": ACK from "LPU64" into md "LPX64"\n",
ni->nid, hdr->src_nid,
hdr->msg.ack.dst_wmd.wh_object_cookie);
- msg = get_new_msg (nal, md);
- if (msg == NULL) {
- CERROR(LPU64": Dropping ACK from "LPU64": can't allocate msg\n",
- ni->nid, hdr->src_nid);
- goto drop;
- }
+ lib_commit_md(nal, md, msg);
- if (md->eq) {
- msg->ev.type = PTL_EVENT_ACK;
- msg->ev.initiator.nid = hdr->src_nid;
- msg->ev.initiator.pid = hdr->src_pid;
- msg->ev.mlength = hdr->msg.ack.mlength;
- msg->ev.match_bits = hdr->msg.ack.match_bits;
+ msg->ev.type = PTL_EVENT_ACK;
+ msg->ev.initiator.nid = hdr->src_nid;
+ msg->ev.initiator.pid = hdr->src_pid;
+ msg->ev.mlength = hdr->msg.ack.mlength;
+ msg->ev.match_bits = hdr->msg.ack.match_bits;
- lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
- }
+ lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
ni->counters.recv_count++;
- state_unlock(nal, &flags);
- lib_recv (nal, private, msg, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return 0;
- drop:
- nal->ni.counters.drop_count++;
- state_unlock (nal, &flags);
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return -1;
+ state_unlock(nal, &flags);
+
+ /* We have received and matched up the ack OK, create the
+ * completion event now... */
+ lib_finalize(nal, private, msg, PTL_OK);
+
+ /* ...and now discard any junk after the hdr */
+ (void) lib_recv(nal, private, NULL, NULL, 0, 0, hdr->payload_length);
+
+ return (PTL_OK);
}
static char *
hdr->msg.put.match_bits);
nal->cb_printf(nal,
" Length %d, offset %d, hdr data "LPX64"\n",
- PTL_HDR_LENGTH(hdr), hdr->msg.put.offset,
+ hdr->payload_length, hdr->msg.put.offset,
hdr->msg.put.hdr_data);
break;
"length %d\n",
hdr->msg.reply.dst_wmd.wh_interface_cookie,
hdr->msg.reply.dst_wmd.wh_object_cookie,
- PTL_HDR_LENGTH(hdr));
+ hdr->payload_length);
}
} /* end of print_hdr() */
-int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
+void
+lib_parse(nal_cb_t *nal, ptl_hdr_t *hdr, void *private)
{
unsigned long flags;
-
- /* NB static check; optimizer will elide this if it's right */
- LASSERT (offsetof (ptl_hdr_t, msg.ack.length) ==
- offsetof (ptl_hdr_t, msg.put.length));
- LASSERT (offsetof (ptl_hdr_t, msg.ack.length) ==
- offsetof (ptl_hdr_t, msg.get.length));
- LASSERT (offsetof (ptl_hdr_t, msg.ack.length) ==
- offsetof (ptl_hdr_t, msg.reply.length));
-
+ ptl_err_t rc;
+ lib_msg_t *msg;
+
/* convert common fields to host byte order */
hdr->dest_nid = NTOH__u64 (hdr->dest_nid);
hdr->src_nid = NTOH__u64 (hdr->src_nid);
hdr->dest_pid = NTOH__u32 (hdr->dest_pid);
hdr->src_pid = NTOH__u32 (hdr->src_pid);
hdr->type = NTOH__u32 (hdr->type);
- PTL_HDR_LENGTH(hdr) = NTOH__u32 (PTL_HDR_LENGTH(hdr));
+ hdr->payload_length = NTOH__u32(hdr->payload_length);
#if 0
nal->cb_printf(nal, "%d: lib_parse: nal=%p hdr=%p type=%d\n",
nal->ni.nid, nal, hdr, hdr->type);
nal->ni.nid, mv->magic,
mv->version_major, mv->version_minor,
hdr->src_nid);
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return (-1);
+ lib_drop_message(nal, private, hdr);
+ return;
}
if (hdr->dest_nid != nal->ni.nid) {
CERROR(LPU64": Dropping %s message from "LPU64" to "LPU64
" (not me)\n", nal->ni.nid, hdr_type_string (hdr),
hdr->src_nid, hdr->dest_nid);
-
- state_lock (nal, &flags);
- nal->ni.counters.drop_count++;
- nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr);
- state_unlock (nal, &flags);
-
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return (-1);
+ lib_drop_message(nal, private, hdr);
+ return;
}
if (!list_empty (&nal->ni.ni_test_peers) && /* normally we don't */
": simulated failure\n",
nal->ni.nid, hdr_type_string (hdr),
hdr->src_nid);
- return (-1);
+ lib_drop_message(nal, private, hdr);
+ return;
}
-
+
+ msg = lib_msg_alloc(nal);
+ if (msg == NULL) {
+ CERROR(LPU64": Dropping incoming %s from "LPU64
+ ": can't allocate a lib_msg_t\n",
+ nal->ni.nid, hdr_type_string (hdr),
+ hdr->src_nid);
+ lib_drop_message(nal, private, hdr);
+ return;
+ }
+
switch (hdr->type) {
case PTL_MSG_ACK:
- return (parse_ack(nal, hdr, private));
+ rc = parse_ack(nal, hdr, private, msg);
+ break;
case PTL_MSG_PUT:
- return (parse_put(nal, hdr, private));
+ rc = parse_put(nal, hdr, private, msg);
break;
case PTL_MSG_GET:
- return (parse_get(nal, hdr, private));
+ rc = parse_get(nal, hdr, private, msg);
break;
case PTL_MSG_REPLY:
- return (parse_reply(nal, hdr, private));
+ rc = parse_reply(nal, hdr, private, msg);
break;
default:
CERROR(LPU64": Dropping <unknown> message from "LPU64
": Bad type=0x%x\n", nal->ni.nid, hdr->src_nid,
hdr->type);
-
- lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
- return (-1);
+ rc = PTL_FAIL;
+ break;
+ }
+
+ if (rc != PTL_OK) {
+ if (msg->md != NULL) {
+ /* committed... */
+ lib_finalize(nal, private, msg, rc);
+ } else {
+ state_lock(nal, &flags);
+ lib_msg_free(nal, msg); /* expects state_lock held */
+ state_unlock(nal, &flags);
+
+ lib_drop_message(nal, private, hdr);
+ }
}
}
-
-int do_PtlPut(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
+int
+do_PtlPut(nal_cb_t *nal, void *private, void *v_args, void *v_ret)
{
/*
* Incoming:
* Outgoing:
*/
- PtlPut_in *args = v_args;
- PtlPut_out *ret = v_ret;
- ptl_hdr_t hdr;
-
- lib_ni_t *ni = &nal->ni;
- lib_md_t *md;
- lib_msg_t *msg = NULL;
+ PtlPut_in *args = v_args;
ptl_process_id_t *id = &args->target_in;
- unsigned long flags;
-
+ PtlPut_out *ret = v_ret;
+ lib_ni_t *ni = &nal->ni;
+ lib_msg_t *msg;
+ ptl_hdr_t hdr;
+ lib_md_t *md;
+ unsigned long flags;
+ int rc;
+
if (!list_empty (&nal->ni.ni_test_peers) && /* normally we don't */
fail_peer (nal, id->nid, 1)) /* shall we now? */
{
CERROR(LPU64": Dropping PUT to "LPU64": simulated failure\n",
nal->ni.nid, id->nid);
- return (ret->rc = PTL_INV_PROC);
+ return (ret->rc = PTL_PROCESS_INVALID);
}
-
- ret->rc = PTL_OK;
+
+ msg = lib_msg_alloc(nal);
+ if (msg == NULL) {
+ CERROR(LPU64": Dropping PUT to "LPU64": ENOMEM on lib_msg_t\n",
+ ni->nid, id->nid);
+ return (ret->rc = PTL_NO_SPACE);
+ }
+
state_lock(nal, &flags);
+
md = ptl_handle2md(&args->md_in, nal);
- if (md == NULL || !md->threshold) {
+ if (md == NULL || md->threshold == 0) {
+ lib_msg_free(nal, msg);
state_unlock(nal, &flags);
- return ret->rc = PTL_INV_MD;
+
+ return (ret->rc = PTL_MD_INVALID);
}
CDEBUG(D_NET, "PtlPut -> %Lu: %lu\n", (unsigned long long)id->nid,
hdr.src_nid = HTON__u64 (ni->nid);
hdr.dest_pid = HTON__u32 (id->pid);
hdr.src_pid = HTON__u32 (ni->pid);
- PTL_HDR_LENGTH(&hdr) = HTON__u32 (md->length);
+ hdr.payload_length = HTON__u32 (md->length);
/* NB handles only looked up by creator (no flips) */
if (args->ack_req_in == PTL_ACK_REQ) {
hdr.msg.put.offset = HTON__u32 (args->offset_in);
hdr.msg.put.hdr_data = args->hdr_data_in;
+ lib_commit_md(nal, md, msg);
+
+ msg->ev.type = PTL_EVENT_SEND_END;
+ msg->ev.initiator.nid = ni->nid;
+ msg->ev.initiator.pid = ni->pid;
+ msg->ev.portal = args->portal_in;
+ msg->ev.match_bits = args->match_bits_in;
+ msg->ev.rlength = md->length;
+ msg->ev.mlength = md->length;
+ msg->ev.offset = args->offset_in;
+ msg->ev.hdr_data = args->hdr_data_in;
+
+ lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
+
ni->counters.send_count++;
ni->counters.send_length += md->length;
- msg = get_new_msg (nal, md);
- if (msg == NULL) {
- CERROR("BAD: could not allocate msg!\n");
- state_unlock(nal, &flags);
- return ret->rc = PTL_NOSPACE;
+ state_unlock(nal, &flags);
+
+ rc = lib_send (nal, private, msg, &hdr, PTL_MSG_PUT,
+ id->nid, id->pid, md, 0, md->length);
+ if (rc != PTL_OK) {
+ CERROR(LPU64": error sending PUT to "LPU64": %d\n",
+ ni->nid, id->nid, rc);
+ lib_finalize (nal, private, msg, rc);
}
+
+ /* completion will be signalled by an event */
+ return ret->rc = PTL_OK;
+}
- /*
- * If this memory descriptor has an event queue associated with
- * it we need to allocate a message state object and record the
- * information about this operation that will be recorded into
- * event queue once the message has been completed.
+lib_msg_t *
+lib_create_reply_msg (nal_cb_t *nal, ptl_nid_t peer_nid, lib_msg_t *getmsg)
+{
+ /* The NAL can DMA direct to the GET md (i.e. no REPLY msg). This
+ * returns a msg for the NAL to pass to lib_finalize() when the sink
+ * data has been received.
*
- * NB. We're now committed to the GET, since we just marked the MD
- * busy. Callers who observe this (by getting PTL_MD_INUSE from
- * PtlMDUnlink()) expect a completion event to tell them when the
- * MD becomes idle.
- */
- if (md->eq) {
- msg->ev.type = PTL_EVENT_SENT;
- msg->ev.initiator.nid = ni->nid;
- msg->ev.initiator.pid = ni->pid;
- msg->ev.portal = args->portal_in;
- msg->ev.match_bits = args->match_bits_in;
- msg->ev.rlength = md->length;
- msg->ev.mlength = md->length;
- msg->ev.offset = args->offset_in;
- msg->ev.hdr_data = args->hdr_data_in;
+ * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
+ * lib_finalize() is called on it, so the NAL must call this first */
- lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
+ lib_ni_t *ni = &nal->ni;
+ lib_msg_t *msg = lib_msg_alloc(nal);
+ lib_md_t *getmd = getmsg->md;
+ unsigned long flags;
+
+ state_lock(nal, &flags);
+
+ LASSERT (getmd->pending > 0);
+
+ if (msg == NULL) {
+ CERROR ("Dropping REPLY from "LPU64": can't allocate msg\n",
+ peer_nid);
+ goto drop;
+ }
+
+ if (getmd->threshold == 0) {
+ CERROR ("Dropping REPLY from "LPU64" for inactive MD %p\n",
+ peer_nid, getmd);
+ goto drop_msg;
}
+ LASSERT (getmd->offset == 0);
+
+ CDEBUG(D_NET, "Reply from "LPU64" md %p\n", peer_nid, getmd);
+
+ lib_commit_md (nal, getmd, msg);
+
+ msg->ev.type = PTL_EVENT_REPLY_END;
+ msg->ev.initiator.nid = peer_nid;
+ msg->ev.initiator.pid = 0; /* XXX FIXME!!! */
+ msg->ev.rlength = msg->ev.mlength = getmd->length;
+ msg->ev.offset = 0;
+
+ lib_md_deconstruct(nal, getmd, &msg->ev.mem_desc);
+
+ ni->counters.recv_count++;
+ ni->counters.recv_length += getmd->length;
+
state_unlock(nal, &flags);
-
- lib_send (nal, private, msg, &hdr, PTL_MSG_PUT,
- id->nid, id->pid, md, 0, md->length);
- return ret->rc = PTL_OK;
-}
+ return msg;
+ drop_msg:
+ lib_msg_free(nal, msg);
+ drop:
+ nal->ni.counters.drop_count++;
+ nal->ni.counters.drop_length += getmd->length;
-int do_PtlGet(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
+ state_unlock (nal, &flags);
+
+ return NULL;
+}
+
+int
+do_PtlGet(nal_cb_t *nal, void *private, void *v_args, void *v_ret)
{
/*
* Incoming:
* Outgoing:
*/
- PtlGet_in *args = v_args;
- PtlGet_out *ret = v_ret;
- ptl_hdr_t hdr;
- lib_msg_t *msg = NULL;
- lib_ni_t *ni = &nal->ni;
+ PtlGet_in *args = v_args;
ptl_process_id_t *id = &args->target_in;
- lib_md_t *md;
- unsigned long flags;
-
+ PtlGet_out *ret = v_ret;
+ lib_ni_t *ni = &nal->ni;
+ lib_msg_t *msg;
+ ptl_hdr_t hdr;
+ lib_md_t *md;
+ unsigned long flags;
+ int rc;
+
if (!list_empty (&nal->ni.ni_test_peers) && /* normally we don't */
fail_peer (nal, id->nid, 1)) /* shall we now? */
{
CERROR(LPU64": Dropping PUT to "LPU64": simulated failure\n",
nal->ni.nid, id->nid);
- return (ret->rc = PTL_INV_PROC);
+ return (ret->rc = PTL_PROCESS_INVALID);
}
-
+
+ msg = lib_msg_alloc(nal);
+ if (msg == NULL) {
+ CERROR(LPU64": Dropping GET to "LPU64": ENOMEM on lib_msg_t\n",
+ ni->nid, id->nid);
+ return (ret->rc = PTL_NO_SPACE);
+ }
+
state_lock(nal, &flags);
+
md = ptl_handle2md(&args->md_in, nal);
if (md == NULL || !md->threshold) {
+ lib_msg_free(nal, msg);
state_unlock(nal, &flags);
- return ret->rc = PTL_INV_MD;
- }
- LASSERT (md->offset == 0);
+ return ret->rc = PTL_MD_INVALID;
+ }
CDEBUG(D_NET, "PtlGet -> %Lu: %lu\n", (unsigned long long)id->nid,
(unsigned long)id->pid);
hdr.src_nid = HTON__u64 (ni->nid);
hdr.dest_pid = HTON__u32 (id->pid);
hdr.src_pid = HTON__u32 (ni->pid);
- PTL_HDR_LENGTH(&hdr) = 0;
+ hdr.payload_length = 0;
/* NB handles only looked up by creator (no flips) */
hdr.msg.get.return_wmd.wh_interface_cookie = ni->ni_interface_cookie;
hdr.msg.get.src_offset = HTON__u32 (args->offset_in);
hdr.msg.get.sink_length = HTON__u32 (md->length);
- ni->counters.send_count++;
+ lib_commit_md(nal, md, msg);
- msg = get_new_msg (nal, md);
- if (msg == NULL) {
- CERROR("do_PtlGet: BAD - could not allocate cookie!\n");
- state_unlock(nal, &flags);
- return ret->rc = PTL_NOSPACE;
- }
+ msg->ev.type = PTL_EVENT_SEND_END;
+ msg->ev.initiator.nid = ni->nid;
+ msg->ev.initiator.pid = ni->pid;
+ msg->ev.portal = args->portal_in;
+ msg->ev.match_bits = args->match_bits_in;
+ msg->ev.rlength = md->length;
+ msg->ev.mlength = md->length;
+ msg->ev.offset = args->offset_in;
+ msg->ev.hdr_data = 0;
- /*
- * If this memory descriptor has an event queue associated with
- * it we must allocate a message state object that will record
- * the information to be filled in once the message has been
- * completed. More information is in the do_PtlPut() comments.
- *
- * NB. We're now committed to the GET, since we just marked the MD
- * busy. Callers who observe this (by getting PTL_MD_INUSE from
- * PtlMDUnlink()) expect a completion event to tell them when the
- * MD becomes idle.
- */
- if (md->eq) {
- msg->ev.type = PTL_EVENT_SENT;
- msg->ev.initiator.nid = ni->nid;
- msg->ev.initiator.pid = ni->pid;
- msg->ev.portal = args->portal_in;
- msg->ev.match_bits = args->match_bits_in;
- msg->ev.rlength = md->length;
- msg->ev.mlength = md->length;
- msg->ev.offset = args->offset_in;
- msg->ev.hdr_data = 0;
+ lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
- lib_md_deconstruct(nal, md, &msg->ev.mem_desc);
- }
+ ni->counters.send_count++;
state_unlock(nal, &flags);
- lib_send (nal, private, msg, &hdr, PTL_MSG_GET,
- id->nid, id->pid, NULL, 0, 0);
-
+ rc = lib_send (nal, private, msg, &hdr, PTL_MSG_GET,
+ id->nid, id->pid, NULL, 0, 0);
+ if (rc != PTL_OK) {
+ CERROR(LPU64": error sending GET to "LPU64": %d\n",
+ ni->nid, id->nid, rc);
+ lib_finalize (nal, private, msg, rc);
+ }
+
+ /* completion will be signalled by an event */
return ret->rc = PTL_OK;
}
void lib_assert_wire_constants (void)
{
- /* Wire protocol assertions generated by 'wirecheck' */
+ /* Wire protocol assertions generated by 'wirecheck'
+ * running on Linux robert.bartonsoftware.com 2.4.20-18.9 #1 Thu May 29 06:54:41 EDT 2003 i686
+ * with gcc version 3.2.2 20030222 (Red Hat Linux 3.2.2-5) */
+
/* Constants... */
LASSERT (PORTALS_PROTO_MAGIC == 0xeebc0ded);
LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0);
- LASSERT (PORTALS_PROTO_VERSION_MINOR == 1);
+ LASSERT (PORTALS_PROTO_VERSION_MINOR == 3);
LASSERT (PTL_MSG_ACK == 0);
LASSERT (PTL_MSG_PUT == 1);
LASSERT (PTL_MSG_GET == 2);
LASSERT (PTL_MSG_HELLO == 4);
/* Checks for struct ptl_handle_wire_t */
- LASSERT (sizeof (ptl_handle_wire_t) == 16);
- LASSERT (offsetof (ptl_handle_wire_t, wh_interface_cookie) == 0);
- LASSERT (sizeof (((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8);
- LASSERT (offsetof (ptl_handle_wire_t, wh_object_cookie) == 8);
- LASSERT (sizeof (((ptl_handle_wire_t *)0)->wh_object_cookie) == 8);
+ LASSERT ((int)sizeof(ptl_handle_wire_t) == 16);
+ LASSERT (offsetof(ptl_handle_wire_t, wh_interface_cookie) == 0);
+ LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8);
+ LASSERT (offsetof(ptl_handle_wire_t, wh_object_cookie) == 8);
+ LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_object_cookie) == 8);
/* Checks for struct ptl_magicversion_t */
- LASSERT (sizeof (ptl_magicversion_t) == 8);
- LASSERT (offsetof (ptl_magicversion_t, magic) == 0);
- LASSERT (sizeof (((ptl_magicversion_t *)0)->magic) == 4);
- LASSERT (offsetof (ptl_magicversion_t, version_major) == 4);
- LASSERT (sizeof (((ptl_magicversion_t *)0)->version_major) == 2);
- LASSERT (offsetof (ptl_magicversion_t, version_minor) == 6);
- LASSERT (sizeof (((ptl_magicversion_t *)0)->version_minor) == 2);
+ LASSERT ((int)sizeof(ptl_magicversion_t) == 8);
+ LASSERT (offsetof(ptl_magicversion_t, magic) == 0);
+ LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->magic) == 4);
+ LASSERT (offsetof(ptl_magicversion_t, version_major) == 4);
+ LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_major) == 2);
+ LASSERT (offsetof(ptl_magicversion_t, version_minor) == 6);
+ LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_minor) == 2);
/* Checks for struct ptl_hdr_t */
- LASSERT (sizeof (ptl_hdr_t) == 72);
- LASSERT (offsetof (ptl_hdr_t, dest_nid) == 0);
- LASSERT (sizeof (((ptl_hdr_t *)0)->dest_nid) == 8);
- LASSERT (offsetof (ptl_hdr_t, src_nid) == 8);
- LASSERT (sizeof (((ptl_hdr_t *)0)->src_nid) == 8);
- LASSERT (offsetof (ptl_hdr_t, dest_pid) == 16);
- LASSERT (sizeof (((ptl_hdr_t *)0)->dest_pid) == 4);
- LASSERT (offsetof (ptl_hdr_t, src_pid) == 20);
- LASSERT (sizeof (((ptl_hdr_t *)0)->src_pid) == 4);
- LASSERT (offsetof (ptl_hdr_t, type) == 24);
- LASSERT (sizeof (((ptl_hdr_t *)0)->type) == 4);
+ LASSERT ((int)sizeof(ptl_hdr_t) == 72);
+ LASSERT (offsetof(ptl_hdr_t, dest_nid) == 0);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_nid) == 8);
+ LASSERT (offsetof(ptl_hdr_t, src_nid) == 8);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_nid) == 8);
+ LASSERT (offsetof(ptl_hdr_t, dest_pid) == 16);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_pid) == 4);
+ LASSERT (offsetof(ptl_hdr_t, src_pid) == 20);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_pid) == 4);
+ LASSERT (offsetof(ptl_hdr_t, type) == 24);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->type) == 4);
+ LASSERT (offsetof(ptl_hdr_t, payload_length) == 28);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->payload_length) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg) == 32);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg) == 40);
/* Ack */
- LASSERT (offsetof (ptl_hdr_t, msg.ack.mlength) == 28);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.mlength) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.ack.dst_wmd) == 32);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16);
- LASSERT (offsetof (ptl_hdr_t, msg.ack.match_bits) == 48);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.match_bits) == 8);
- LASSERT (offsetof (ptl_hdr_t, msg.ack.length) == 56);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.length) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg.ack.dst_wmd) == 32);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16);
+ LASSERT (offsetof(ptl_hdr_t, msg.ack.match_bits) == 48);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.match_bits) == 8);
+ LASSERT (offsetof(ptl_hdr_t, msg.ack.mlength) == 56);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.mlength) == 4);
/* Put */
- LASSERT (offsetof (ptl_hdr_t, msg.put.ptl_index) == 28);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.ptl_index) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.put.ack_wmd) == 32);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16);
- LASSERT (offsetof (ptl_hdr_t, msg.put.match_bits) == 48);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.match_bits) == 8);
- LASSERT (offsetof (ptl_hdr_t, msg.put.length) == 56);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.length) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.put.offset) == 60);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.offset) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.put.hdr_data) == 64);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.hdr_data) == 8);
+ LASSERT (offsetof(ptl_hdr_t, msg.put.ack_wmd) == 32);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16);
+ LASSERT (offsetof(ptl_hdr_t, msg.put.match_bits) == 48);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.match_bits) == 8);
+ LASSERT (offsetof(ptl_hdr_t, msg.put.hdr_data) == 56);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.hdr_data) == 8);
+ LASSERT (offsetof(ptl_hdr_t, msg.put.ptl_index) == 64);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ptl_index) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg.put.offset) == 68);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.offset) == 4);
/* Get */
- LASSERT (offsetof (ptl_hdr_t, msg.get.ptl_index) == 28);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.ptl_index) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.get.return_wmd) == 32);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.return_wmd) == 16);
- LASSERT (offsetof (ptl_hdr_t, msg.get.match_bits) == 48);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.match_bits) == 8);
- LASSERT (offsetof (ptl_hdr_t, msg.get.length) == 56);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.length) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.get.src_offset) == 60);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.src_offset) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.get.return_offset) == 64);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.return_offset) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.get.sink_length) == 68);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.sink_length) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg.get.return_wmd) == 32);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.return_wmd) == 16);
+ LASSERT (offsetof(ptl_hdr_t, msg.get.match_bits) == 48);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.match_bits) == 8);
+ LASSERT (offsetof(ptl_hdr_t, msg.get.ptl_index) == 56);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.ptl_index) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg.get.src_offset) == 60);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.src_offset) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg.get.sink_length) == 64);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.sink_length) == 4);
/* Reply */
- LASSERT (offsetof (ptl_hdr_t, msg.reply.dst_wmd) == 32);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16);
- LASSERT (offsetof (ptl_hdr_t, msg.reply.dst_offset) == 48);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.dst_offset) == 4);
- LASSERT (offsetof (ptl_hdr_t, msg.reply.length) == 56);
- LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.length) == 4);
+ LASSERT (offsetof(ptl_hdr_t, msg.reply.dst_wmd) == 32);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16);
+
+ /* Hello */
+ LASSERT (offsetof(ptl_hdr_t, msg.hello.incarnation) == 32);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.incarnation) == 8);
+ LASSERT (offsetof(ptl_hdr_t, msg.hello.type) == 40);
+ LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.type) == 4);
}