* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifndef __KERNEL__
-# include <stdio.h>
-#else
-# define DEBUG_SUBSYSTEM S_PORTALS
-# include <linux/kp30.h>
-#endif
+#define DEBUG_SUBSYSTEM S_LNET
-#include <portals/lib-p30.h>
+#include <lnet/lib-lnet.h>
-/* must be called with state lock held */
+/* must be called with LNET_LOCK held */
void
-lib_md_unlink(lib_nal_t *nal, lib_md_t *md)
+lnet_md_unlink(lnet_libmd_t *md)
{
- if ((md->md_flags & PTL_MD_FLAG_ZOMBIE) == 0) {
+ if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
/* first unlink attempt... */
- lib_me_t *me = md->me;
+ lnet_me_t *me = md->md_me;
- md->md_flags |= PTL_MD_FLAG_ZOMBIE;
+ md->md_flags |= LNET_MD_FLAG_ZOMBIE;
/* Disassociate from ME (if any), and unlink it if it was created
- * with PTL_UNLINK */
+ * with LNET_UNLINK */
if (me != NULL) {
- me->md = NULL;
- if (me->unlink == PTL_UNLINK)
- lib_me_unlink(nal, me);
+ md->md_me = NULL;
+ me->me_md = NULL;
+ if (me->me_unlink == LNET_UNLINK)
+ lnet_me_unlink(me);
}
- /* emsure all future handle lookups fail */
- lib_invalidate_handle(nal, &md->md_lh);
+ /* ensure all future handle lookups fail */
+ lnet_invalidate_handle(&md->md_lh);
}
- if (md->pending != 0) {
+ if (md->md_refcount != 0) {
CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
return;
}
CDEBUG(D_NET, "Unlinking md %p\n", md);
- if ((md->options & PTL_MD_KIOV) != 0) {
- if (nal->libnal_unmap_pages != NULL)
- nal->libnal_unmap_pages (nal,
- md->md_niov,
- md->md_iov.kiov,
- &md->md_addrkey);
- } else if (nal->libnal_unmap != NULL) {
- nal->libnal_unmap (nal,
- md->md_niov, md->md_iov.iov,
- &md->md_addrkey);
- }
-
- if (md->eq != NULL) {
- md->eq->eq_refcount--;
- LASSERT (md->eq->eq_refcount >= 0);
+ if (md->md_eq != NULL) {
+ md->md_eq->eq_refcount--;
+ LASSERT (md->md_eq->eq_refcount >= 0);
}
list_del (&md->md_list);
- lib_md_free(nal, md);
+ lnet_md_free(md);
}
-/* must be called with state lock held */
+/* must be called with LNET_LOCK held */
static int
-lib_md_build(lib_nal_t *nal, lib_md_t *lmd, ptl_md_t *umd, int unlink)
+lib_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
{
- lib_eq_t *eq = NULL;
- int rc;
- int i;
- int niov;
- int total_length = 0;
+ lnet_eq_t *eq = NULL;
+ int i;
+ unsigned int niov;
+ int total_length = 0;
/* NB we are passed an allocated, but uninitialised/active md.
- * if we return success, caller may lib_md_unlink() it.
- * otherwise caller may only lib_md_free() it.
+ * if we return success, caller may lnet_md_unlink() it.
+ * otherwise caller may only lnet_md_free() it.
*/
- if (!PtlHandleIsEqual (umd->eq_handle, PTL_EQ_NONE)) {
- eq = ptl_handle2eq(&umd->eq_handle, nal);
+ if (!LNetHandleIsEqual (umd->eq_handle, LNET_EQ_NONE)) {
+ eq = lnet_handle2eq(&umd->eq_handle);
if (eq == NULL)
- return PTL_EQ_INVALID;
+ return -ENOENT;
}
/* This implementation doesn't know how to create START events or
* disable END events. Best to LASSERT our caller is compliant so
* we find out quickly... */
- LASSERT (eq == NULL ||
- ((umd->options & PTL_MD_EVENT_START_DISABLE) != 0 &&
- (umd->options & PTL_MD_EVENT_END_DISABLE) == 0));
-
- lmd->me = NULL;
- lmd->start = umd->start;
- lmd->offset = 0;
- lmd->max_size = umd->max_size;
- lmd->options = umd->options;
- lmd->user_ptr = umd->user_ptr;
- lmd->eq = eq;
- lmd->threshold = umd->threshold;
- lmd->pending = 0;
- lmd->md_flags = (unlink == PTL_UNLINK) ? PTL_MD_FLAG_AUTO_UNLINK : 0;
-
- if ((umd->options & PTL_MD_IOVEC) != 0) {
-
- if ((umd->options & PTL_MD_KIOV) != 0) /* Can't specify both */
- return PTL_MD_ILLEGAL;
+ /* TODO - reevaluate what should be here in light of
+ * the removal of the start and end events
+ * maybe there we shouldn't even allow LNET_EQ_NONE!)
+ LASSERT (eq == NULL);
+ */
+
+ lmd->md_me = NULL;
+ lmd->md_start = umd->start;
+ lmd->md_offset = 0;
+ lmd->md_max_size = umd->max_size;
+ lmd->md_options = umd->options;
+ lmd->md_user_ptr = umd->user_ptr;
+ lmd->md_eq = eq;
+ lmd->md_threshold = umd->threshold;
+ lmd->md_refcount = 0;
+ lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
+
+ if ((umd->options & LNET_MD_IOVEC) != 0) {
+
+ if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
+ return -EINVAL;
lmd->md_niov = niov = umd->length;
        memcpy(lmd->md_iov.iov, umd->start,
               niov * sizeof (lmd->md_iov.iov[0]));
for (i = 0; i < niov; i++) {
/* We take the base address on trust */
if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
- return PTL_MD_ILLEGAL;
+ return -EINVAL;
total_length += lmd->md_iov.iov[i].iov_len;
}
- lmd->length = total_length;
+ lmd->md_length = total_length;
- if ((umd->options & PTL_MD_MAX_SIZE) != 0 && /* max size used */
- (umd->max_size < 0 ||
+ if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ (umd->max_size < 0 ||
umd->max_size > total_length)) // illegal max_size
- return PTL_MD_ILLEGAL;
+ return -EINVAL;
- if (nal->libnal_map != NULL) {
- rc = nal->libnal_map (nal, niov, lmd->md_iov.iov,
- &lmd->md_addrkey);
- if (rc != PTL_OK)
- return (rc);
- }
- } else if ((umd->options & PTL_MD_KIOV) != 0) {
+ } else if ((umd->options & LNET_MD_KIOV) != 0) {
#ifndef __KERNEL__
- return PTL_MD_ILLEGAL;
-#else
- /* Trap attempt to use paged I/O if unsupported early. */
- if (nal->libnal_send_pages == NULL ||
- nal->libnal_recv_pages == NULL)
- return PTL_MD_INVALID;
-
+ return -EINVAL;
+#else
lmd->md_niov = niov = umd->length;
memcpy(lmd->md_iov.kiov, umd->start,
niov * sizeof (lmd->md_iov.kiov[0]));
for (i = 0; i < niov; i++) {
/* We take the page pointer on trust */
- if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE )
- return PTL_VAL_FAILED; /* invalid length */
+ if (lmd->md_iov.kiov[i].kiov_offset +
+ lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
+ return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;
}
- lmd->length = total_length;
+ lmd->md_length = total_length;
- if ((umd->options & PTL_MD_MAX_SIZE) != 0 && /* max size used */
- (umd->max_size < 0 ||
+ if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ (umd->max_size < 0 ||
umd->max_size > total_length)) // illegal max_size
- return PTL_MD_ILLEGAL;
-
- if (nal->libnal_map_pages != NULL) {
- rc = nal->libnal_map_pages (nal, niov, lmd->md_iov.kiov,
- &lmd->md_addrkey);
- if (rc != PTL_OK)
- return (rc);
- }
+ return -EINVAL;
#endif
} else { /* contiguous */
- lmd->length = umd->length;
+ lmd->md_length = umd->length;
lmd->md_niov = niov = 1;
lmd->md_iov.iov[0].iov_base = umd->start;
lmd->md_iov.iov[0].iov_len = umd->length;
- if ((umd->options & PTL_MD_MAX_SIZE) != 0 && /* max size used */
- (umd->max_size < 0 ||
+ if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ (umd->max_size < 0 ||
umd->max_size > umd->length)) // illegal max_size
- return PTL_MD_ILLEGAL;
-
- if (nal->libnal_map != NULL) {
- rc = nal->libnal_map (nal, niov, lmd->md_iov.iov,
- &lmd->md_addrkey);
- if (rc != PTL_OK)
- return (rc);
- }
- }
+ return -EINVAL;
+ }
if (eq != NULL)
eq->eq_refcount++;
/* It's good; let handle2md succeed and add to active mds */
- lib_initialise_handle (nal, &lmd->md_lh, PTL_COOKIE_TYPE_MD);
- list_add (&lmd->md_list, &nal->libnal_ni.ni_active_mds);
+ lnet_initialise_handle (&lmd->md_lh, LNET_COOKIE_TYPE_MD);
+ list_add (&lmd->md_list, &the_lnet.ln_active_mds);
- return PTL_OK;
+ return 0;
}
-/* must be called with state lock held */
+/* must be called with LNET_LOCK held */
void
-lib_md_deconstruct(lib_nal_t *nal, lib_md_t *lmd, ptl_md_t *umd)
+lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
{
/* NB this doesn't copy out all the iov entries so when a
* discontiguous MD is copied out, the target gets to know the
* original iov pointer (in start) and the number of entries it had
* and that's all.
*/
- umd->start = lmd->start;
- umd->length = ((lmd->options & (PTL_MD_IOVEC | PTL_MD_KIOV)) == 0) ?
- lmd->length : lmd->md_niov;
- umd->threshold = lmd->threshold;
- umd->max_size = lmd->max_size;
- umd->options = lmd->options;
- umd->user_ptr = lmd->user_ptr;
- ptl_eq2handle(&umd->eq_handle, nal, lmd->eq);
+ umd->start = lmd->md_start;
+ umd->length = ((lmd->md_options & (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
+ lmd->md_length : lmd->md_niov;
+ umd->threshold = lmd->md_threshold;
+ umd->max_size = lmd->md_max_size;
+ umd->options = lmd->md_options;
+ umd->user_ptr = lmd->md_user_ptr;
+ lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
}
-int
-lib_api_md_attach(nal_t *apinal, ptl_handle_me_t *meh,
- ptl_md_t *umd, ptl_unlink_t unlink,
- ptl_handle_md_t *handle)
+int
+LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
+ lnet_unlink_t unlink, lnet_handle_md_t *handle)
{
- lib_nal_t *nal = apinal->nal_data;
- lib_me_t *me;
- lib_md_t *md;
- unsigned long flags;
- int rc;
-
- if ((umd->options & (PTL_MD_KIOV | PTL_MD_IOVEC)) != 0 &&
- umd->length > PTL_MD_MAX_IOV) /* too many fragments */
- return PTL_IOV_INVALID;
-
- md = lib_md_alloc(nal, umd);
+ lnet_me_t *me;
+ lnet_libmd_t *md;
+ int rc;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
+ umd.length > LNET_MAX_IOV) /* too many fragments */
+ return -EINVAL;
+
+ md = lnet_md_alloc(&umd);
if (md == NULL)
- return PTL_NO_SPACE;
+ return -ENOMEM;
- LIB_LOCK(nal, flags);
+ LNET_LOCK();
- me = ptl_handle2me(meh, nal);
+ me = lnet_handle2me(&meh);
if (me == NULL) {
- rc = PTL_ME_INVALID;
- } else if (me->md != NULL) {
- rc = PTL_ME_IN_USE;
+ rc = -ENOENT;
+ } else if (me->me_md != NULL) {
+ rc = -EBUSY;
} else {
- rc = lib_md_build(nal, md, umd, unlink);
- if (rc == PTL_OK) {
- me->md = md;
- md->me = me;
+ rc = lib_md_build(md, &umd, unlink);
+ if (rc == 0) {
+ me->me_md = md;
+ md->md_me = me;
- ptl_md2handle(handle, nal, md);
+ lnet_md2handle(handle, md);
- LIB_UNLOCK(nal, flags);
- return (PTL_OK);
+ /* check if this MD matches any blocked msgs */
+ lnet_match_blocked_msg(md); /* expects LNET_LOCK held */
+
+ LNET_UNLOCK();
+ return (0);
}
}
- lib_md_free (nal, md);
+ lnet_md_free (md);
- LIB_UNLOCK(nal, flags);
+ LNET_UNLOCK();
return (rc);
}
int
-lib_api_md_bind(nal_t *apinal,
- ptl_md_t *umd, ptl_unlink_t unlink,
- ptl_handle_md_t *handle)
+LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
{
- lib_nal_t *nal = apinal->nal_data;
- lib_md_t *md;
- unsigned long flags;
- int rc;
+ lnet_libmd_t *md;
+ int rc;
- if ((umd->options & (PTL_MD_KIOV | PTL_MD_IOVEC)) != 0 &&
- umd->length > PTL_MD_MAX_IOV) /* too many fragments */
- return PTL_IOV_INVALID;
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
+ umd.length > LNET_MAX_IOV) /* too many fragments */
+ return -EINVAL;
- md = lib_md_alloc(nal, umd);
+ md = lnet_md_alloc(&umd);
if (md == NULL)
- return PTL_NO_SPACE;
+ return -ENOMEM;
- LIB_LOCK(nal, flags);
+ LNET_LOCK();
- rc = lib_md_build(nal, md, umd, unlink);
+ rc = lib_md_build(md, &umd, unlink);
- if (rc == PTL_OK) {
- ptl_md2handle(handle, nal, md);
+ if (rc == 0) {
+ lnet_md2handle(handle, md);
- LIB_UNLOCK(nal, flags);
- return (PTL_OK);
+ LNET_UNLOCK();
+ return (0);
}
- lib_md_free (nal, md);
+ lnet_md_free (md);
- LIB_UNLOCK(nal, flags);
+ LNET_UNLOCK();
return (rc);
}
int
-lib_api_md_unlink (nal_t *apinal, ptl_handle_md_t *mdh)
+LNetMDUnlink (lnet_handle_md_t mdh)
{
- lib_nal_t *nal = apinal->nal_data;
- ptl_event_t ev;
- lib_md_t *md;
- unsigned long flags;
+ lnet_event_t ev;
+ lnet_libmd_t *md;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
- LIB_LOCK(nal, flags);
+ LNET_LOCK();
- md = ptl_handle2md(mdh, nal);
+ md = lnet_handle2md(&mdh);
if (md == NULL) {
- LIB_UNLOCK(nal, flags);
- return PTL_MD_INVALID;
+ LNET_UNLOCK();
+ return -ENOENT;
}
- /* If the MD is busy, lib_md_unlink just marks it for deletion, and
+ /* If the MD is busy, lnet_md_unlink just marks it for deletion, and
* when the NAL is done, the completion event flags that the MD was
* unlinked. Otherwise, we enqueue an event now... */
- if (md->eq != NULL &&
- md->pending == 0) {
- memset(&ev, 0, sizeof(ev));
-
- ev.type = PTL_EVENT_UNLINK;
- ev.ni_fail_type = PTL_OK;
- ev.unlinked = 1;
- lib_md_deconstruct(nal, md, &ev.md);
- ptl_md2handle(&ev.md_handle, nal, md);
-
- lib_enq_event_locked(nal, NULL, md->eq, &ev);
+ if (md->md_eq != NULL &&
+ md->md_refcount == 0) {
+ lnet_build_unlink_event(md, &ev);
+ lnet_enq_event_locked(md->md_eq, &ev);
}
- lib_md_unlink(nal, md);
+ lnet_md_unlink(md);
- LIB_UNLOCK(nal, flags);
- return PTL_OK;
+ LNET_UNLOCK();
+ return 0;
}
-int
-lib_api_md_update (nal_t *apinal,
- ptl_handle_md_t *mdh,
- ptl_md_t *oldumd, ptl_md_t *newumd,
- ptl_handle_eq_t *testqh)
-{
- lib_nal_t *nal = apinal->nal_data;
- lib_md_t *md;
- lib_eq_t *test_eq = NULL;
- unsigned long flags;
- int rc;
-
- LIB_LOCK(nal, flags);
-
- md = ptl_handle2md(mdh, nal);
- if (md == NULL) {
- rc = PTL_MD_INVALID;
- goto out;
- }
-
- if (oldumd != NULL)
- lib_md_deconstruct(nal, md, oldumd);
-
- if (newumd == NULL) {
- rc = PTL_OK;
- goto out;
- }
-
- /* XXX fttb, the new MD must be the same "shape" wrt fragmentation,
- * since we simply overwrite the old lib-md */
- if ((((newumd->options ^ md->options) &
- (PTL_MD_IOVEC | PTL_MD_KIOV)) != 0) ||
- ((newumd->options & (PTL_MD_IOVEC | PTL_MD_KIOV)) != 0 &&
- newumd->length != md->md_niov)) {
- rc = PTL_IOV_INVALID;
- goto out;
- }
-
- if (!PtlHandleIsEqual (*testqh, PTL_EQ_NONE)) {
- test_eq = ptl_handle2eq(testqh, nal);
- if (test_eq == NULL) {
- rc = PTL_EQ_INVALID;
- goto out;
- }
- }
-
- if (md->pending != 0) {
- rc = PTL_MD_NO_UPDATE;
- goto out;
- }
-
- if (test_eq == NULL ||
- test_eq->eq_deq_seq == test_eq->eq_enq_seq) {
- lib_me_t *me = md->me;
- int unlink = (md->md_flags & PTL_MD_FLAG_AUTO_UNLINK) ?
- PTL_UNLINK : PTL_RETAIN;
-
- // #warning this does not track eq refcounts properly
- rc = lib_md_build(nal, md, newumd, unlink);
-
- md->me = me;
- } else {
- rc = PTL_MD_NO_UPDATE;
- }
-
- out:
- LIB_UNLOCK(nal, flags);
-
- return rc;
-}