* GPL HEADER END
*/
/*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <lnet/lib-lnet.h>
+/* NB: add /proc interfaces in upcoming patches */
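+/* portal_rotor values: LNET_PTL_ROTOR_OFF prefers the local CPT,
+ * LNET_PTL_ROTOR_ON round-robins all incoming PUTs over CPTs, and
+ * LNET_PTL_ROTOR_HASH_RT (the default) additionally hashes routed
+ * PUTs by their source NID; see lnet_mt_of_match() */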
+int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
+CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
+ "redirect PUTs to different cpu-partitions");
+
static int
lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
__u64 mbits, __u64 ignore_bits)
goto match;
/* unset, new portal */
- lnet_res_lock();
+ lnet_ptl_lock(ptl);
/* check again with lock */
if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
- lnet_res_unlock();
+ lnet_ptl_unlock(ptl);
goto match;
}
else
lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);
- lnet_res_unlock();
+ lnet_ptl_unlock(ptl);
return 1;
return 1;
}
+static void
+lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
+{
+ struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
+ int i;
+
+ /* called with both lnet_res_lock(cpt) and lnet_ptl_lock held */
+ LASSERT(lnet_ptl_is_wildcard(ptl));
+
+ mtable->mt_enabled = 1;
+
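+ /* keep ptl_mt_maps[] sorted: append cpt at the tail, then bubble
+ * it down past any larger entries (a simple insertion sort) */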
+ ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
+ for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
+ LASSERT(ptl->ptl_mt_maps[i] != cpt);
+ if (ptl->ptl_mt_maps[i] < cpt)
+ break;
+
+ /* swap to order */
+ ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
+ ptl->ptl_mt_maps[i] = cpt;
+ }
+
+ ptl->ptl_mt_nmaps++;
+}
+
+static void
+lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
+{
+ struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
+ int i;
+
+ /* called with both lnet_res_lock(cpt) and lnet_ptl_lock held */
+ LASSERT(lnet_ptl_is_wildcard(ptl));
+
+ if (LNET_CPT_NUMBER == 1)
+ return; /* never disable the only match-table */
+
+ mtable->mt_enabled = 0;
+
+ LASSERT(ptl->ptl_mt_nmaps > 0 &&
+ ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);
+
+ /* remove it from mt_maps */
+ ptl->ptl_mt_nmaps--;
+ for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
+ if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
+ ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
+ }
+}
+
static int
lnet_try_match_md(lnet_libmd_t *md,
struct lnet_match_info *info, struct lnet_msg *msg)
unsigned int mlength;
lnet_me_t *me = md->md_me;
- /* mismatched MD op */
- if ((md->md_options & info->mi_opc) == 0)
- return LNET_MATCHMD_NONE;
-
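+ /* NB: the match result is now a bitmask; LNET_MATCHMD_EXHAUSTED
+ * may be OR'd into NONE/OK so the caller can tell that this MD
+ * has no buffer space left */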
/* MD exhausted */
if (lnet_md_exhausted(md))
+ return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
+
+ /* mismatched MD op */
+ if ((md->md_options & info->mi_opc) == 0)
return LNET_MATCHMD_NONE;
/* mismatched ME nid/pid? */
mlength = md->md_length - offset;
}
- if (info->mi_rlength <= mlength) { /* fits in allowed space */
+ if (info->mi_rlength <= mlength) { /* fits in allowed space */
mlength = info->mi_rlength;
} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
/* this packet _really_ is too big */
lnet_msg_attach_md(msg, md, offset, mlength);
md->md_offset = offset + mlength;
+ if (!lnet_md_exhausted(md))
+ return LNET_MATCHMD_OK;
+
/* Auto-unlink NOW, so the ME gets unlinked if required.
* We bumped md->md_refcount above so the MD just gets flagged
* for unlink when it is finalized. */
- if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
- lnet_md_exhausted(md)) {
+ if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
lnet_md_unlink(md);
- }
- return LNET_MATCHMD_OK;
+ return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
+}
+
+static struct lnet_match_table *
+lnet_match2mt(struct lnet_portal *ptl, lnet_process_id_t id, __u64 mbits)
+{
+ if (LNET_CPT_NUMBER == 1)
+ return ptl->ptl_mtables[0]; /* the only one */
+
+ /* unique portal: return the match-table hashed by NID;
+ * wildcard portal: return NULL and let the caller pick one */
+ return lnet_ptl_is_unique(ptl) ?
+ ptl->ptl_mtables[lnet_cpt_of_nid(id.nid)] : NULL;
}
struct lnet_match_table *
lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
__u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
{
- struct lnet_portal *ptl;
+ struct lnet_portal *ptl;
+ struct lnet_match_table *mtable;
+ /* NB: called w/o lock */
LASSERT(index < the_lnet.ln_nportals);
if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
return NULL;
ptl = the_lnet.ln_portals[index];
- /* NB: Now we only have one match-table for each portal,
- * and will have match-table per CPT in upcoming changes,
- * ME will be scattered to different match-tables based
- * on attaching information */
- return ptl->ptl_mtable;
+
+ mtable = lnet_match2mt(ptl, id, mbits);
+ if (mtable != NULL) /* unique portal or only one match-table */
+ return mtable;
+
+ /* it's a wildcard portal */
+ switch (pos) {
+ default:
+ return NULL;
+ case LNET_INS_BEFORE:
+ case LNET_INS_AFTER:
+ /* posted by a thread with no CPT affinity; always hash to a
+ * fixed match-table to avoid buffer stealing, which is heavy */
+ return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
+ case LNET_INS_LOCAL:
+ /* posted by a CPT-affinity thread; use the local match-table */
+ return ptl->ptl_mtables[lnet_cpt_current()];
+ }
}
-struct lnet_match_table *
-lnet_mt_of_match(unsigned int index, lnet_process_id_t id, __u64 mbits)
+static struct lnet_match_table *
+lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
{
- struct lnet_portal *ptl;
+ struct lnet_match_table *mtable;
+ struct lnet_portal *ptl;
+ int nmaps;
+ int rotor;
+ int routed;
+ int cpt;
- LASSERT(index < the_lnet.ln_nportals);
+ /* NB: called w/o lock */
+ LASSERT(info->mi_portal < the_lnet.ln_nportals);
+ ptl = the_lnet.ln_portals[info->mi_portal];
- ptl = the_lnet.ln_portals[index];
- if (!lnet_ptl_is_unique(ptl) &&
- !lnet_ptl_is_wildcard(ptl) && !lnet_ptl_is_lazy(ptl))
- return NULL;
+ LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
+
+ mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
+ if (mtable != NULL)
+ return mtable;
+
+ /* it's a wildcard portal */
+ routed = LNET_NIDNET(msg->msg_hdr.src_nid) !=
+ LNET_NIDNET(msg->msg_hdr.dest_nid);
+
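+ /* prefer the CPT we are running on unless the rotor forces
+ * rotation (LNET_PTL_ROTOR_ON) or this is a routed message */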
+ if (portal_rotor == LNET_PTL_ROTOR_OFF ||
+ (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
+ cpt = lnet_cpt_current();
+ if (ptl->ptl_mtables[cpt]->mt_enabled)
+ return ptl->ptl_mtables[cpt];
+ }
+
+ rotor = ptl->ptl_rotor++; /* get round-robin factor */
+ if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
+ cpt = lnet_cpt_of_nid(msg->msg_hdr.src_nid);
+ else
+ cpt = rotor % LNET_CPT_NUMBER;
+
+ if (!ptl->ptl_mtables[cpt]->mt_enabled) {
+ /* is there any active entry for this portal? */
+ nmaps = ptl->ptl_mt_nmaps;
+ /* map to an active mtable to avoid heavy "stealing" */
+ if (nmaps != 0) {
+ /* NB: ptl_mt_maps can change under us since we don't hold
+ * lnet_ptl_lock here, but using a stale map is harmless */
+ cpt = ptl->ptl_mt_maps[rotor % nmaps];
+ }
+ }
+
+ return ptl->ptl_mtables[cpt];
+}
+
+static int
+lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
+{
+ __u64 *bmap;
+ int i;
+
+ if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
+ return 0;
+
+ if (pos < 0) { /* check all bits */
+ for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
+ if (mtable->mt_exhausted[i] != (__u64)(-1))
+ return 0;
+ }
+ return 1;
+ }
+
+ LASSERT(pos <= LNET_MT_HASH_IGNORE);
+ /* is mtable::mt_mhash[pos] marked exhausted? select the bitmap
+ * word holding bit @pos, then test that bit */
+ bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
+ pos &= (1 << LNET_MT_BITS_U64) - 1;
- /* NB: Now we only have one match-table for each portal,
- * and will have match-table per CPT in upcoming changes,
- * request will be scattered to different match-tables based
- * on matching information */
- return ptl->ptl_mtable;
+ return ((*bmap) & (1ULL << pos)) != 0;
+}
+
+static void
+lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
+{
+ __u64 *bmap;
+
+ LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
+ LASSERT(pos <= LNET_MT_HASH_IGNORE);
+
+ /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
+ bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
+ pos &= (1 << LNET_MT_BITS_U64) - 1;
+
+ if (!exhausted)
+ *bmap &= ~(1ULL << pos);
+ else
+ *bmap |= 1ULL << pos;
}
cfs_list_t *
struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
if (lnet_ptl_is_wildcard(ptl)) {
- return &mtable->mt_mlist;
-
- } else if (lnet_ptl_is_unique(ptl)) {
+ return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
+ } else {
unsigned long hash = mbits + id.nid + id.pid;
+ LASSERT(lnet_ptl_is_unique(ptl));
hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
return &mtable->mt_mhash[hash];
}
-
- return NULL;
}
int
cfs_list_t *head;
lnet_me_t *me;
lnet_me_t *tmp;
+ int exhausted = 0;
int rc;
- head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
- if (head == NULL) /* nobody posted anything on this portal */
- goto out;
+ /* MEs posted with ignore bits all live in the extra
+ * LNET_MT_HASH_IGNORE bucket; scan it first if it's not empty */
+ if (!cfs_list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
+ head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
+ else
+ head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
+ again:
+ /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
+ if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
+ exhausted = LNET_MATCHMD_EXHAUSTED;
cfs_list_for_each_entry_safe(me, tmp, head, me_list) {
/* ME attached but MD not attached yet */
LASSERT(me == me->me_md->md_me);
rc = lnet_try_match_md(me->me_md, info, msg);
- switch (rc) {
- default:
- LBUG();
+ if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
+ exhausted = 0; /* this MD still has room, list not exhausted */
- case LNET_MATCHMD_NONE:
- continue;
+ if ((rc & LNET_MATCHMD_FINISH) != 0) {
+ /* don't return the EXHAUSTED bit: a match finished here, so
+ * we can't tell whether the rest of the list is exhausted */
+ return rc & ~LNET_MATCHMD_EXHAUSTED;
+ }
+ }
- case LNET_MATCHMD_OK:
- return LNET_MATCHMD_OK;
+ if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
+ lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
+ if (!lnet_mt_test_exhausted(mtable, -1))
+ exhausted = 0;
+ }
- case LNET_MATCHMD_DROP:
- return LNET_MATCHMD_DROP;
- }
- /* not reached */
+ if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
+ head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
+ goto again; /* re-check MEs w/o ignore-bits */
}
- out:
if (info->mi_opc == LNET_MD_OP_GET ||
!lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
- return LNET_MATCHMD_DROP;
+ return LNET_MATCHMD_DROP | exhausted;
+
+ return LNET_MATCHMD_NONE | exhausted;
+}
+
+static int
+lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
+{
+ int rc;
+
+ /* the message arrived before any buffer was posted on this
+ * portal; simply delay or drop it */
+ if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
+ return 0;
+
+ lnet_ptl_lock(ptl);
+ /* check it again with hold of lock */
+ if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
+ lnet_ptl_unlock(ptl);
+ return 0;
+ }
+
+ if (lnet_ptl_is_lazy(ptl)) {
+ if (msg->msg_rx_ready_delay) {
+ msg->msg_rx_delayed = 1;
+ cfs_list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
+ }
+ rc = LNET_MATCHMD_NONE;
+ } else {
+ rc = LNET_MATCHMD_DROP;
+ }
+
+ lnet_ptl_unlock(ptl);
+ return rc;
+}
+
+static int
+lnet_ptl_match_delay(struct lnet_portal *ptl,
+ struct lnet_match_info *info, struct lnet_msg *msg)
+{
+ int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
+ int rc = 0;
+ int i;
+
+ /* try to steal a buffer from other CPTs and delay the message
+ * if there is nothing to steal; this is more expensive than a
+ * regular match, but should be rare */
+ LASSERT(lnet_ptl_is_wildcard(ptl));
+
+ for (i = 0; i < LNET_CPT_NUMBER; i++) {
+ struct lnet_match_table *mtable;
+ int cpt;
+
+ cpt = (first + i) % LNET_CPT_NUMBER;
+ mtable = ptl->ptl_mtables[cpt];
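+ /* always run the first pass (to join the stealing list) and
+ * the last one (to make the final delay/drop decision); skip
+ * disabled match-tables in between */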
+ if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
+ continue;
+
+ lnet_res_lock(cpt);
+ lnet_ptl_lock(ptl);
+
+ if (i == 0) { /* the first try, attach on stealing list */
+ cfs_list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_stealing);
+ }
+
+ if (!cfs_list_empty(&msg->msg_list)) { /* on stealing list */
+ rc = lnet_mt_match_md(mtable, info, msg);
+
+ if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
+ mtable->mt_enabled)
+ lnet_ptl_disable_mt(ptl, cpt);
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0)
+ cfs_list_del_init(&msg->msg_list);
+
+ } else {
+ /* removed from the stealing list: lnet_ptl_attach_md() on
+ * another thread already handled this message */
+ rc = msg->msg_md == NULL ?
+ LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
+ }
+
+ if (!cfs_list_empty(&msg->msg_list) && /* not matched yet */
+ (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
+ ptl->ptl_mt_nmaps == 0 || /* no active CPT */
+ (ptl->ptl_mt_nmaps == 1 && /* the only active CPT */
+ ptl->ptl_mt_maps[0] == cpt))) {
+ /* nothing to steal, delay or drop */
+ cfs_list_del_init(&msg->msg_list);
+
+ if (lnet_ptl_is_lazy(ptl)) {
+ msg->msg_rx_delayed = 1;
+ cfs_list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
+ rc = LNET_MATCHMD_NONE;
+ } else {
+ rc = LNET_MATCHMD_DROP;
+ }
+ }
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(cpt);
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
+ break;
+ }
- return LNET_MATCHMD_NONE;
+ return rc;
}
int
return LNET_MATCHMD_DROP;
}
- mtable = lnet_mt_of_match(info->mi_portal,
- info->mi_id, info->mi_mbits);
- if (mtable == NULL) {
- CDEBUG(D_NET, "Drop early message from %s of length %d into "
- "portal %d MB="LPX64"\n",
- libcfs_id2str(info->mi_id), info->mi_rlength,
- info->mi_portal, info->mi_mbits);
- return LNET_MATCHMD_DROP;
- }
-
ptl = the_lnet.ln_portals[info->mi_portal];
- lnet_res_lock();
+ rc = lnet_ptl_match_early(ptl, msg);
+ if (rc != 0) /* matched or delayed early message */
+ return rc;
+
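+ /* NB: only the res-lock of the chosen CPT is taken, so matching
+ * on different CPTs can run concurrently */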
+ mtable = lnet_mt_of_match(info, msg);
+ lnet_res_lock(mtable->mt_cpt);
if (the_lnet.ln_shutdown) {
- rc = LNET_MATCHMD_DROP;
- goto out;
+ rc = LNET_MATCHMD_DROP;
+ goto out1;
}
rc = lnet_mt_match_md(mtable, info, msg);
- if (rc != LNET_MATCHMD_NONE) /* matched or dropping */
- goto out;
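+ /* this match-table is exhausted: take it out of ptl_mt_maps so
+ * the rotor stops directing new messages to this CPT */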
+ if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
+ lnet_ptl_lock(ptl);
+ lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
+ lnet_ptl_unlock(ptl);
+ }
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
+ goto out1;
if (!msg->msg_rx_ready_delay)
- goto out;
+ goto out1;
+ LASSERT(lnet_ptl_is_lazy(ptl));
LASSERT(!msg->msg_rx_delayed);
- msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
-
- CDEBUG(D_NET,
- "Delaying %s from %s portal %d MB "LPX64" offset %d len %d\n",
- info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
- libcfs_id2str(info->mi_id), info->mi_portal,
- info->mi_mbits, info->mi_roffset, info->mi_rlength);
- out:
- lnet_res_unlock();
- return rc;
+
+ /* NB: we don't expect "delay" to happen often */
+ if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
+ lnet_ptl_lock(ptl);
+
+ msg->msg_rx_delayed = 1;
+ cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(mtable->mt_cpt);
+
+ } else {
+ lnet_res_unlock(mtable->mt_cpt);
+ rc = lnet_ptl_match_delay(ptl, info, msg);
+ }
+
+ if (msg->msg_rx_delayed) {
+ CDEBUG(D_NET,
+ "Delaying %s from %s ptl %d MB "LPX64" off %d len %d\n",
+ info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
+ libcfs_id2str(info->mi_id), info->mi_portal,
+ info->mi_mbits, info->mi_roffset, info->mi_rlength);
+ }
+ goto out0;
+ out1:
+ lnet_res_unlock(mtable->mt_cpt);
+ out0:
+ /* EXHAUSTED bit is only meaningful for internal functions */
+ return rc & ~LNET_MATCHMD_EXHAUSTED;
}
void
cfs_list_t *matches, cfs_list_t *drops)
{
struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
+ struct lnet_match_table *mtable;
+ cfs_list_t *head;
lnet_msg_t *tmp;
lnet_msg_t *msg;
+ int exhausted = 0;
+ int cpt;
LASSERT(md->md_refcount == 0); /* a brand new MD */
me->me_md = md;
md->md_me = me;
- cfs_list_for_each_entry_safe(msg, tmp, &ptl->ptl_msgq, msg_list) {
+ cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+ mtable = ptl->ptl_mtables[cpt];
+
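+ /* fast path: nobody is blocked or stealing on this portal and
+ * this hash bucket is not marked exhausted, so no delayed
+ * message can possibly match the new MD */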
+ if (cfs_list_empty(&ptl->ptl_msg_stealing) &&
+ cfs_list_empty(&ptl->ptl_msg_delayed) &&
+ !lnet_mt_test_exhausted(mtable, me->me_pos))
+ return;
+
+ lnet_ptl_lock(ptl);
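+ /* scan the stealing list first, then the delayed list */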
+ head = &ptl->ptl_msg_stealing;
+ again:
+ cfs_list_for_each_entry_safe(msg, tmp, head, msg_list) {
struct lnet_match_info info;
lnet_hdr_t *hdr;
int rc;
- LASSERT(msg->msg_rx_delayed);
+ LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
hdr = &msg->msg_hdr;
info.mi_id.nid = hdr->src_nid;
rc = lnet_try_match_md(md, &info, msg);
- if (rc == LNET_MATCHMD_NONE)
+ exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
+ if ((rc & LNET_MATCHMD_NONE) != 0) {
+ if (exhausted)
+ break;
continue;
+ }
/* Hurrah! This _is_ a match */
- cfs_list_del(&msg->msg_list);
+ LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
+ cfs_list_del_init(&msg->msg_list);
- if (rc == LNET_MATCHMD_OK) {
+ if (head == &ptl->ptl_msg_stealing) {
+ if (exhausted)
+ break;
+ /* stealing thread will handle the message */
+ continue;
+ }
+
+ if ((rc & LNET_MATCHMD_OK) != 0) {
cfs_list_add_tail(&msg->msg_list, matches);
CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
info.mi_portal, info.mi_mbits,
info.mi_roffset, info.mi_rlength);
} else {
- LASSERT(rc == LNET_MATCHMD_DROP);
-
cfs_list_add_tail(&msg->msg_list, drops);
}
- if (lnet_md_exhausted(md))
+ if (exhausted)
break;
}
+
+ if (!exhausted && head == &ptl->ptl_msg_stealing) {
+ head = &ptl->ptl_msg_delayed;
+ goto again;
+ }
+
+ if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
+ lnet_mt_set_exhausted(mtable, me->me_pos, 0);
+ if (!mtable->mt_enabled)
+ lnet_ptl_enable_mt(ptl, cpt);
+ }
+
+ lnet_ptl_unlock(ptl);
}
void
lnet_ptl_cleanup(struct lnet_portal *ptl)
{
struct lnet_match_table *mtable;
+ int i;
- LASSERT(cfs_list_empty(&ptl->ptl_msgq));
-
- if (ptl->ptl_mtable == NULL) /* uninitialized portal */
+ if (ptl->ptl_mtables == NULL) /* uninitialized portal */
return;
- do { /* iterate over match-tables when we have percpt match-table */
+ LASSERT(cfs_list_empty(&ptl->ptl_msg_delayed));
+ LASSERT(cfs_list_empty(&ptl->ptl_msg_stealing));
+#ifndef __KERNEL__
+# ifdef HAVE_LIBPTHREAD
+ pthread_mutex_destroy(&ptl->ptl_lock);
+# endif
+#endif
+ cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
cfs_list_t *mhash;
lnet_me_t *me;
int j;
- mtable = ptl->ptl_mtable;
-
if (mtable->mt_mhash == NULL) /* uninitialized match-table */
continue;
mhash = mtable->mt_mhash;
/* cleanup ME */
- while (!cfs_list_empty(&mtable->mt_mlist)) {
- me = cfs_list_entry(mtable->mt_mlist.next,
- lnet_me_t, me_list);
- CERROR("Active wildcard ME %p on exit\n", me);
- cfs_list_del(&me->me_list);
- lnet_me_free(me);
- }
-
- for (j = 0; j < LNET_MT_HASH_SIZE; j++) {
+ for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
while (!cfs_list_empty(&mhash[j])) {
me = cfs_list_entry(mhash[j].next,
lnet_me_t, me_list);
- CERROR("Active unique ME %p on exit\n", me);
+ CERROR("Active ME %p on exit\n", me);
cfs_list_del(&me->me_list);
lnet_me_free(me);
}
}
+ /* the extra entry is for MEs with ignore bits */
+ LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ }
- LIBCFS_FREE(mhash, sizeof(*mhash) * LNET_MT_HASH_SIZE);
- } while (0);
-
- LIBCFS_FREE(ptl->ptl_mtable, sizeof(*mtable));
- ptl->ptl_mtable = NULL;
+ cfs_percpt_free(ptl->ptl_mtables);
+ ptl->ptl_mtables = NULL;
}
int
{
struct lnet_match_table *mtable;
cfs_list_t *mhash;
+ int i;
int j;
- ptl->ptl_index = index;
- CFS_INIT_LIST_HEAD(&ptl->ptl_msgq);
-
- LIBCFS_ALLOC(mtable, sizeof(*mtable));
- if (mtable == NULL) {
+ ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(struct lnet_match_table));
+ if (ptl->ptl_mtables == NULL) {
CERROR("Failed to create match table for portal %d\n", index);
return -ENOMEM;
}
- ptl->ptl_mtable = mtable;
- do { /* iterate over match-tables when we have percpt match-table */
- LIBCFS_ALLOC(mhash, sizeof(*mhash) * LNET_MT_HASH_SIZE);
+ ptl->ptl_index = index;
+ CFS_INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
+ CFS_INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
+#ifdef __KERNEL__
+ spin_lock_init(&ptl->ptl_lock);
+#else
+# ifdef HAVE_LIBPTHREAD
+ pthread_mutex_init(&ptl->ptl_lock, NULL);
+# endif
+#endif
+ cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
+ /* the extra entry is for MEs with ignore bits */
+ LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
+ sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
if (mhash == NULL) {
CERROR("Failed to create match hash for portal %d\n",
index);
goto failed;
}
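+ /* every bucket starts out marked exhausted (all bits set) until
+ * an ME with an attached MD clears its bit */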
+ memset(&mtable->mt_exhausted[0], -1,
+ sizeof(mtable->mt_exhausted[0]) *
+ LNET_MT_EXHAUSTED_BMAP);
mtable->mt_mhash = mhash;
- for (j = 0; j < LNET_MT_HASH_SIZE; j++)
+ for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
CFS_INIT_LIST_HEAD(&mhash[j]);
- CFS_INIT_LIST_HEAD(&mtable->mt_mlist);
mtable->mt_portal = index;
- } while (0);
+ mtable->mt_cpt = i;
+ }
return 0;
failed:
int size;
int i;
- size = sizeof(struct lnet_portal);
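+ /* struct lnet_portal now ends with the variable-length
+ * ptl_mt_maps[] array, so size it for LNET_CPT_NUMBER entries */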
+ size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
the_lnet.ln_nportals = MAX_PORTALS;
the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size);
CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
ptl = the_lnet.ln_portals[portal];
- lnet_res_lock();
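+ /* LNET_LOCK_EX takes the res-lock of all CPTs exclusively */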
+ lnet_res_lock(LNET_LOCK_EX);
+ lnet_ptl_lock(ptl);
+
lnet_ptl_setopt(ptl, LNET_PTL_LAZY);
- lnet_res_unlock();
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(LNET_LOCK_EX);
return 0;
}
+EXPORT_SYMBOL(LNetSetLazyPortal);
/**
* Turn off the lazy portal attribute. Delayed requests on the portal,
ptl = the_lnet.ln_portals[portal];
- lnet_res_lock();
+ lnet_res_lock(LNET_LOCK_EX);
+ lnet_ptl_lock(ptl);
if (!lnet_ptl_is_lazy(ptl)) {
- lnet_res_unlock();
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(LNET_LOCK_EX);
return 0;
}
CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
/* grab all the blocked messages atomically */
- cfs_list_splice_init(&ptl->ptl_msgq, &zombies);
+ cfs_list_splice_init(&ptl->ptl_msg_delayed, &zombies);
lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
- lnet_res_unlock();
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(LNET_LOCK_EX);
lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr");
return 0;
}
+EXPORT_SYMBOL(LNetClearLazyPortal);