X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Flnet%2Flib-ptl.c;h=d68c4fc277da4fb1ec2eba92c69ed676bd00877e;hb=dd62978b709aebf3dda536f2230f1b79c5361d9c;hp=b1ce5a8fc2f7eb9aabe132070cd143033c7710f9;hpb=25766da50b627648b04549ff3fb55af12acbcb4b;p=fs%2Flustre-release.git

diff --git a/lnet/lnet/lib-ptl.c b/lnet/lnet/lib-ptl.c
index b1ce5a8..d68c4fc 100644
--- a/lnet/lnet/lib-ptl.c
+++ b/lnet/lnet/lib-ptl.c
@@ -38,6 +38,11 @@
 
 #include <lnet/lib-lnet.h>
 
+/* NB: add /proc interfaces in upcoming patches */
+int	portal_rotor	= LNET_PTL_ROTOR_HASH_RT;
+CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
+		"redirect PUTs to different cpu-partitions");
+
 static int
 lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
 		    __u64 mbits, __u64 ignore_bits)
@@ -56,10 +61,10 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
 		goto match;
 
 	/* unset, new portal */
-	lnet_res_lock();
+	lnet_ptl_lock(ptl);
 	/* check again with lock */
 	if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
-		lnet_res_unlock();
+		lnet_ptl_unlock(ptl);
 		goto match;
 	}
 
@@ -69,7 +74,7 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
 	else
 		lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);
 
-	lnet_res_unlock();
+	lnet_ptl_unlock(ptl);
 
 	return 1;
 
@@ -80,6 +85,56 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
 	return 1;
 }
 
+static void
+lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
+{
+	struct lnet_match_table	*mtable = ptl->ptl_mtables[cpt];
+	int			i;
+
+	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
+	LASSERT(lnet_ptl_is_wildcard(ptl));
+
+	mtable->mt_enabled = 1;
+
+	ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
+	for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
+		LASSERT(ptl->ptl_mt_maps[i] != cpt);
+		if (ptl->ptl_mt_maps[i] < cpt)
+			break;
+
+		/* swap to order */
+		ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
+		ptl->ptl_mt_maps[i] = cpt;
+	}
+
+	ptl->ptl_mt_nmaps++;
+}
+
+static void
+lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
+{
+	struct lnet_match_table	*mtable = ptl->ptl_mtables[cpt];
+	int			i;
+
+	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
+	LASSERT(lnet_ptl_is_wildcard(ptl));
+
+	if (LNET_CPT_NUMBER == 1)
+		return; /* never disable the only match-table */
+
+	mtable->mt_enabled = 0;
+
+	LASSERT(ptl->ptl_mt_nmaps > 0 &&
+		ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);
+
+	/* remove it from mt_maps */
+	ptl->ptl_mt_nmaps--;
+	for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
+		if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
+			ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
+	}
+}
+
 static int
 lnet_try_match_md(lnet_libmd_t *md,
 		  struct lnet_match_info *info, struct lnet_msg *msg)
@@ -90,12 +145,12 @@ lnet_try_match_md(lnet_libmd_t *md,
 	unsigned int	mlength;
 	lnet_me_t	*me = md->md_me;
 
-	/* mismatched MD op */
-	if ((md->md_options & info->mi_opc) == 0)
-		return LNET_MATCHMD_NONE;
-
 	/* MD exhausted */
 	if (lnet_md_exhausted(md))
+		return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
+
+	/* mismatched MD op */
+	if ((md->md_options & info->mi_opc) == 0)
 		return LNET_MATCHMD_NONE;
 
 	/* mismatched ME nid/pid? */
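
Editor's aside (illustrative, not part of the patch): the hunk above turns the result of lnet_try_match_md() into a bitmask, so LNET_MATCHMD_EXHAUSTED can ride along with LNET_MATCHMD_NONE or LNET_MATCHMD_OK, and later hunks test the combined LNET_MATCHMD_FINISH mask instead of comparing against a single value. The standalone sketch below shows that idiom only; the numeric flag values and all names in it are assumptions made for the example — the real definitions live in the LNet headers.

/* Illustrative sketch only; flag values are assumed for the example. */
#include <stdio.h>

#define MATCHMD_NONE		(1 << 0)	/* no match yet, keep looking */
#define MATCHMD_OK		(1 << 1)	/* matched an MD */
#define MATCHMD_DROP		(1 << 2)	/* message must be dropped */
#define MATCHMD_EXHAUSTED	(1 << 3)	/* this MD has no space left */
#define MATCHMD_FINISH		(MATCHMD_OK | MATCHMD_DROP)

/* a matcher combines a primary status with the EXHAUSTED hint */
static int try_match(int space_left, int fits)
{
	if (space_left == 0)
		return MATCHMD_NONE | MATCHMD_EXHAUSTED;
	if (!fits)
		return MATCHMD_NONE;
	return (space_left == 1) ? (MATCHMD_OK | MATCHMD_EXHAUSTED) : MATCHMD_OK;
}

int main(void)
{
	int rc = try_match(1, 1);

	if ((rc & MATCHMD_FINISH) != 0)		/* matched or dropping: stop */
		printf("finished, exhausted=%d\n",
		       (rc & MATCHMD_EXHAUSTED) != 0);

	/* the internal-only bit is stripped before returning to callers */
	rc &= ~MATCHMD_EXHAUSTED;
	printf("rc=0x%x\n", (unsigned)rc);
	return 0;
}

The point of the EXHAUSTED bit is that a caller walking a list of MDs learns, from one return value, both whether to stop searching and whether this MD is now full and can be retired.
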
@@ -125,7 +180,7 @@ lnet_try_match_md(lnet_libmd_t *md,
 		mlength = md->md_length - offset;
 	}
 
-	if (info->mi_rlength <= mlength) {        /* fits in allowed space */
+	if (info->mi_rlength <= mlength) {	/* fits in allowed space */
 		mlength = info->mi_rlength;
 	} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
 		/* this packet _really_ is too big */
@@ -147,53 +202,113 @@ lnet_try_match_md(lnet_libmd_t *md,
 	lnet_msg_attach_md(msg, md, offset, mlength);
 	md->md_offset = offset + mlength;
 
+	if (!lnet_md_exhausted(md))
+		return LNET_MATCHMD_OK;
+
 	/* Auto-unlink NOW, so the ME gets unlinked if required.
 	 * We bumped md->md_refcount above so the MD just gets flagged
 	 * for unlink when it is finalized. */
-	if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
-	    lnet_md_exhausted(md)) {
+	if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
 		lnet_md_unlink(md);
-	}
 
-	return LNET_MATCHMD_OK;
+	return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
+}
+
+static struct lnet_match_table *
+lnet_match2mt(struct lnet_portal *ptl, lnet_process_id_t id, __u64 mbits)
+{
+	if (LNET_CPT_NUMBER == 1)
+		return ptl->ptl_mtables[0]; /* the only one */
+
+	/* if it's a unique portal, return match-table hashed by NID */
+	return lnet_ptl_is_unique(ptl) ?
+	       ptl->ptl_mtables[lnet_cpt_of_nid(id.nid)] : NULL;
 }
 
 struct lnet_match_table *
 lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
 		  __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
 {
-	struct lnet_portal *ptl;
+	struct lnet_portal	*ptl;
+	struct lnet_match_table	*mtable;
 
+	/* NB: called w/o lock */
 	LASSERT(index < the_lnet.ln_nportals);
 
 	if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
 		return NULL;
 
 	ptl = the_lnet.ln_portals[index];
-	/* NB: Now we only have one match-table for each portal,
-	 * and will have match-table per CPT in upcoming changes,
-	 * ME will be scattered to different match-tables based
-	 * on attaching information */
-	return ptl->ptl_mtable;
+
+	mtable = lnet_match2mt(ptl, id, mbits);
+	if (mtable != NULL) /* unique portal or only one match-table */
+		return mtable;
+
+	/* it's a wildcard portal */
+	switch (pos) {
+	default:
+		return NULL;
+	case LNET_INS_BEFORE:
+	case LNET_INS_AFTER:
+		/* posted by no affinity thread, always hash to specific
+		 * match-table to avoid buffer stealing which is heavy */
+		return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
+	case LNET_INS_LOCAL:
+		/* posted by cpu-affinity thread */
+		return ptl->ptl_mtables[lnet_cpt_current()];
+	}
 }
 
-struct lnet_match_table *
-lnet_mt_of_match(unsigned int index, lnet_process_id_t id, __u64 mbits)
+static struct lnet_match_table *
+lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
 {
-	struct lnet_portal *ptl;
+	struct lnet_match_table	*mtable;
+	struct lnet_portal	*ptl;
+	int			nmaps;
+	int			rotor;
+	int			routed;
+	int			cpt;
 
-	LASSERT(index < the_lnet.ln_nportals);
+	/* NB: called w/o lock */
+	LASSERT(info->mi_portal < the_lnet.ln_nportals);
+	ptl = the_lnet.ln_portals[info->mi_portal];
 
-	ptl = the_lnet.ln_portals[index];
-	if (!lnet_ptl_is_unique(ptl) &&
-	    !lnet_ptl_is_wildcard(ptl) && !lnet_ptl_is_lazy(ptl))
-		return NULL;
+	LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
+
+	mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
+	if (mtable != NULL)
+		return mtable;
+
+	/* it's a wildcard portal */
+	routed = LNET_NIDNET(msg->msg_hdr.src_nid) !=
+		 LNET_NIDNET(msg->msg_hdr.dest_nid);
 
-	/* NB: Now we only have one match-table for each portal,
-	 * and will have match-table per CPT in upcoming changes,
-	 * request will be scattered to different match-tables based
-	 * on matching information */
-	return ptl->ptl_mtable;
+	if (portal_rotor == LNET_PTL_ROTOR_OFF ||
+	    (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
+		cpt = lnet_cpt_current();
+		if (ptl->ptl_mtables[cpt]->mt_enabled)
+			return ptl->ptl_mtables[cpt];
+	}
+
+	rotor = ptl->ptl_rotor++; /* get round-robin factor */
+	if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
+		cpt = lnet_cpt_of_nid(msg->msg_hdr.src_nid);
+	else
+		cpt = rotor % LNET_CPT_NUMBER;
+
+	if (!ptl->ptl_mtables[cpt]->mt_enabled) {
+		/* is there any active entry for this portal? */
+		nmaps = ptl->ptl_mt_nmaps;
+		/* map to an active mtable to avoid heavy "stealing" */
+		if (nmaps != 0) {
+			/* NB: there is possibility that ptl_mt_maps is being
+			 * changed because we are not under protection of
+			 * lnet_ptl_lock, but it shouldn't hurt anything */
+			cpt = ptl->ptl_mt_maps[rotor % nmaps];
+		}
+	}
+
+	return ptl->ptl_mtables[cpt];
 }
 
 cfs_list_t *
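
Editor's aside (illustrative, not part of the patch): for a wildcard portal, lnet_mt_of_match() above decides which CPU partition's match-table serves an incoming message — the local partition when the rotor is off or the message is not routed, a source-NID hash for routed messages under LNET_PTL_ROTOR_HASH_RT, and a plain round-robin counter otherwise, with a final fall-back to a currently enabled table. The sketch below is a simplified standalone rendering of that policy; it omits the enabled-table fall-back, and every name in it (pick_cpt, portal_sketch, the enum values) is hypothetical.

/* Illustrative sketch only; simplified from lnet_mt_of_match() above. */
#include <stdio.h>

enum rotor_policy {
	ROTOR_OFF,	/* always prefer the local CPU partition */
	ROTOR_ON,	/* plain round-robin over all partitions */
	ROTOR_HASH_RT,	/* round-robin, but hash routed messages by NID */
};

struct portal_sketch {
	unsigned int	rotor;	/* round-robin counter (like ptl_rotor) */
	unsigned int	ncpts;	/* number of CPU partitions */
};

static unsigned int
pick_cpt(struct portal_sketch *p, enum rotor_policy policy, int routed,
	 unsigned long long src_nid, unsigned int current_cpt)
{
	unsigned int rotor;

	/* non-routed traffic (or rotor off) stays on the local partition */
	if (policy == ROTOR_OFF || (policy != ROTOR_ON && !routed))
		return current_cpt;

	rotor = p->rotor++;	/* unlocked on purpose, like ptl_rotor */

	/* routed messages are spread by source NID; stand-in for
	 * lnet_cpt_of_nid() */
	if (policy == ROTOR_HASH_RT && routed)
		return (unsigned int)(src_nid % p->ncpts);

	return rotor % p->ncpts;
}

int main(void)
{
	struct portal_sketch p = { .rotor = 0, .ncpts = 4 };

	printf("routed msg from nid 42 -> cpt %u\n",
	       pick_cpt(&p, ROTOR_HASH_RT, 1, 42, 0));
	return 0;
}

Hashing routed messages by their source NID spreads a router's load across partitions while keeping any one peer's traffic on a consistent match-table.
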
@@ -205,14 +320,13 @@ lnet_mt_match_head(struct lnet_match_table *mtable,
 
 	if (lnet_ptl_is_wildcard(ptl)) {
 		return &mtable->mt_mlist;
-	} else if (lnet_ptl_is_unique(ptl)) {
+	} else {
 		unsigned long hash = mbits + id.nid + id.pid;
 
+		LASSERT(lnet_ptl_is_unique(ptl));
 		hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
 		return &mtable->mt_mhash[hash];
 	}
-
-	return NULL;
 }
 
 int
@@ -222,11 +336,14 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
 	cfs_list_t		*head;
 	lnet_me_t		*me;
 	lnet_me_t		*tmp;
+	int			exhausted = 0;
 	int			rc;
 
+	/* NB: only wildcard portal can return LNET_MATCHMD_EXHAUSTED */
+	if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
+		exhausted = LNET_MATCHMD_EXHAUSTED;
+
 	head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
-	if (head == NULL) /* nobody posted anything on this portal */
-		goto out;
 
 	cfs_list_for_each_entry_safe(me, tmp, head, me_list) {
 		/* ME attached but MD not attached yet */
@@ -236,28 +353,128 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
 		LASSERT(me == me->me_md->md_me);
 
 		rc = lnet_try_match_md(me->me_md, info, msg);
-		switch (rc) {
-		default:
-			LBUG();
+		if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
+			exhausted = 0; /* mlist is not empty */
 
-		case LNET_MATCHMD_NONE:
-			continue;
-
-		case LNET_MATCHMD_OK:
-			return LNET_MATCHMD_OK;
-
-		case LNET_MATCHMD_DROP:
-			return LNET_MATCHMD_DROP;
+		if ((rc & LNET_MATCHMD_FINISH) != 0) {
+			/* don't return EXHAUSTED bit because we don't know
+			 * whether the mlist is empty or not */
+			return rc & ~LNET_MATCHMD_EXHAUSTED;
 		}
-		/* not reached */
 	}
 
- out:
 	if (info->mi_opc == LNET_MD_OP_GET ||
 	    !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
-		return LNET_MATCHMD_DROP;
+		return LNET_MATCHMD_DROP | exhausted;
+
+	return LNET_MATCHMD_NONE | exhausted;
+}
+
+static int
+lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
+{
+	int	rc;
+
+	/* message arrived before any buffer posting on this portal,
+	 * simply delay or drop this message */
+	if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
+		return 0;
+
+	lnet_ptl_lock(ptl);
+	/* check it again with hold of lock */
+	if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
+		lnet_ptl_unlock(ptl);
+		return 0;
+	}
+
+	if (lnet_ptl_is_lazy(ptl)) {
+		if (msg->msg_rx_ready_delay) {
+			msg->msg_rx_delayed = 1;
+			cfs_list_add_tail(&msg->msg_list,
+					  &ptl->ptl_msg_delayed);
+		}
+		rc = LNET_MATCHMD_NONE;
+	} else {
+		rc = LNET_MATCHMD_DROP;
+	}
+
+	lnet_ptl_unlock(ptl);
+	return rc;
+}
+
+static int
+lnet_ptl_match_delay(struct lnet_portal *ptl,
+		     struct lnet_match_info *info, struct lnet_msg *msg)
+{
+	int	first = ptl->ptl_mt_maps[0]; /* read w/o lock */
+	int	rc = 0;
+	int	i;
+
+	/* steal buffer from other CPTs, and delay it if nothing to steal,
+	 * this function is more expensive than a regular match, but we
+	 * don't expect it can happen a lot */
+	LASSERT(lnet_ptl_is_wildcard(ptl));
+
+	for (i = 0; i < LNET_CPT_NUMBER; i++) {
+		struct lnet_match_table *mtable;
+		int			cpt;
+
+		cpt = (first + i) % LNET_CPT_NUMBER;
+		mtable = ptl->ptl_mtables[cpt];
+		if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
+			continue;
+
+		lnet_res_lock(cpt);
+		lnet_ptl_lock(ptl);
 
-	return LNET_MATCHMD_NONE;
+		if (i == 0) { /* the first try, attach on stealing list */
+			cfs_list_add_tail(&msg->msg_list,
+					  &ptl->ptl_msg_stealing);
+		}
+
+		if (!cfs_list_empty(&msg->msg_list)) { /* on stealing list */
+			rc = lnet_mt_match_md(mtable, info, msg);
+
+			if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
+			    mtable->mt_enabled)
+				lnet_ptl_disable_mt(ptl, cpt);
+
+			if ((rc & LNET_MATCHMD_FINISH) != 0)
+				cfs_list_del_init(&msg->msg_list);
+
+		} else {
+			/* could be matched by lnet_ptl_attach_md()
+			 * which is called by another thread */
+			rc = msg->msg_md == NULL ?
+			     LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
+		}
+
+		if (!cfs_list_empty(&msg->msg_list) && /* not matched yet */
+		    (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
+		     ptl->ptl_mt_nmaps == 0 ||   /* no active CPT */
+		     (ptl->ptl_mt_nmaps == 1 &&  /* the only active CPT */
+		      ptl->ptl_mt_maps[0] == cpt))) {
+			/* nothing to steal, delay or drop */
+			cfs_list_del_init(&msg->msg_list);
+
+			if (lnet_ptl_is_lazy(ptl)) {
+				msg->msg_rx_delayed = 1;
+				cfs_list_add_tail(&msg->msg_list,
+						  &ptl->ptl_msg_delayed);
+				rc = LNET_MATCHMD_NONE;
+			} else {
+				rc = LNET_MATCHMD_DROP;
+			}
+		}
+
+		lnet_ptl_unlock(ptl);
+		lnet_res_unlock(cpt);
+
+		if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
+			break;
+	}
+
+	return rc;
 }
 
 int
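
Editor's aside (illustrative, not part of the patch): lnet_ptl_match_delay() above starts its steal scan at ptl_mt_maps[0], and lnet_mt_of_match() earlier picks ptl_mt_maps[rotor % nmaps]; both rely on lnet_ptl_enable_mt()/lnet_ptl_disable_mt() keeping ptl_mt_maps a sorted list of the CPTs whose match-tables are currently enabled. Below is a minimal standalone sketch of that bookkeeping, with hypothetical names and no Lustre types.

/* Illustrative sketch only; mirrors lnet_ptl_enable_mt()/disable_mt(). */
#include <assert.h>
#include <stdio.h>

#define MAX_CPTS	16

static int mt_maps[MAX_CPTS];	/* sorted ids of enabled match-tables */
static int mt_nmaps;		/* how many are enabled */

static void mt_map_enable(int cpt)
{
	int i;

	/* append, then bubble the new id left until the array is sorted */
	mt_maps[mt_nmaps] = cpt;
	for (i = mt_nmaps - 1; i >= 0; i--) {
		assert(mt_maps[i] != cpt);
		if (mt_maps[i] < cpt)
			break;
		mt_maps[i + 1] = mt_maps[i];
		mt_maps[i] = cpt;
	}
	mt_nmaps++;
}

static void mt_map_disable(int cpt)
{
	int i;

	/* close the gap by shifting the tail left over the removed id */
	mt_nmaps--;
	for (i = 0; i < mt_nmaps; i++) {
		if (mt_maps[i] >= cpt)
			mt_maps[i] = mt_maps[i + 1];
	}
}

int main(void)
{
	int i;

	mt_map_enable(3);
	mt_map_enable(1);
	mt_map_enable(2);
	mt_map_disable(2);

	for (i = 0; i < mt_nmaps; i++)	/* prints: 1 3 */
		printf("%d ", mt_maps[i]);
	printf("\n");
	return 0;
}

The enable path is a single insertion-sort step (append, then bubble left); the disable path closes the gap by shifting the tail down, so the array stays sorted without ever being fully re-sorted.
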
@@ -277,43 +494,63 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
 		return LNET_MATCHMD_DROP;
 	}
 
-	mtable = lnet_mt_of_match(info->mi_portal,
-				  info->mi_id, info->mi_mbits);
-	if (mtable == NULL) {
-		CDEBUG(D_NET, "Drop early message from %s of length %d into "
-			      "portal %d MB="LPX64"\n",
-		       libcfs_id2str(info->mi_id), info->mi_rlength,
-		       info->mi_portal, info->mi_mbits);
-		return LNET_MATCHMD_DROP;
-	}
-
 	ptl = the_lnet.ln_portals[info->mi_portal];
-	lnet_res_lock();
+	rc = lnet_ptl_match_early(ptl, msg);
+	if (rc != 0) /* matched or delayed early message */
+		return rc;
+
+	mtable = lnet_mt_of_match(info, msg);
+	lnet_res_lock(mtable->mt_cpt);
 
 	if (the_lnet.ln_shutdown) {
-		rc = LNET_MATCHMD_DROP;
-		goto out;
+		rc = LNET_MATCHMD_DROP;
+		goto out1;
 	}
 
 	rc = lnet_mt_match_md(mtable, info, msg);
-	if (rc != LNET_MATCHMD_NONE) /* matched or dropping */
-		goto out;
+	if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
+		lnet_ptl_lock(ptl);
+		lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
+		lnet_ptl_unlock(ptl);
+	}
+
+	if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
+		goto out1;
 
 	if (!msg->msg_rx_ready_delay)
-		goto out;
+		goto out1;
 
+	LASSERT(lnet_ptl_is_lazy(ptl));
 	LASSERT(!msg->msg_rx_delayed);
-	msg->msg_rx_delayed = 1;
-	cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msgq);
-
-	CDEBUG(D_NET,
-	       "Delaying %s from %s portal %d MB "LPX64" offset %d len %d\n",
-	       info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
-	       libcfs_id2str(info->mi_id), info->mi_portal,
-	       info->mi_mbits, info->mi_roffset, info->mi_rlength);
- out:
-	lnet_res_unlock();
-	return rc;
+
+	/* NB: we don't expect "delay" can happen a lot */
+	if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
+		lnet_ptl_lock(ptl);
+
+		msg->msg_rx_delayed = 1;
+		cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
+
+		lnet_ptl_unlock(ptl);
+		lnet_res_unlock(mtable->mt_cpt);
+
+	} else {
+		lnet_res_unlock(mtable->mt_cpt);
+		rc = lnet_ptl_match_delay(ptl, info, msg);
+	}
+
+	if (msg->msg_rx_delayed) {
+		CDEBUG(D_NET,
+		       "Delaying %s from %s ptl %d MB "LPX64" off %d len %d\n",
+		       info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
+		       libcfs_id2str(info->mi_id), info->mi_portal,
+		       info->mi_mbits, info->mi_roffset, info->mi_rlength);
+	}
+	goto out0;
+ out1:
+	lnet_res_unlock(mtable->mt_cpt);
+ out0:
+	/* EXHAUSTED bit is only meaningful for internal functions */
+	return rc & ~LNET_MATCHMD_EXHAUSTED;
 }
 
 void
@@ -331,20 +568,35 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 		   cfs_list_t *matches, cfs_list_t *drops)
 {
 	struct lnet_portal	*ptl = the_lnet.ln_portals[me->me_portal];
+	struct lnet_match_table	*mtable;
+	cfs_list_t		*head;
 	lnet_msg_t		*tmp;
 	lnet_msg_t		*msg;
+	int			exhausted = 0;
+	int			cpt;
 
 	LASSERT(md->md_refcount == 0); /* a brand new MD */
 
 	me->me_md = md;
 	md->md_me = me;
 
-	cfs_list_for_each_entry_safe(msg, tmp, &ptl->ptl_msgq, msg_list) {
+	cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+	mtable = ptl->ptl_mtables[cpt];
+
+	if (cfs_list_empty(&ptl->ptl_msg_stealing) &&
+	    cfs_list_empty(&ptl->ptl_msg_delayed) &&
+	    mtable->mt_enabled)
+		return;
+
+	lnet_ptl_lock(ptl);
+	head = &ptl->ptl_msg_stealing;
+ again:
+	cfs_list_for_each_entry_safe(msg, tmp, head, msg_list) {
 		struct lnet_match_info	info;
 		lnet_hdr_t		*hdr;
 		int			rc;
 
-		LASSERT(msg->msg_rx_delayed);
+		LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
 
 		hdr = &msg->msg_hdr;
 		info.mi_id.nid	= hdr->src_nid;
@@ -357,13 +609,25 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 
 		rc = lnet_try_match_md(md, &info, msg);
 
-		if (rc == LNET_MATCHMD_NONE)
+		exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
+		if ((rc & LNET_MATCHMD_NONE) != 0) {
+			if (exhausted)
+				break;
 			continue;
+		}
 
 		/* Hurrah! This _is_ a match */
-		cfs_list_del(&msg->msg_list);
+		LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
+		cfs_list_del_init(&msg->msg_list);
 
-		if (rc == LNET_MATCHMD_OK) {
+		if (head == &ptl->ptl_msg_stealing) {
+			if (exhausted)
+				break;
+			/* stealing thread will handle the message */
+			continue;
+		}
+
+		if ((rc & LNET_MATCHMD_OK) != 0) {
 			cfs_list_add_tail(&msg->msg_list, matches);
 
 			CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
@@ -372,33 +636,45 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 			       info.mi_portal, info.mi_mbits,
 			       info.mi_roffset, info.mi_rlength);
 		} else {
-			LASSERT(rc == LNET_MATCHMD_DROP);
-
 			cfs_list_add_tail(&msg->msg_list, drops);
 		}
 
-		if (lnet_md_exhausted(md))
+		if (exhausted)
 			break;
 	}
+
+	if (!exhausted && head == &ptl->ptl_msg_stealing) {
+		head = &ptl->ptl_msg_delayed;
+		goto again;
+	}
+
+	if (lnet_ptl_is_wildcard(ptl) && !exhausted && !mtable->mt_enabled)
+		lnet_ptl_enable_mt(ptl, cpt);
+
+	lnet_ptl_unlock(ptl);
 }
 
 void
 lnet_ptl_cleanup(struct lnet_portal *ptl)
 {
 	struct lnet_match_table	*mtable;
+	int			i;
 
-	LASSERT(cfs_list_empty(&ptl->ptl_msgq));
-
-	if (ptl->ptl_mtable == NULL) /* uninitialized portal */
+	if (ptl->ptl_mtables == NULL) /* uninitialized portal */
 		return;
 
-	do { /* iterate over match-tables when we have percpt match-table */
+	LASSERT(cfs_list_empty(&ptl->ptl_msg_delayed));
+	LASSERT(cfs_list_empty(&ptl->ptl_msg_stealing));
+#ifndef __KERNEL__
+# ifdef HAVE_LIBPTHREAD
+	pthread_mutex_destroy(&ptl->ptl_lock);
+# endif
+#endif
+	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
 		cfs_list_t	*mhash;
 		lnet_me_t	*me;
 		int		 j;
 
-		mtable = ptl->ptl_mtable;
-
 		if (mtable->mt_mhash == NULL) /* uninitialized match-table */
 			continue;
@@ -423,10 +699,10 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
 		}
 
 		LIBCFS_FREE(mhash, sizeof(*mhash) * LNET_MT_HASH_SIZE);
-	} while (0);
+	}
 
-	LIBCFS_FREE(ptl->ptl_mtable, sizeof(*mtable));
-	ptl->ptl_mtable = NULL;
+	cfs_percpt_free(ptl->ptl_mtables);
+	ptl->ptl_mtables = NULL;
 }
 
 int
@@ -434,20 +710,29 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
 {
 	struct lnet_match_table	*mtable;
 	cfs_list_t		*mhash;
+	int			i;
 	int			j;
 
-	ptl->ptl_index = index;
-	CFS_INIT_LIST_HEAD(&ptl->ptl_msgq);
-
-	LIBCFS_ALLOC(mtable, sizeof(*mtable));
-	if (mtable == NULL) {
+	ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
+					    sizeof(struct lnet_match_table));
+	if (ptl->ptl_mtables == NULL) {
 		CERROR("Failed to create match table for portal %d\n", index);
 		return -ENOMEM;
 	}
 
-	ptl->ptl_mtable = mtable;
-	do { /* iterate over match-tables when we have percpt match-table */
-		LIBCFS_ALLOC(mhash, sizeof(*mhash) * LNET_MT_HASH_SIZE);
+	ptl->ptl_index = index;
+	CFS_INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
+	CFS_INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
+#ifdef __KERNEL__
+	cfs_spin_lock_init(&ptl->ptl_lock);
+#else
+# ifdef HAVE_LIBPTHREAD
+	pthread_mutex_init(&ptl->ptl_lock, NULL);
+# endif
+#endif
+	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
+		LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
+				 sizeof(*mhash) * LNET_MT_HASH_SIZE);
 		if (mhash == NULL) {
 			CERROR("Failed to create match hash for portal %d\n",
 			       index);
@@ -460,7 +745,8 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
 		CFS_INIT_LIST_HEAD(&mtable->mt_mlist);
 
 		mtable->mt_portal = index;
-	} while (0);
+		mtable->mt_cpt = i;
+	}
 
 	return 0;
  failed:
@@ -489,7 +775,7 @@ lnet_portals_create(void)
 	int	size;
 	int	i;
 
-	size = sizeof(struct lnet_portal);
+	size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
 
 	the_lnet.ln_nportals = MAX_PORTALS;
 	the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size);
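
Editor's aside (illustrative, not part of the patch): lnet_portals_create() above now sizes each portal with offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]), so the per-CPT map array at the tail of the struct is allocated in the same chunk as the portal itself. The sketch below shows the same sizing trick on a hypothetical struct; it uses a compile-time constant because a run-time count inside offsetof(), as the patch uses, relies on the builtin offsetof of GCC-class compilers.

/* Illustrative sketch only; mirrors the offsetof() sizing in the patch. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

enum { NCPTS = 4 };	/* stand-in for LNET_CPT_NUMBER */

struct portal_sketch {
	int	index;
	int	nmaps;
	int	mt_maps[];	/* one slot per CPU partition */
};

int main(void)
{
	/* header plus NCPTS trailing ints, allocated as one chunk */
	size_t size = offsetof(struct portal_sketch, mt_maps[NCPTS]);
	struct portal_sketch *p = calloc(1, size);

	if (p == NULL)
		return 1;

	printf("portal takes %zu bytes for %d CPTs\n", size, NCPTS);
	free(p);
	return 0;
}
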
@@ -547,12 +833,17 @@ LNetSetLazyPortal(int portal)
 	CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
 	ptl = the_lnet.ln_portals[portal];
 
-	lnet_res_lock();
+	lnet_res_lock(LNET_LOCK_EX);
+	lnet_ptl_lock(ptl);
+
 	lnet_ptl_setopt(ptl, LNET_PTL_LAZY);
-	lnet_res_unlock();
+
+	lnet_ptl_unlock(ptl);
+	lnet_res_unlock(LNET_LOCK_EX);
 
 	return 0;
 }
+EXPORT_SYMBOL(LNetSetLazyPortal);
 
 /**
  * Turn off the lazy portal attribute. Delayed requests on the portal,
@@ -574,10 +865,12 @@ LNetClearLazyPortal(int portal)
 
 	ptl = the_lnet.ln_portals[portal];
 
-	lnet_res_lock();
+	lnet_res_lock(LNET_LOCK_EX);
+	lnet_ptl_lock(ptl);
 
 	if (!lnet_ptl_is_lazy(ptl)) {
-		lnet_res_unlock();
+		lnet_ptl_unlock(ptl);
+		lnet_res_unlock(LNET_LOCK_EX);
 		return 0;
 	}
 
@@ -587,13 +880,15 @@ LNetClearLazyPortal(int portal)
 	CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
 
 	/* grab all the blocked messages atomically */
-	cfs_list_splice_init(&ptl->ptl_msgq, &zombies);
+	cfs_list_splice_init(&ptl->ptl_msg_delayed, &zombies);
 
 	lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
 
-	lnet_res_unlock();
+	lnet_ptl_unlock(ptl);
+	lnet_res_unlock(LNET_LOCK_EX);
 
 	lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr");
 
 	return 0;
 }
+EXPORT_SYMBOL(LNetClearLazyPortal);
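
Editor's aside (illustrative, not part of the patch): LNetSetLazyPortal() and LNetClearLazyPortal(), exported above, are the public switches for the ptl_msg_delayed queue this patch introduces — with the lazy attribute set, requests that arrive before buffers are posted are parked instead of dropped. Below is a hedged usage sketch; SRV_PORTAL, service_start() and service_stop() are hypothetical names, the header name is an assumption, and error handling is trimmed.

/* Illustrative sketch only; a caller of the exported lazy-portal API. */
#include <lnet/api.h>	/* header name assumed for this sketch */

#define SRV_PORTAL 20	/* hypothetical service portal index */

static int service_start(void)
{
	int rc;

	/* park (don't drop) requests that race ahead of buffer posting */
	rc = LNetSetLazyPortal(SRV_PORTAL);
	if (rc != 0)
		return rc;

	/* ... post request buffers (LNetMEAttach/LNetMDAttach) ... */
	return 0;
}

static void service_stop(void)
{
	/* drops whatever is still sitting on the delayed queue */
	LNetClearLazyPortal(SRV_PORTAL);
}
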