X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Flnet%2Flib-ptl.c;h=621a27ac5a5c2706de908c8594696e3439068224;hb=12e311012ae337276dc3e7da3e7ad8d85d11e764;hp=1cfc87cc71679562b58d6bab15ad5d4435230960;hpb=e531dc437c56a08a65de9074a511faa55184712b;p=fs%2Flustre-release.git

diff --git a/lnet/lnet/lib-ptl.c b/lnet/lnet/lib-ptl.c
index 1cfc87c..621a27a 100644
--- a/lnet/lnet/lib-ptl.c
+++ b/lnet/lnet/lib-ptl.c
@@ -21,7 +21,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -39,7 +39,7 @@
 #include <lnet/lib-lnet.h>
 
 /* NB: add /proc interfaces in upcoming patches */
-int portal_rotor;
+int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
 CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
                 "redirect PUTs to different cpu-partitions");
 
@@ -259,34 +259,42 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
         }
 }
 
-struct lnet_match_table *
-lnet_mt_of_match(unsigned int index, lnet_process_id_t id, __u64 mbits)
+static struct lnet_match_table *
+lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
 {
         struct lnet_match_table *mtable;
         struct lnet_portal *ptl;
         int nmaps;
         int rotor;
+        int routed;
         int cpt;
 
         /* NB: called w/o lock */
-        LASSERT(index < the_lnet.ln_nportals);
-        ptl = the_lnet.ln_portals[index];
+        LASSERT(info->mi_portal < the_lnet.ln_nportals);
+        ptl = the_lnet.ln_portals[info->mi_portal];
         LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
 
-        mtable = lnet_match2mt(ptl, id, mbits);
+        mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
         if (mtable != NULL)
                 return mtable;
 
         /* it's a wildcard portal */
-        if (!portal_rotor) {
+        routed = LNET_NIDNET(msg->msg_hdr.src_nid) !=
+                 LNET_NIDNET(msg->msg_hdr.dest_nid);
+
+        if (portal_rotor == LNET_PTL_ROTOR_OFF ||
+            (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
                 cpt = lnet_cpt_current();
                 if (ptl->ptl_mtables[cpt]->mt_enabled)
                         return ptl->ptl_mtables[cpt];
         }
 
-        rotor = ptl->ptl_rotor++;
-        cpt = rotor % LNET_CPT_NUMBER;
+        rotor = ptl->ptl_rotor++; /* get round-robin factor */
+        if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
+                cpt = lnet_cpt_of_nid(msg->msg_hdr.src_nid);
+        else
+                cpt = rotor % LNET_CPT_NUMBER;
 
         if (!ptl->ptl_mtables[cpt]->mt_enabled) {
                 /* is there any active entry for this portal? */
@@ -303,20 +311,62 @@ lnet_mt_of_match(unsigned int index, lnet_process_id_t id, __u64 mbits)
         return ptl->ptl_mtables[cpt];
 }
 
-cfs_list_t *
+static int
+lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
+{
+        __u64 *bmap;
+        int i;
+
+        if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
+                return 0;
+
+        if (pos < 0) { /* check all bits */
+                for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
+                        if (mtable->mt_exhausted[i] != (__u64)(-1))
+                                return 0;
+                }
+                return 1;
+        }
+
+        LASSERT(pos <= LNET_MT_HASH_IGNORE);
+        /* mtable::mt_mhash[pos] is marked as exhausted or not */
+        bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
+        pos &= (1 << LNET_MT_BITS_U64) - 1;
+
+        return ((*bmap) & (1ULL << pos)) != 0;
+}
+
+static void
+lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
+{
+        __u64 *bmap;
+
+        LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
+        LASSERT(pos <= LNET_MT_HASH_IGNORE);
+
+        /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
+        bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
+        pos &= (1 << LNET_MT_BITS_U64) - 1;
+
+        if (!exhausted)
+                *bmap &= ~(1ULL << pos);
+        else
+                *bmap |= 1ULL << pos;
+}
+
+struct list_head *
 lnet_mt_match_head(struct lnet_match_table *mtable,
                    lnet_process_id_t id, __u64 mbits)
 {
         struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
 
         if (lnet_ptl_is_wildcard(ptl)) {
-                return &mtable->mt_mlist;
-
+                return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
         } else {
                 unsigned long hash = mbits + id.nid + id.pid;
 
                 LASSERT(lnet_ptl_is_unique(ptl));
-                hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
+                hash = hash_long(hash, LNET_MT_HASH_BITS);
                 return &mtable->mt_mhash[hash];
         }
 }
@@ -325,19 +375,23 @@ int
 lnet_mt_match_md(struct lnet_match_table *mtable,
                  struct lnet_match_info *info, struct lnet_msg *msg)
 {
-        cfs_list_t *head;
+        struct list_head *head;
         lnet_me_t *me;
         lnet_me_t *tmp;
         int exhausted = 0;
         int rc;
 
-        /* NB: only wildcard portal can return LNET_MATCHMD_EXHAUSTED */
+        /* any ME with ignore bits? */
+        if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
+                head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
+        else
+                head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
+ again:
+        /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
         if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
                 exhausted = LNET_MATCHMD_EXHAUSTED;
 
-        head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
-
-        cfs_list_for_each_entry_safe(me, tmp, head, me_list) {
+        list_for_each_entry_safe(me, tmp, head, me_list) {
                 /* ME attached but MD not attached yet */
                 if (me->me_md == NULL)
                         continue;
@@ -355,6 +409,17 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
                 }
         }
 
+        if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
+                lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
+                if (!lnet_mt_test_exhausted(mtable, -1))
+                        exhausted = 0;
+        }
+
+        if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
+                head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
+                goto again; /* re-check MEs w/o ignore-bits */
+        }
+
         if (info->mi_opc == LNET_MD_OP_GET ||
             !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
                 return LNET_MATCHMD_DROP | exhausted;
@@ -382,8 +447,8 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
         if (lnet_ptl_is_lazy(ptl)) {
                 if (msg->msg_rx_ready_delay) {
                         msg->msg_rx_delayed = 1;
-                        cfs_list_add_tail(&msg->msg_list,
-                                          &ptl->ptl_msg_delayed);
+                        list_add_tail(&msg->msg_list,
+                                      &ptl->ptl_msg_delayed);
                 }
                 rc = LNET_MATCHMD_NONE;
         } else {
@@ -420,11 +485,11 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
                 lnet_ptl_lock(ptl);
 
                 if (i == 0) { /* the first try, attach on stealing list */
-                        cfs_list_add_tail(&msg->msg_list,
-                                          &ptl->ptl_msg_stealing);
+                        list_add_tail(&msg->msg_list,
+                                      &ptl->ptl_msg_stealing);
                 }
 
-                if (!cfs_list_empty(&msg->msg_list)) { /* on stealing list */
+                if (!list_empty(&msg->msg_list)) { /* on stealing list */
                         rc = lnet_mt_match_md(mtable, info, msg);
 
                         if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
@@ -432,7 +497,7 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
                                 lnet_ptl_disable_mt(ptl, cpt);
 
                         if ((rc & LNET_MATCHMD_FINISH) != 0)
-                                cfs_list_del_init(&msg->msg_list);
+                                list_del_init(&msg->msg_list);
 
                 } else {
                         /* could be matched by lnet_ptl_attach_md()
@@ -441,18 +506,18 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
                               LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
                 }
 
-                if (!cfs_list_empty(&msg->msg_list) && /* not matched yet */
+                if (!list_empty(&msg->msg_list) && /* not matched yet */
                     (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
                      ptl->ptl_mt_nmaps == 0 ||   /* no active CPT */
                      (ptl->ptl_mt_nmaps == 1 &&  /* the only active CPT */
                       ptl->ptl_mt_maps[0] == cpt))) {
                         /* nothing to steal, delay or drop */
-                        cfs_list_del_init(&msg->msg_list);
+                        list_del_init(&msg->msg_list);
 
                         if (lnet_ptl_is_lazy(ptl)) {
                                 msg->msg_rx_delayed = 1;
-                                cfs_list_add_tail(&msg->msg_list,
-                                                  &ptl->ptl_msg_delayed);
+                                list_add_tail(&msg->msg_list,
+                                              &ptl->ptl_msg_delayed);
                                 rc = LNET_MATCHMD_NONE;
                         } else {
                                 rc = LNET_MATCHMD_DROP;
@@ -491,8 +556,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
         if (rc != 0) /* matched or delayed early message */
                 return rc;
 
-        mtable = lnet_mt_of_match(info->mi_portal,
-                                  info->mi_id, info->mi_mbits);
+        mtable = lnet_mt_of_match(info, msg);
         lnet_res_lock(mtable->mt_cpt);
 
         if (the_lnet.ln_shutdown) {
@@ -521,7 +585,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
         lnet_ptl_lock(ptl);
 
         msg->msg_rx_delayed = 1;
-        cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
+        list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
         lnet_ptl_unlock(ptl);
 
         lnet_res_unlock(mtable->mt_cpt);
@@ -558,11 +622,11 @@ lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md)
 /* called with lnet_res_lock held */
 void
 lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
-                   cfs_list_t *matches, cfs_list_t *drops)
+                   struct list_head *matches, struct list_head *drops)
 {
         struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
         struct lnet_match_table *mtable;
-        cfs_list_t *head;
+        struct list_head *head;
         lnet_msg_t *tmp;
         lnet_msg_t *msg;
         int exhausted = 0;
@@ -576,15 +640,15 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
         cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
         mtable = ptl->ptl_mtables[cpt];
 
-        if (cfs_list_empty(&ptl->ptl_msg_stealing) &&
-            cfs_list_empty(&ptl->ptl_msg_delayed) &&
-            mtable->mt_enabled)
+        if (list_empty(&ptl->ptl_msg_stealing) &&
+            list_empty(&ptl->ptl_msg_delayed) &&
+            !lnet_mt_test_exhausted(mtable, me->me_pos))
                 return;
 
         lnet_ptl_lock(ptl);
         head = &ptl->ptl_msg_stealing;
 again:
-        cfs_list_for_each_entry_safe(msg, tmp, head, msg_list) {
+        list_for_each_entry_safe(msg, tmp, head, msg_list) {
                 struct lnet_match_info info;
                 lnet_hdr_t *hdr;
                 int rc;
@@ -611,7 +675,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
                 /* Hurrah! This _is_ a match */
                 LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
 
-                cfs_list_del_init(&msg->msg_list);
+                list_del_init(&msg->msg_list);
 
                 if (head == &ptl->ptl_msg_stealing) {
                         if (exhausted)
@@ -621,7 +685,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
                 }
 
                 if ((rc & LNET_MATCHMD_OK) != 0) {
-                        cfs_list_add_tail(&msg->msg_list, matches);
+                        list_add_tail(&msg->msg_list, matches);
 
                         CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
                                "match "LPU64" offset %d length %d.\n",
@@ -629,7 +693,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
                                info.mi_portal, info.mi_mbits,
                                info.mi_roffset, info.mi_rlength);
                 } else {
-                        cfs_list_add_tail(&msg->msg_list, drops);
+                        list_add_tail(&msg->msg_list, drops);
                 }
 
                 if (exhausted)
@@ -641,8 +705,11 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
                 goto again;
         }
 
-        if (lnet_ptl_is_wildcard(ptl) && !exhausted && !mtable->mt_enabled)
-                lnet_ptl_enable_mt(ptl, cpt);
+        if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
+                lnet_mt_set_exhausted(mtable, me->me_pos, 0);
+                if (!mtable->mt_enabled)
+                        lnet_ptl_enable_mt(ptl, cpt);
+        }
 
         lnet_ptl_unlock(ptl);
 }
@@ -656,42 +723,34 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
         if (ptl->ptl_mtables == NULL) /* uninitialized portal */
                 return;
 
-        LASSERT(cfs_list_empty(&ptl->ptl_msg_delayed));
-        LASSERT(cfs_list_empty(&ptl->ptl_msg_stealing));
+        LASSERT(list_empty(&ptl->ptl_msg_delayed));
+        LASSERT(list_empty(&ptl->ptl_msg_stealing));
 #ifndef __KERNEL__
 # ifdef HAVE_LIBPTHREAD
         pthread_mutex_destroy(&ptl->ptl_lock);
 # endif
#endif
         cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
-                cfs_list_t *mhash;
-                lnet_me_t *me;
-                int j;
+                struct list_head *mhash;
+                lnet_me_t *me;
+                int j;
 
                 if (mtable->mt_mhash == NULL) /* uninitialized match-table */
                         continue;
 
                 mhash = mtable->mt_mhash;
                 /* cleanup ME */
-                while (!cfs_list_empty(&mtable->mt_mlist)) {
-                        me = cfs_list_entry(mtable->mt_mlist.next,
-                                            lnet_me_t, me_list);
-                        CERROR("Active wildcard ME %p on exit\n", me);
-                        cfs_list_del(&me->me_list);
-                        lnet_me_free(me);
-                }
-
-                for (j = 0; j < LNET_MT_HASH_SIZE; j++) {
-                        while (!cfs_list_empty(&mhash[j])) {
-                                me = cfs_list_entry(mhash[j].next,
-                                                    lnet_me_t, me_list);
-                                CERROR("Active unique ME %p on exit\n", me);
-                                cfs_list_del(&me->me_list);
+                for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
+                        while (!list_empty(&mhash[j])) {
+                                me = list_entry(mhash[j].next,
+                                                lnet_me_t, me_list);
+                                CERROR("Active ME %p on exit\n", me);
+                                list_del(&me->me_list);
                                 lnet_me_free(me);
                         }
                 }
-
-                LIBCFS_FREE(mhash, sizeof(*mhash) * LNET_MT_HASH_SIZE);
+                /* the extra entry is for MEs with ignore bits */
+                LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
         }
 
         cfs_percpt_free(ptl->ptl_mtables);
@@ -702,7 +761,7 @@ int
 lnet_ptl_setup(struct lnet_portal *ptl, int index)
 {
         struct lnet_match_table *mtable;
-        cfs_list_t *mhash;
+        struct list_head *mhash;
         int i;
         int j;
 
@@ -714,29 +773,32 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
         }
 
         ptl->ptl_index = index;
-        CFS_INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
-        CFS_INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
+        INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
+        INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
 #ifdef __KERNEL__
-        cfs_spin_lock_init(&ptl->ptl_lock);
+        spin_lock_init(&ptl->ptl_lock);
 #else
 # ifdef HAVE_LIBPTHREAD
         pthread_mutex_init(&ptl->ptl_lock, NULL);
 # endif
#endif
         cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
+                /* the extra entry is for MEs with ignore bits */
                 LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
-                                 sizeof(*mhash) * LNET_MT_HASH_SIZE);
+                                 sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
                 if (mhash == NULL) {
                         CERROR("Failed to create match hash for portal %d\n",
                                index);
                         goto failed;
                 }
 
+                memset(&mtable->mt_exhausted[0], -1,
+                       sizeof(mtable->mt_exhausted[0]) *
+                       LNET_MT_EXHAUSTED_BMAP);
                 mtable->mt_mhash = mhash;
-                for (j = 0; j < LNET_MT_HASH_SIZE; j++)
-                        CFS_INIT_LIST_HEAD(&mhash[j]);
+                for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
+                        INIT_LIST_HEAD(&mhash[j]);
 
-                CFS_INIT_LIST_HEAD(&mtable->mt_mlist);
                 mtable->mt_portal = index;
                 mtable->mt_cpt = i;
         }
@@ -836,6 +898,7 @@ LNetSetLazyPortal(int portal)
 
         return 0;
 }
+EXPORT_SYMBOL(LNetSetLazyPortal);
 
 /**
  * Turn off the lazy portal attribute. Delayed requests on the portal,
@@ -850,7 +913,7 @@ int
 LNetClearLazyPortal(int portal)
 {
         struct lnet_portal *ptl;
-        CFS_LIST_HEAD (zombies);
+        struct list_head zombies = LIST_HEAD_INIT(zombies);
 
         if (portal < 0 || portal >= the_lnet.ln_nportals)
                 return -EINVAL;
@@ -872,7 +935,7 @@ LNetClearLazyPortal(int portal)
         CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
 
         /* grab all the blocked messages atomically */
-        cfs_list_splice_init(&ptl->ptl_msg_delayed, &zombies);
+        list_splice_init(&ptl->ptl_msg_delayed, &zombies);
 
         lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
 
@@ -883,3 +946,4 @@ LNetClearLazyPortal(int portal)
 
         return 0;
 }
+EXPORT_SYMBOL(LNetClearLazyPortal);