X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Flnet%2Flib-md.c;h=b7b09398a0379cde2b0ef4f9f7d698cfbd2f1239;hb=7a74d382d5e8867785f662aede54a3e399168325;hp=22369802c08d8b0e9cdc9c960d7484d082975042;hpb=c14ef7b790e1de58ed8757e0406b9dc9fad8029b;p=fs%2Flustre-release.git

diff --git a/lnet/lnet/lib-md.c b/lnet/lnet/lib-md.c
index 2236980..b7b0939 100644
--- a/lnet/lnet/lib-md.c
+++ b/lnet/lnet/lib-md.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -40,11 +40,11 @@
 
 /* must be called with lnet_res_lock held */
 void
-lnet_md_unlink(lnet_libmd_t *md)
+lnet_md_unlink(struct lnet_libmd *md)
 {
 	if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
 		/* first unlink attempt... */
-		lnet_me_t *me = md->md_me;
+		struct lnet_me *me = md->md_me;
 
 		md->md_flags |= LNET_MD_FLAG_ZOMBIE;
 
@@ -80,36 +80,117 @@ lnet_md_unlink(lnet_libmd_t *md)
 	lnet_md_free(md);
 }
 
+struct page *
+lnet_kvaddr_to_page(unsigned long vaddr)
+{
+	if (is_vmalloc_addr((void *)vaddr))
+		return vmalloc_to_page((void *)vaddr);
+
+#ifdef CONFIG_HIGHMEM
+
+#ifdef HAVE_KMAP_TO_PAGE
+	/*
+	 * This ifdef is added to handle the kernel versions
+	 * which have kmap_to_page() function exported. If so,
+	 * we should use it. Otherwise, remain with the legacy check.
+	 */
+	return kmap_to_page((void *)vaddr);
+#else
+
+	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* No highmem pages only used for bulk (kiov) I/O */
+		CERROR("find page for address in highmem\n");
+		LBUG();
+	}
+	return virt_to_page(vaddr);
+#endif /* HAVE_KMAP_TO_PAGE */
+#else
+
+	return virt_to_page(vaddr);
+#endif /* CONFIG_HIGHMEM */
+}
+EXPORT_SYMBOL(lnet_kvaddr_to_page);
+
 int
-lnet_cpt_of_md(lnet_libmd_t *md)
+lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
 {
 	int cpt = CFS_CPT_ANY;
+	unsigned int niov;
 
-	if (!md)
-		return CFS_CPT_ANY;
-
-	if ((md->md_options & LNET_MD_BULK_HANDLE) != 0 &&
-	    !LNetHandleIsInvalid(md->md_bulk_handle)) {
+	/*
+	 * if the md_options has a bulk handle then we want to look at the
+	 * bulk md because that's the data which we will be DMAing
+	 */
+	if (md && (md->md_options & LNET_MD_BULK_HANDLE) != 0 &&
+	    !LNetMDHandleIsInvalid(md->md_bulk_handle))
 		md = lnet_handle2md(&md->md_bulk_handle);
 
-		if (!md)
-			return CFS_CPT_ANY;
-	}
+	if (!md || md->md_niov == 0)
+		return CFS_CPT_ANY;
 
+	niov = md->md_niov;
+
+	/*
+	 * There are three cases to handle:
+	 *  1. The MD is using struct bio_vec
+	 *  2. The MD is using struct kvec
+	 *  3. Contiguous buffer allocated via vmalloc
+	 *
+	 * in case 2 we can use virt_to_page() macro to get the page
+	 * address of the memory kvec describes.
+	 *
+	 * in case 3 use is_vmalloc_addr() and vmalloc_to_page()
+	 *
+	 * The offset provided can be within the first iov/kiov entry or
+	 * it could go beyond it. In that case we need to make sure to
+	 * look at the page which actually contains the data that will be
+	 * DMAed.
+	 */
 	if ((md->md_options & LNET_MD_KIOV) != 0) {
-		if (md->md_iov.kiov[0].kiov_page != NULL)
-			cpt = cfs_cpt_of_node(lnet_cpt_table(),
-				page_to_nid(md->md_iov.kiov[0].kiov_page));
-	} else if (md->md_iov.iov[0].iov_base != NULL) {
+		struct bio_vec *kiov = md->md_iov.kiov;
+
+		while (offset >= kiov->bv_len) {
+			offset -= kiov->bv_len;
+			niov--;
+			kiov++;
+			if (niov == 0) {
+				CERROR("offset %d goes beyond kiov\n", offset);
+				goto out;
+			}
+		}
+
 		cpt = cfs_cpt_of_node(lnet_cpt_table(),
-			page_to_nid(virt_to_page(md->md_iov.iov[0].iov_base)));
+				      page_to_nid(kiov->bv_page));
+	} else {
+		struct kvec *iov = md->md_iov.iov;
+		unsigned long vaddr;
+		struct page *page;
+
+		while (offset >= iov->iov_len) {
+			offset -= iov->iov_len;
+			niov--;
+			iov++;
+			if (niov == 0) {
+				CERROR("offset %d goes beyond iov\n", offset);
+				goto out;
+			}
+		}
+
+		vaddr = ((unsigned long)iov->iov_base) + offset;
+		page = lnet_kvaddr_to_page(vaddr);
+		if (!page) {
+			CERROR("Couldn't resolve vaddr 0x%lx to page\n", vaddr);
+			goto out;
+		}
+		cpt = cfs_cpt_of_node(lnet_cpt_table(), page_to_nid(page));
 	}
 
+out:
 	return cpt;
 }
 
 static int
-lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
+lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
 {
 	int		i;
 	unsigned int	niov;
@@ -158,11 +239,11 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 
 		for (i = 0; i < (int)niov; i++) {
 			/* We take the page pointer on trust */
-			if (lmd->md_iov.kiov[i].kiov_offset +
-			    lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
+			if (lmd->md_iov.kiov[i].bv_offset +
+			    lmd->md_iov.kiov[i].bv_len > PAGE_SIZE)
 				return -EINVAL; /* invalid length */
 
-			total_length += lmd->md_iov.kiov[i].kiov_len;
+			total_length += lmd->md_iov.kiov[i].bv_len;
 		}
 
 		lmd->md_length = total_length;
@@ -188,7 +269,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 
 /* must be called with resource lock held */
 static int
-lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
+lnet_md_link(struct lnet_libmd *md, struct lnet_eq *eq, int cpt)
 {
 	struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
 
@@ -204,12 +285,8 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
 	 * maybe there we shouldn't even allow LNET_EQ_NONE!)
 	 * LASSERT (eq == NULL);
 	 */
-	if (!LNetHandleIsInvalid(eq_handle)) {
-		md->md_eq = lnet_handle2eq(&eq_handle);
-
-		if (md->md_eq == NULL)
-			return -ENOENT;
-
+	if (eq) {
+		md->md_eq = eq;
 		(*md->md_eq->eq_refs[cpt])++;
 	}
 
@@ -223,7 +300,7 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
 
 /* must be called with lnet_res_lock held */
 void
-lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
+lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
 {
 	/* NB this doesn't copy out all the iov entries so when a
 	 * discontiguous MD is copied out, the target gets to know the
@@ -237,11 +314,10 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
 	umd->max_size = lmd->md_max_size;
 	umd->options = lmd->md_options;
 	umd->user_ptr = lmd->md_user_ptr;
-	lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
 }
 
 static int
-lnet_md_validate(lnet_md_t *umd)
+lnet_md_validate(struct lnet_md *umd)
 {
 	if (umd->start == NULL && umd->length != 0) {
 		CERROR("MD start pointer can not be NULL with length %u\n",
@@ -262,7 +338,7 @@ lnet_md_validate(lnet_md_t *umd)
 /**
  * Create a memory descriptor and attach it to a ME
  *
- * \param meh A handle for a ME to associate the new MD with.
+ * \param me An ME to associate the new MD with.
  * \param umd Provides initial values for the user-visible parts of a MD.
  * Other than its use for initialization, there is no linkage between this
  * structure and the MD maintained by the LNet.
@@ -285,12 +361,11 @@ lnet_md_validate(lnet_md_t *umd)
  * a MD.
  */
 int
-LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
-	     lnet_unlink_t unlink, lnet_handle_md_t *handle)
+LNetMDAttach(struct lnet_me *me, struct lnet_md umd,
+	     enum lnet_unlink unlink, struct lnet_handle_md *handle)
 {
-	struct list_head	matches = LIST_HEAD_INIT(matches);
-	struct list_head	drops = LIST_HEAD_INIT(drops);
-	struct lnet_me		*me;
+	LIST_HEAD(matches);
+	LIST_HEAD(drops);
 	struct lnet_libmd	*md;
 	int			cpt;
 	int			rc;
@@ -313,14 +388,11 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
 	if (rc != 0)
 		goto out_free;
 
-	cpt = lnet_cpt_of_cookie(meh.cookie);
+	cpt = me->me_cpt;
 
 	lnet_res_lock(cpt);
 
-	me = lnet_handle2me(&meh);
-	if (me == NULL)
-		rc = -ENOENT;
-	else if (me->me_md != NULL)
+	if (me->me_md)
 		rc = -EBUSY;
 	else
 		rc = lnet_md_link(md, umd.eq_handle, cpt);
@@ -366,9 +438,10 @@ EXPORT_SYMBOL(LNetMDAttach);
  * LNetInvalidateHandle() on it.
  */
 int
-LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
+LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
+	   struct lnet_handle_md *handle)
 {
-	lnet_libmd_t	*md;
+	struct lnet_libmd	*md;
 	int		cpt;
 	int		rc;
 
@@ -390,6 +463,13 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
 	if (rc != 0)
 		goto out_free;
 
+	if (md->md_length > LNET_MTU) {
+		CERROR("Invalid length: too big transfer size %u, %d max\n",
+		       md->md_length, LNET_MTU);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
 	cpt = lnet_res_lock_current();
 
 	rc = lnet_md_link(md, umd.eq_handle, cpt);
@@ -441,11 +521,11 @@ EXPORT_SYMBOL(LNetMDBind);
  * \retval -ENOENT If \a mdh does not point to a valid MD object.
  */
 int
-LNetMDUnlink (lnet_handle_md_t mdh)
+LNetMDUnlink(struct lnet_handle_md mdh)
 {
-	lnet_event_t	ev;
-	lnet_libmd_t	*md;
-	int		cpt;
+	struct lnet_event	ev;
+	struct lnet_libmd	*md;
+	int			cpt;
 
 	LASSERT(the_lnet.ln_refcount > 0);
 
@@ -467,6 +547,9 @@ LNetMDUnlink (lnet_handle_md_t mdh)
 		lnet_eq_enqueue_event(md->md_eq, &ev);
 	}
 
+	if (md->md_rspt_ptr != NULL)
+		lnet_detach_rsp_tracker(md, cpt);
+
 	lnet_md_unlink(md);
 
 	lnet_res_unlock(cpt);
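The heart of the lnet_cpt_of_md() rework in this patch is an offset walk: rather than always inspecting the first iov/kiov entry, the function skips whole entries until the remaining offset falls inside one, and only that entry's page is mapped to a NUMA node (page_to_nid()) and CPT (cfs_cpt_of_node()). The stand-alone sketch below illustrates only the walk, in plain userspace C; the struct frag type and frag_of_offset() helper are hypothetical names used for illustration and are not part of the patch or of the LNet API.

/*
 * Illustrative only -- not part of the patch.  A userspace analogue of the
 * offset walk performed by the reworked lnet_cpt_of_md(): skip whole
 * fragments until the offset falls inside one, then report which fragment
 * holds the data and the residual offset within it.
 */
#include <stdio.h>

struct frag {			/* stand-in for one struct kvec/bio_vec entry */
	unsigned int len;	/* number of bytes this entry describes */
};

/*
 * Return the index of the fragment containing 'offset', storing the offset
 * within that fragment in '*residual'.  Return -1 if 'offset' runs past the
 * end of the array (the kernel code CERRORs and falls back to CFS_CPT_ANY).
 */
static int frag_of_offset(const struct frag *frags, unsigned int nfrags,
			  unsigned int offset, unsigned int *residual)
{
	unsigned int i;

	for (i = 0; i < nfrags; i++) {
		if (offset < frags[i].len) {
			*residual = offset;	/* offset lies inside entry i */
			return (int)i;
		}
		offset -= frags[i].len;		/* skip this entry entirely */
	}
	return -1;				/* offset beyond the MD */
}

int main(void)
{
	/* Three fragments of 4096, 2048 and 4096 bytes. */
	struct frag frags[] = { { 4096 }, { 2048 }, { 4096 } };
	unsigned int residual = 0;
	int idx = frag_of_offset(frags, 3, 5000, &residual);

	/* Offset 5000 lands 904 bytes into the second fragment (index 1);
	 * the kernel code would then map that fragment's page to a CPT via
	 * page_to_nid() and cfs_cpt_of_node(). */
	printf("fragment %d, offset within fragment %u\n", idx, residual);
	return 0;
}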