*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* must be called with lnet_res_lock held */
void
-lnet_md_unlink(lnet_libmd_t *md)
+lnet_md_unlink(struct lnet_libmd *md)
{
if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
/* first unlink attempt... */
- lnet_me_t *me = md->md_me;
+ struct lnet_me *me = md->md_me;
md->md_flags |= LNET_MD_FLAG_ZOMBIE;
lnet_md_free(md);
}
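+/**
+ * lnet_kvaddr_to_page() - resolve a kernel virtual address to its page
+ * @vaddr: kernel virtual address (direct map, vmalloc, or kmap range)
+ *
+ * vmalloc addresses are resolved with vmalloc_to_page(). On
+ * CONFIG_HIGHMEM kernels the persistent kmap range is handled by
+ * kmap_to_page() when the kernel exports it; all other addresses go
+ * through virt_to_page().
+ */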
+struct page *
+lnet_kvaddr_to_page(unsigned long vaddr)
+{
+ if (is_vmalloc_addr((void *)vaddr))
+ return vmalloc_to_page((void *)vaddr);
+
+#ifdef CONFIG_HIGHMEM
+
+#ifdef HAVE_KMAP_TO_PAGE
+ /*
+ * Kernels that export kmap_to_page() can resolve any kernel
+ * virtual address, including the persistent kmap range, so use
+ * it when it is available. Otherwise fall back to the legacy
+ * range check below.
+ */
+ return kmap_to_page((void *)vaddr);
+#else
+
+ if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /*
+ * Highmem pages are only used for bulk (kiov) I/O, so a
+ * kvec address should never fall in the persistent kmap
+ * range.
+ */
+ CERROR("unexpected highmem address 0x%lx\n", vaddr);
+ LBUG();
+ }
+ return virt_to_page(vaddr);
+#endif /* HAVE_KMAP_TO_PAGE */
+#else
+
+ return virt_to_page(vaddr);
+#endif /* CONFIG_HIGHMEM */
+}
+EXPORT_SYMBOL(lnet_kvaddr_to_page);
+
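+/**
+ * lnet_cpt_of_md() - CPT of the memory an MD describes
+ * @md: the memory descriptor
+ * @offset: offset into the MD of the data that will be DMAed
+ *
+ * Returns the CPT of the NUMA node backing the page at @offset, looking
+ * through the bulk MD when one is attached, or CFS_CPT_ANY when no page
+ * can be resolved.
+ */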
+int
+lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
+{
+ int cpt = CFS_CPT_ANY;
+ unsigned int niov;
+
+ /*
+ * If the MD carries a bulk handle, look at the bulk MD instead,
+ * since that describes the memory which will actually be DMAed.
+ */
+ if (md && (md->md_options & LNET_MD_BULK_HANDLE) != 0 &&
+ !LNetMDHandleIsInvalid(md->md_bulk_handle))
+ md = lnet_handle2md(&md->md_bulk_handle);
+
+ if (!md || md->md_niov == 0)
+ return CFS_CPT_ANY;
+
+ niov = md->md_niov;
+
+ /*
+ * There are three cases to handle:
+ * 1. The MD describes pages directly via lnet_kiov_t.
+ * 2. The MD uses struct kvec; virt_to_page() yields the page
+ * backing the memory the kvec describes.
+ * 3. The MD is a contiguous buffer allocated via vmalloc;
+ * is_vmalloc_addr() and vmalloc_to_page() resolve it.
+ *
+ * lnet_kvaddr_to_page() below handles both cases 2 and 3.
+ *
+ * The offset provided can land within the first iov/kiov entry
+ * or beyond it, so walk the entries to find the page which
+ * actually contains the data that will be DMAed.
+ */
+ if ((md->md_options & LNET_MD_KIOV) != 0) {
+ lnet_kiov_t *kiov = md->md_iov.kiov;
+
+ while (offset >= kiov->kiov_len) {
+ offset -= kiov->kiov_len;
+ niov--;
+ kiov++;
+ if (niov == 0) {
+ CERROR("offset %d goes beyond kiov\n", offset);
+ goto out;
+ }
+ }
+
+ cpt = cfs_cpt_of_node(lnet_cpt_table(),
+ page_to_nid(kiov->kiov_page));
+ } else {
+ struct kvec *iov = md->md_iov.iov;
+ unsigned long vaddr;
+ struct page *page;
+
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ niov--;
+ iov++;
+ if (niov == 0) {
+ CERROR("offset %d goes beyond iov\n", offset);
+ goto out;
+ }
+ }
+
+ vaddr = ((unsigned long)iov->iov_base) + offset;
+ page = lnet_kvaddr_to_page(vaddr);
+ if (!page) {
+ CERROR("Couldn't resolve vaddr 0x%lx to page\n", vaddr);
+ goto out;
+ }
+ cpt = cfs_cpt_of_node(lnet_cpt_table(), page_to_nid(page));
+ }
+
+out:
+ return cpt;
+}
+
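+/*
+ * Usage sketch (the caller shown is hypothetical, for illustration
+ * only): a sender could bind its work to the CPT of the memory being
+ * DMAed, falling back to the current CPT when the MD gives no hint:
+ *
+ *	cpt = lnet_cpt_of_md(md, offset);
+ *	if (cpt == CFS_CPT_ANY)
+ *		cpt = lnet_cpt_current();
+ */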
static int
-lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
+lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
{
int i;
unsigned int niov;
lmd->md_threshold = umd->threshold;
lmd->md_refcount = 0;
lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
+ lmd->md_bulk_handle = umd->bulk_handle;
if ((umd->options & LNET_MD_IOVEC) != 0) {
/* must be called with resource lock held */
static int
-lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
+lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt)
{
struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
* maybe there we shouldn't even allow LNET_EQ_NONE!)
* LASSERT (eq == NULL);
*/
- if (!LNetHandleIsInvalid(eq_handle)) {
+ if (!LNetEQHandleIsInvalid(eq_handle)) {
md->md_eq = lnet_handle2eq(&eq_handle);
if (md->md_eq == NULL)
/* must be called with lnet_res_lock held */
void
-lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
+lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
{
/* NB this doesn't copy out all the iov entries so when a
* discontiguous MD is copied out, the target gets to know the
}
static int
-lnet_md_validate(lnet_md_t *umd)
+lnet_md_validate(struct lnet_md *umd)
{
if (umd->start == NULL && umd->length != 0) {
CERROR("MD start pointer can not be NULL with length %u\n",
CERROR("Invalid option: too many fragments %u, %d max\n",
umd->length, LNET_MAX_IOV);
return -EINVAL;
+ } else if (umd->length > LNET_MTU) {
+ CERROR("Invalid length: too big transfer size %u, %d max\n",
+ umd->length, LNET_MTU);
+ return -EINVAL;
}
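+ /*
+ * NB (summary of the checks above): for a plain contiguous MD,
+ * umd->length is the transfer size in bytes and is capped at
+ * LNET_MTU; with LNET_MD_IOVEC or LNET_MD_KIOV set it counts
+ * fragments and is capped at LNET_MAX_IOV.
+ */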
return 0;
* an MD.
*/
int
-LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
- lnet_unlink_t unlink, lnet_handle_md_t *handle)
+LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd,
+ enum lnet_unlink unlink, struct lnet_handle_md *handle)
{
- struct list_head matches = LIST_HEAD_INIT(matches);
- struct list_head drops = LIST_HEAD_INIT(drops);
+ LIST_HEAD(matches);
+ LIST_HEAD(drops);
struct lnet_me *me;
struct lnet_libmd *md;
int cpt;
* LNetInvalidateHandle() on it.
*/
int
-LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
+LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
+ struct lnet_handle_md *handle)
{
- lnet_libmd_t *md;
+ struct lnet_libmd *md;
int cpt;
int rc;
* \retval -ENOENT If \a mdh does not point to a valid MD object.
*/
int
-LNetMDUnlink (lnet_handle_md_t mdh)
+LNetMDUnlink(struct lnet_handle_md mdh)
{
- lnet_event_t ev;
- lnet_libmd_t *md;
- int cpt;
+ struct lnet_event ev;
+ struct lnet_libmd *md;
+ int cpt;
LASSERT(the_lnet.ln_refcount > 0);