- if ((md->md_flags & PTL_MD_FLAG_ZOMBIE) == 0) {
- /* first unlink attempt... */
- lib_me_t *me = md->me;
-
- md->md_flags |= PTL_MD_FLAG_ZOMBIE;
-
- /* Disassociate from ME (if any), and unlink it if it was created
- * with PTL_UNLINK */
- if (me != NULL) {
- me->md = NULL;
- if (me->unlink == PTL_UNLINK)
- lib_me_unlink(nal, me);
- }
-
- /* emsure all future handle lookups fail */
- lib_invalidate_handle(nal, &md->md_lh);
- }
-
- if (md->pending != 0) {
- CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
- return;
- }
-
- CDEBUG(D_NET, "Unlinking md %p\n", md);
-
- if ((md->options & PTL_MD_KIOV) != 0) {
- if (nal->libnal_unmap_pages != NULL)
- nal->libnal_unmap_pages (nal,
- md->md_niov,
- md->md_iov.kiov,
- &md->md_addrkey);
- } else if (nal->libnal_unmap != NULL) {
- nal->libnal_unmap (nal,
- md->md_niov, md->md_iov.iov,
- &md->md_addrkey);
- }
-
- if (md->eq != NULL) {
- md->eq->eq_refcount--;
- LASSERT (md->eq->eq_refcount >= 0);
- }
-
- list_del (&md->md_list);
- lib_md_free(nal, md);
+ if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
+ /* first unlink attempt... */
+ struct lnet_me *me = md->md_me;
+
+ md->md_flags |= LNET_MD_FLAG_ZOMBIE;
+
+ /* Disassociate from ME (if any), and unlink it if it was created
+ * with LNET_UNLINK */
+ if (me != NULL) {
+ /* detach MD from portal */
+ lnet_ptl_detach_md(me, md);
+ if (me->me_unlink == LNET_UNLINK)
+ lnet_me_unlink(me);
+ }
+
+ /* ensure all future handle lookups fail */
+ lnet_res_lh_invalidate(&md->md_lh);
+ }
+
+ if (md->md_refcount != 0) {
+ CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
+ return;
+ }
+
+ CDEBUG(D_NET, "Unlinking md %p\n", md);
+
+ if (md->md_eq != NULL) {
+ int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+
+ LASSERT(*md->md_eq->eq_refs[cpt] > 0);
+ (*md->md_eq->eq_refs[cpt])--;
+ }
+
+ LASSERT(!list_empty(&md->md_list));
+ list_del_init(&md->md_list);
+ lnet_md_free(md);
+}
+
+struct page *
+lnet_kvaddr_to_page(unsigned long vaddr)
+{
+ if (is_vmalloc_addr((void *)vaddr))
+ return vmalloc_to_page((void *)vaddr);
+
+#ifdef CONFIG_HIGHMEM
+
+#ifdef HAVE_KMAP_TO_PAGE
+	/*
+	 * Kernels that export kmap_to_page() can resolve any highmem
+	 * mapping directly, so prefer it. Otherwise fall back to the
+	 * legacy PKMAP-range check below.
+	 */
+ return kmap_to_page((void *)vaddr);
+#else
+
+ if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* Highmem pages are only used for bulk (kiov) I/O, so a
+		 * kvec address should never fall in the kmap range */
+ CERROR("find page for address in highmem\n");
+ LBUG();
+ }
+ return virt_to_page(vaddr);
+#endif /* HAVE_KMAP_TO_PAGE */
+#else
+
+ return virt_to_page(vaddr);
+#endif /* CONFIG_HIGHMEM */
+}
+EXPORT_SYMBOL(lnet_kvaddr_to_page);
+
+int
+lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
+{
+ int cpt = CFS_CPT_ANY;
+ unsigned int niov;
+
+ /*
+ * if the md_options has a bulk handle then we want to look at the
+ * bulk md because that's the data which we will be DMAing
+ */
+ if (md && (md->md_options & LNET_MD_BULK_HANDLE) != 0 &&
+ !LNetMDHandleIsInvalid(md->md_bulk_handle))
+ md = lnet_handle2md(&md->md_bulk_handle);
+
+ if (!md || md->md_niov == 0)
+ return CFS_CPT_ANY;
+
+ niov = md->md_niov;
+
+ /*
+ * There are three cases to handle:
+ * 1. The MD is using lnet_kiov_t
+ * 2. The MD is using struct kvec
+ * 3. Contiguous buffer allocated via vmalloc
+ *
+ * in case 2 we can use virt_to_page() macro to get the page
+ * address of the memory kvec describes.
+ *
+ * in case 3 use is_vmalloc_addr() and vmalloc_to_page()
+ *
+ * The offset provided can be within the first iov/kiov entry or
+ * it could go beyond it. In that case we need to make sure to
+ * look at the page which actually contains the data that will be
+ * DMAed.
+ */
+ if ((md->md_options & LNET_MD_KIOV) != 0) {
+ lnet_kiov_t *kiov = md->md_iov.kiov;
+
+ while (offset >= kiov->kiov_len) {
+ offset -= kiov->kiov_len;
+ niov--;
+ kiov++;
+ if (niov == 0) {
+ CERROR("offset %d goes beyond kiov\n", offset);
+ goto out;
+ }
+ }
+
+ cpt = cfs_cpt_of_node(lnet_cpt_table(),
+ page_to_nid(kiov->kiov_page));
+ } else {
+ struct kvec *iov = md->md_iov.iov;
+ unsigned long vaddr;
+ struct page *page;
+
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ niov--;
+ iov++;
+ if (niov == 0) {
+ CERROR("offset %d goes beyond iov\n", offset);
+ goto out;
+ }
+ }
+
+ vaddr = ((unsigned long)iov->iov_base) + offset;
+ page = lnet_kvaddr_to_page(vaddr);
+ if (!page) {
+ CERROR("Couldn't resolve vaddr 0x%lx to page\n", vaddr);
+ goto out;
+ }
+ cpt = cfs_cpt_of_node(lnet_cpt_table(), page_to_nid(page));
+ }
+
+out:
+ return cpt;