+ conn->ibc_rx_pages->ibp_device = NULL;
+
+ kiblnd_free_pages(conn->ibc_rx_pages);
+
+ conn->ibc_rx_pages = NULL;
+}
+
+/*
+ * DMA-map the receive message descriptors for a connection.
+ *
+ * Carves the pre-allocated pages in conn->ibc_rx_pages into
+ * IBLND_MSG_SIZE-sized slots and DMA-maps one kib_rx_t per slot
+ * (IBLND_RX_MSGS(conn->ibc_version) descriptors in total).  On
+ * completion, records the mapping device in ibp_device so the
+ * matching unmap path knows which device to unmap against.
+ *
+ * Note: there is no error-return path; a failed DMA mapping trips
+ * the LASSERT below.
+ */
+void
+kiblnd_map_rx_descs(kib_conn_t *conn)
+{
+ kib_rx_t *rx;
+ struct page *pg;
+ int pg_off; /* byte offset of the current slot within the page */
+ int ipg; /* index of the current page in ibp_pages[] */
+ int i;
+
+ for (pg_off = ipg = i = 0;
+ i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ pg = conn->ibc_rx_pages->ibp_pages[ipg];
+ rx = &conn->ibc_rxs[i];
+
+ /* point the descriptor's message at its slot in the page */
+ rx->rx_conn = conn;
+ rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+
+ rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_cmid->device,
+ rx->rx_msg, IBLND_MSG_SIZE,
+ DMA_FROM_DEVICE);
+ LASSERT (!kiblnd_dma_mapping_error(conn->ibc_cmid->device,
+ rx->rx_msgaddr));
+ /* stash the DMA address so the unmap path can find it */
+ KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
+
+ CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
+ i, rx->rx_msg, rx->rx_msgaddr,
+ lnet_page2phys(pg) + pg_off);
+
+ /* advance to the next slot; step to the next page when this
+ * one is exactly full (IBLND_MSG_SIZE divides PAGE_SIZE) */
+ pg_off += IBLND_MSG_SIZE;
+ LASSERT (pg_off <= PAGE_SIZE);
+
+ if (pg_off == PAGE_SIZE) {
+ pg_off = 0;
+ ipg++;
+ LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
+ }
+ }
+
+ /* non-NULL ibp_device marks the pages as mapped (see unmap path) */
+ conn->ibc_rx_pages->ibp_device = conn->ibc_cmid->device;
+}
+
+/*
+ * Undo the DMA mappings created by kiblnd_map_tx_descs().
+ *
+ * Unmaps every transmit descriptor's message buffer against the
+ * device recorded in ibn_tx_pages->ibp_device, then clears
+ * ibp_device to mark the pages as no longer mapped.  Asserts that
+ * the pages exist and are currently mapped.
+ */
+void
+kiblnd_unmap_tx_descs(lnet_ni_t *ni)
+{
+ int i;
+ kib_tx_t *tx;
+ kib_net_t *net = ni->ni_data;
+
+ /* must only be called while the tx pages are allocated and mapped */
+ LASSERT (net->ibn_tx_pages != NULL);
+ LASSERT (net->ibn_tx_pages->ibp_device != NULL);
+
+ for (i = 0; i < IBLND_TX_MSGS(); i++) {
+ tx = &net->ibn_tx_descs[i];
+
+ kiblnd_dma_unmap_single(net->ibn_tx_pages->ibp_device,
+ KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
+ tx->tx_msgaddr),
+ IBLND_MSG_SIZE, DMA_TO_DEVICE);
+ }
+
+ /* NULL ibp_device marks the pages as unmapped */
+ net->ibn_tx_pages->ibp_device = NULL;
+}
+
+/*
+ * DMA-map the transmit message descriptors for a network instance.
+ *
+ * Carves the pre-allocated pages in net->ibn_tx_pages into
+ * IBLND_MSG_SIZE-sized slots, DMA-maps one kib_tx_t per slot
+ * (IBLND_TX_MSGS() descriptors in total), and queues each mapped
+ * descriptor on the idle-tx list.  On completion, records the mapping
+ * device in ibp_device so kiblnd_unmap_tx_descs() knows which device
+ * to unmap against.
+ *
+ * Note: there is no error-return path; a failed DMA mapping trips
+ * the LASSERT below.
+ */
+void
+kiblnd_map_tx_descs (lnet_ni_t *ni)
+{
+ kib_net_t *net = ni->ni_data;
+ struct page *page;
+ kib_tx_t *tx;
+ int page_offset; /* byte offset of the current slot within the page */
+ int ipage; /* index of the current page in ibp_pages[] */
+ int i;
+
+ LASSERT (net != NULL);
+
+ /* pre-mapped messages are not bigger than 1 page */
+ CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);
+
+ /* No fancy arithmetic when we do the buffer calculations */
+ CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
+
+
+ for (ipage = page_offset = i = 0; i < IBLND_TX_MSGS(); i++) {
+ page = net->ibn_tx_pages->ibp_pages[ipage];
+ tx = &net->ibn_tx_descs[i];
+
+ /* point the descriptor's message at its slot in the page */
+ tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+ page_offset);
+
+ tx->tx_msgaddr = kiblnd_dma_map_single(
+ net->ibn_dev->ibd_cmid->device,
+ tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE);
+ LASSERT (!kiblnd_dma_mapping_error(net->ibn_dev->ibd_cmid->device,
+ tx->tx_msgaddr));
+ /* stash the DMA address so the unmap path can find it */
+ KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
+
+ /* mapped descriptor is ready for use: queue it as idle */
+ list_add(&tx->tx_list, &net->ibn_idle_txs);
+
+ /* advance to the next slot; step to the next page when this
+ * one is exactly full (CLASSERTs above guarantee even fit) */
+ page_offset += IBLND_MSG_SIZE;
+ LASSERT (page_offset <= PAGE_SIZE);
+
+ if (page_offset == PAGE_SIZE) {
+ page_offset = 0;
+ ipage++;
+ LASSERT (ipage <= IBLND_TX_MSG_PAGES());
+ }
+ }
+
+ /* non-NULL ibp_device marks the pages as mapped (see unmap path) */
+ net->ibn_tx_pages->ibp_device = net->ibn_dev->ibd_cmid->device;
+}
+
+void
+kiblnd_free_tx_descs (lnet_ni_t *ni)
+{
+ int i;
+ kib_net_t *net = ni->ni_data;
+
+ LASSERT (net != NULL);
+
+ if (net->ibn_tx_pages != NULL)
+ kiblnd_free_pages(net->ibn_tx_pages);
+
+ if (net->ibn_tx_descs == NULL)
+ return;
+
+ for (i = 0; i < IBLND_TX_MSGS(); i++) {
+ kib_tx_t *tx = &net->ibn_tx_descs[i];
+
+ if (tx->tx_pages != NULL)
+ LIBCFS_FREE(tx->tx_pages,
+ LNET_MAX_IOV *
+ sizeof(*tx->tx_pages));
+
+ if (tx->tx_ipb != NULL)
+ LIBCFS_FREE(tx->tx_ipb,
+ IBLND_MAX_RDMA_FRAGS *
+ sizeof(*tx->tx_ipb));
+
+ if (tx->tx_frags != NULL)
+ LIBCFS_FREE(tx->tx_frags,
+ IBLND_MAX_RDMA_FRAGS *
+ sizeof(*tx->tx_frags));
+
+ if (tx->tx_wrq != NULL)
+ LIBCFS_FREE(tx->tx_wrq,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_wrq));
+
+ if (tx->tx_sge != NULL)
+ LIBCFS_FREE(tx->tx_sge,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_sge));
+
+ if (tx->tx_rd != NULL)
+ LIBCFS_FREE(tx->tx_rd,
+ offsetof(kib_rdma_desc_t,
+ rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ }
+
+ LIBCFS_FREE(net->ibn_tx_descs,
+ IBLND_TX_MSGS() * sizeof(kib_tx_t));