list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
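+/* fast-reg page lists only exist on kernels that lack ib_map_mr_sg() */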
+#ifndef HAVE_IB_MAP_MR_SG
ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
ib_dereg_mr(frd->frd_mr);
LIBCFS_FREE(frd, sizeof(*frd));
i++;
}
frd->frd_mr = NULL;
+#ifndef HAVE_IB_MAP_MR_SG
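+/* older kernels: preallocate a page list sized for a full LNet payload */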
frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
LNET_MAX_PAYLOAD/PAGE_SIZE);
if (IS_ERR(frd->frd_frpl)) {
rc = PTR_ERR(frd->frd_frpl);
CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
rc);
+ frd->frd_frpl = NULL;
goto out_middle;
}
+#endif
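+/* ib_alloc_fast_reg_mr() was folded into ib_alloc_mr() on newer kernels */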
+#ifdef HAVE_IB_ALLOC_FAST_REG_MR
frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
LNET_MAX_PAYLOAD/PAGE_SIZE);
+#else
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+#endif
if (IS_ERR(frd->frd_mr)) {
rc = PTR_ERR(frd->frd_mr);
CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
+ frd->frd_mr = NULL;
goto out_middle;
}
out_middle:
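+/* undo whatever this descriptor managed to allocate before the failure */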
if (frd->frd_mr)
ib_dereg_mr(frd->frd_mr);
+#ifndef HAVE_IB_MAP_MR_SG
if (frd->frd_frpl)
ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
LIBCFS_FREE(frd, sizeof(*frd));
out:
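+/* tear down every descriptor already linked into the pool */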
list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
ib_dereg_mr(frd->frd_mr);
LIBCFS_FREE(frd, sizeof(*frd));
}
return cfs_time_aftereq(now, fpo->fpo_deadline);
}
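+/*
+ * Flatten an RDMA descriptor into tx->tx_pages: one entry per HCA page
+ * covered by each fragment of @rd.  Returns the number of entries
+ * written.  Only the FMR and the pre-ib_map_mr_sg fast-reg paths
+ * consume this array.
+ */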
+static int
+kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+{
+ kib_hca_dev_t *hdev;
+ __u64 *pages = tx->tx_pages;
+ int npages;
+ int size;
+ int i;
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ return npages;
+}
+
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
}
int
-kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u32 nob, __u64 iov, bool is_rx, kib_fmr_t *fmr)
+kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx, kib_rdma_desc_t *rd,
+ __u32 nob, __u64 iov, kib_fmr_t *fmr)
{
kib_fmr_pool_t *fpo;
+ __u64 *pages = tx->tx_pages;
__u64 version;
+ bool is_rx = (rd != tx->tx_rd); /* peer-advertised buffer: key must be the rkey */
+ bool tx_pages_mapped = false;
+ int npages = 0;
int rc;
again:
spin_lock(&fps->fps_lock);
version = fps->fps_version;
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;

if (fpo->fpo_is_fmr) {
struct ib_pool_fmr *pfmr;

spin_unlock(&fps->fps_lock);
+
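+ /* built lazily: the ib_map_mr_sg path never needs the page array */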
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
pages, npages, iov);
if (likely(!IS_ERR(pfmr))) {
fmr->fmr_key = is_rx ? pfmr->fmr->rkey : pfmr->fmr->lkey;
fmr->fmr_frd = NULL;
fmr->fmr_pfmr = pfmr;
fmr->fmr_pool = fpo;
return 0;
}
rc = PTR_ERR(pfmr);
} else {
if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
- struct ib_send_wr *wr;
struct kib_fast_reg_descriptor *frd;
+#ifdef HAVE_IB_MAP_MR_SG
+ struct ib_reg_wr *wr;
+ int n;
+#else
+ struct ib_rdma_wr *wr;
struct ib_fast_reg_page_list *frpl;
+#endif
struct ib_mr *mr;
frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
struct kib_fast_reg_descriptor,
frd_list);
list_del(&frd->frd_list);
spin_unlock(&fps->fps_lock);
+#ifndef HAVE_IB_MAP_MR_SG
frpl = frd->frd_frpl;
+#endif
mr = frd->frd_mr;
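+/* an MR from a previous mapping must be invalidated and re-keyed before reuse */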
if (!frd->frd_valid) {
- struct ib_send_wr *inv_wr;
+ struct ib_rdma_wr *inv_wr;
__u32 key = is_rx ? mr->rkey : mr->lkey;
inv_wr = &frd->frd_inv_wr;
memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->wr_id = IBLND_WID_MR;
- inv_wr->ex.invalidate_rkey = key;
+
+ inv_wr->wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr.wr_id = IBLND_WID_MR;
+ inv_wr->wr.ex.invalidate_rkey = key;
/* Bump the key */
key = ib_inc_rkey(key);
ib_update_fast_reg_key(mr, key);
}
+#ifdef HAVE_IB_MAP_MR_SG
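+/* newer API: build the MR page table straight from the sg list and post one IB_WR_REG_MR */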
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, PAGE_SIZE);
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d elements\n",
+ n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
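+ /* ib_map_mr_sg() derived an iova from the sg list; replace it with the caller's base */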
+ mr->iova = iov;
+
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+#else
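+ /* the page array may not exist yet if the FMR path above was skipped */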
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
LASSERT(npages <= frpl->max_page_list_len);
memcpy(frpl->page_list, pages,
sizeof(*pages) * npages);
/* Prepare FastReg WR */
wr = &frd->frd_fastreg_wr;
memset(wr, 0, sizeof(*wr));
- wr->opcode = IB_WR_FAST_REG_MR;
- wr->wr_id = IBLND_WID_MR;
- wr->wr.fast_reg.iova_start = iov;
- wr->wr.fast_reg.page_list = frpl;
- wr->wr.fast_reg.page_list_len = npages;
- wr->wr.fast_reg.page_shift = PAGE_SHIFT;
- wr->wr.fast_reg.length = nob;
- wr->wr.fast_reg.rkey = is_rx ? mr->rkey
- : mr->lkey;
- wr->wr.fast_reg.access_flags =
+
+ wr->wr.opcode = IB_WR_FAST_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+
+ wr->wr.wr.fast_reg.iova_start = iov;
+ wr->wr.wr.fast_reg.page_list = frpl;
+ wr->wr.wr.fast_reg.page_list_len = npages;
+ wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ wr->wr.wr.fast_reg.length = nob;
+ wr->wr.wr.fast_reg.rkey =
+ is_rx ? mr->rkey : mr->lkey;
+ wr->wr.wr.fast_reg.access_flags =
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
+#endif
fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
fmr->fmr_frd = frd;
static void __exit ko2iblnd_exit(void)
{
lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
}
static int __init ko2iblnd_init(void)