struct kib_fmr_pool *fpo)
{
struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_IOV,
+ .max_pages_per_fmr = IBLND_MAX_RDMA_FRAGS,
.page_shift = PAGE_SHIFT,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE),
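
Throughout this patch the LNet-wide iovec ceiling LNET_MAX_IOV is replaced by IBLND_MAX_RDMA_FRAGS, the o2iblnd cap on RDMA fragments per transfer, so MR and FMR resources are sized to what the LND can actually post. A minimal sketch of a pool created with the legacy kernel FMR pool API, with the fragment cap as max_pages_per_fmr; pool_size and dirty_watermark here are stand-ins, not values from this patch:

    #include <rdma/ib_fmr_pool.h>  /* legacy FMR pool API (removed from recent kernels) */

    /* Hedged sketch, not the patched function: the real code takes
     * pool_size and dirty_watermark from the pool set.  Returns an
     * ERR_PTR on failure, like ib_create_fmr_pool() itself. */
    static struct ib_fmr_pool *fmr_pool_sketch(struct ib_pd *pd)
    {
        struct ib_fmr_pool_param param = {
            .max_pages_per_fmr = IBLND_MAX_RDMA_FRAGS, /* LND frag cap */
            .page_shift        = PAGE_SHIFT,
            .access            = (IB_ACCESS_LOCAL_WRITE |
                                  IB_ACCESS_REMOTE_WRITE),
            .pool_size         = 512,  /* stand-in */
            .dirty_watermark   = 256,  /* stand-in */
            .cache             = 1,
        };

        return ib_create_fmr_pool(pd, &param);
    }
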
#ifndef HAVE_IB_MAP_MR_SG
frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
- LNET_MAX_IOV);
+ IBLND_MAX_RDMA_FRAGS);
if (IS_ERR(frd->frd_frpl)) {
rc = PTR_ERR(frd->frd_frpl);
CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
#ifdef HAVE_IB_ALLOC_FAST_REG_MR
frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
- LNET_MAX_IOV);
+ IBLND_MAX_RDMA_FRAGS);
#else
/*
 * it is expected to get here if this is an MLX-5 card.
 */
frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
#ifdef IB_MR_TYPE_SG_GAPS
			  ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
			   (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) ?
				IB_MR_TYPE_SG_GAPS :
				IB_MR_TYPE_MEM_REG,
#else
			  IB_MR_TYPE_MEM_REG,
#endif
-			  LNET_MAX_IOV);
+			  IBLND_MAX_RDMA_FRAGS);
if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
(dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT))
CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n");
CFS_FREE_PTR_ARRAY(tx->tx_pages, LNET_MAX_IOV);
if (tx->tx_frags != NULL)
CFS_FREE_PTR_ARRAY(tx->tx_frags,
- (1 + IBLND_MAX_RDMA_FRAGS));
+ IBLND_MAX_RDMA_FRAGS);
if (tx->tx_wrq != NULL)
CFS_FREE_PTR_ARRAY(tx->tx_wrq,
- (1 + IBLND_MAX_RDMA_FRAGS));
+ IBLND_MAX_RDMA_FRAGS);
if (tx->tx_sge != NULL)
CFS_FREE_PTR_ARRAY(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) *
+ IBLND_MAX_RDMA_FRAGS *
wrq_sge);
if (tx->tx_rd != NULL)
LIBCFS_FREE(tx->tx_rd,
	    offsetof(struct kib_rdma_desc,
		     rd_frags[IBLND_MAX_RDMA_FRAGS]));
}
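
The free side drops the extra `1 +` slot in lockstep with the allocation changes below. Assuming the usual libcfs definitions, where both macros compute the buffer size as count * sizeof(*ptr), the count passed at free time must equal the one used at allocation, or the accounted size is wrong. A sketch of the pairing, with CFS_ALLOC_PTR_ARRAY standing in for the CPT-aware LIBCFS_CPT_ALLOC the patch actually uses:

    /* Hedged sketch of the alloc/free pairing under the usual libcfs
     * definitions; not code from this patch. */
    static int tx_frags_roundtrip_sketch(struct kib_tx *tx)
    {
        CFS_ALLOC_PTR_ARRAY(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
        if (tx->tx_frags == NULL)
                return -ENOMEM;

        /* must free with the same count the array was allocated with */
        CFS_FREE_PTR_ARRAY(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
        return 0;
    }
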
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
+ IBLND_MAX_RDMA_FRAGS *
sizeof(*tx->tx_frags));
if (tx->tx_frags == NULL)
break;
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
+ IBLND_MAX_RDMA_FRAGS *
sizeof(*tx->tx_wrq));
if (tx->tx_wrq == NULL)
break;
LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
+ IBLND_MAX_RDMA_FRAGS * wrq_sge *
sizeof(*tx->tx_sge));
if (tx->tx_sge == NULL)
break;
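
The sg_init_table() count has to track the allocation exactly: it zeroes nents entries and places the end marker on the last one, so initializing one entry more than was allocated, as a mismatched count here would, writes past the buffer. A minimal sketch of that invariant, with kcalloc standing in for LIBCFS_CPT_ALLOC:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Minimal sketch: allocate and initialize exactly nfrags entries. */
    static struct scatterlist *frag_table_sketch(unsigned int nfrags)
    {
        struct scatterlist *sgl = kcalloc(nfrags, sizeof(*sgl), GFP_KERNEL);

        if (sgl == NULL)
                return NULL;
        /* zeroes the table and puts the end marker on entry nfrags - 1 */
        sg_init_table(sgl, nfrags);
        return sgl;
    }
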