for O2IBPATH in $O2IBPATHS; do
AS_IF([test \( -f ${O2IBPATH}/include/rdma/rdma_cm.h -a \
-f ${O2IBPATH}/include/rdma/ib_cm.h -a \
- -f ${O2IBPATH}/include/rdma/ib_verbs.h -a \
- -f ${O2IBPATH}/include/rdma/ib_fmr_pool.h \)], [
+ -f ${O2IBPATH}/include/rdma/ib_verbs.h \)], [
o2ib_found=true
break
])
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
- #include <rdma/ib_fmr_pool.h>
],[
struct rdma_cm_id *cm_idi __attribute__ ((unused));
struct rdma_conn_param conn_param __attribute__ ((unused));
struct ib_device_attr device_attr __attribute__ ((unused));
struct ib_qp_attr qp_attr __attribute__ ((unused));
- struct ib_pool_fmr pool_fmr __attribute__ ((unused));
enum ib_cm_rej_reason rej_reason __attribute__ ((unused));
rdma_destroy_id(NULL);
],[
[rdma_reject has 4 arguments])
])
+ # The FMR pool API was removed in Linux 5.8 by
+ # commit 4e373d5417ecbb4f438a8500f0379a2fc29c2643.
+ LB_CHECK_COMPILE([if the FMR pool API is available],
+ ib_fmr, [
+ #include <rdma/ib_verbs.h>
+ ],[
+ struct ib_fmr fmr = {};
+ ],[
+ AC_DEFINE(HAVE_FMR_POOL_API, 1,
+ [FMR pool API is available])
+ ])
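For reference, LB_CHECK_COMPILE reduces the test above to a small conftest
source compiled against the target kernel headers; a minimal sketch of what
gets compiled (the real scaffolding, including the kbuild plumbing, is
generated by the macro):

	/* conftest sketch: on kernels >= 5.8 struct ib_fmr no longer
	 * exists, this fails to compile, and HAVE_FMR_POOL_API is left
	 * undefined */
	#include <rdma/ib_verbs.h>

	int main(void)
	{
		struct ib_fmr fmr = {};

		(void)fmr;	/* keep -Wunused-variable quiet */
		return 0;
	}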
+
EXTRA_CHECK_INCLUDE=""
AC_DEFUN([LN_CONFIG_O2IB_SRC], [])
AC_DEFUN([LN_CONFIG_O2IB_RESULTS], [])
{
LASSERT(fpo->fpo_map_count == 0);
+#ifdef HAVE_FMR_POOL_API
if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) {
ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
- } else {
+ } else
+#endif /* HAVE_FMR_POOL_API */
+ {
struct kib_fast_reg_descriptor *frd, *tmp;
int i = 0;
return max(IBLND_FMR_POOL_FLUSH, size);
}
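The "} else" / "#endif" / "{" bracketing above is the recurring idiom of this
patch: the FastReg block serves as the else-branch when the FMR API is
compiled in, and as a plain statement block when it is not. A self-contained
sketch showing that both preprocessed variants stay syntactically valid (the
teardown helpers are hypothetical, for illustration only):

	static void pool_teardown(struct kib_fmr_pool *fpo)
	{
	#ifdef HAVE_FMR_POOL_API
		if (fpo->fpo_is_fmr) {
			/* FMR path: only exists when the API does */
			teardown_fmr(fpo);	/* hypothetical helper */
		} else
	#endif /* HAVE_FMR_POOL_API */
		{
			/* else-branch with FMR, bare block without */
			teardown_fastreg(fpo);	/* hypothetical helper */
		}
	}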
+#ifdef HAVE_FMR_POOL_API
static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps,
struct kib_fmr_pool *fpo)
{
return rc;
}
+#endif /* HAVE_FMR_POOL_API */
static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps,
struct kib_fmr_pool *fpo,
struct kib_fast_reg_descriptor *frd, *tmp;
int i, rc;
+#ifdef HAVE_FMR_POOL_API
fpo->fpo_is_fmr = false;
+#endif
INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
fpo->fast_reg.fpo_pool_size = 0;
fpo->fpo_hdev = kiblnd_current_hdev(dev);
+#ifdef HAVE_FMR_POOL_API
if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
rc = kiblnd_alloc_fmr_pool(fps, fpo);
else
+#endif /* HAVE_FMR_POOL_API */
rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps);
if (rc)
goto out_fpo;
return now >= fpo->fpo_deadline;
}
+#if defined(HAVE_FMR_POOL_API) || !defined(HAVE_IB_MAP_MR_SG)
static int
kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
return npages;
}
+#endif
void
kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
struct kib_fmr_poolset *fps;
time64_t now = ktime_get_seconds();
struct kib_fmr_pool *tmp;
- int rc;
if (!fpo)
return;
fps = fpo->fpo_owner;
+
+#ifdef HAVE_FMR_POOL_API
if (fpo->fpo_is_fmr) {
if (fmr->fmr_pfmr) {
ib_fmr_pool_unmap(fmr->fmr_pfmr);
}
if (status) {
- rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ int rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
LASSERT(!rc);
}
- } else {
+ } else
+#endif /* HAVE_FMR_POOL_API */
+ {
struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
if (frd) {
struct kib_fmr *fmr)
{
struct kib_fmr_pool *fpo;
- __u64 *pages = tx->tx_pages;
__u64 version;
bool is_rx = (rd != tx->tx_rd);
+#ifdef HAVE_FMR_POOL_API
+ __u64 *pages = tx->tx_pages;
bool tx_pages_mapped = 0;
int npages = 0;
+#endif
int rc;
again:
fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
fpo->fpo_map_count++;
+#ifdef HAVE_FMR_POOL_API
+ fmr->fmr_pfmr = NULL;
if (fpo->fpo_is_fmr) {
struct ib_pool_fmr *pfmr;
return 0;
}
rc = PTR_ERR(pfmr);
- } else {
+ } else
+#endif /* HAVE_FMR_POOL_API */
+ {
if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
struct kib_fast_reg_descriptor *frd;
#ifdef HAVE_IB_MAP_MR_SG
#else
n = ib_map_mr_sg(mr, tx->tx_frags,
rd->rd_nfrags, PAGE_SIZE);
-#endif
+#endif /* HAVE_IB_MAP_MR_SG_5ARGS */
if (unlikely(n != rd->rd_nfrags)) {
CERROR("Failed to map mr %d/%d "
"elements\n", n, rd->rd_nfrags);
wr->key = is_rx ? mr->rkey : mr->lkey;
wr->access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
-#else
+#else /* HAVE_IB_MAP_MR_SG */
if (!tx_pages_mapped) {
npages = kiblnd_map_tx_pages(tx, rd);
tx_pages_mapped = 1;
wr->wr.wr.fast_reg.access_flags =
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
-#endif
+#endif /* HAVE_IB_MAP_MR_SG */
fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
fmr->fmr_frd = frd;
- fmr->fmr_pfmr = NULL;
fmr->fmr_pool = fpo;
return 0;
}
hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
/* Setup device Memory Registration capabilities */
+#ifdef HAVE_FMR_POOL_API
#ifdef HAVE_IB_DEVICE_OPS
if (hdev->ibh_ibdev->ops.alloc_fmr &&
hdev->ibh_ibdev->ops.dealloc_fmr &&
#endif
LCONSOLE_INFO("Using FMR for registration\n");
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
- } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ } else
+#endif /* HAVE_FMR_POOL_API */
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
LCONSOLE_INFO("Using FastReg for registration\n");
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
#ifndef HAVE_IB_ALLOC_FAST_REG_MR
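Preprocessed both ways, the capability selection above reduces to roughly the
following (a sketch; the FMR-verbs probe is abbreviated to a hypothetical
flag, and the surrounding checks are elided):

	/* With HAVE_FMR_POOL_API: FMR is preferred when the device
	 * implements the FMR verbs, FastReg is the fallback */
	if (device_has_fmr_verbs) {		/* hypothetical */
		hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
	}

	/* Without HAVE_FMR_POOL_API: FastReg is the only candidate */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
		hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;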
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
+#ifdef HAVE_FMR_POOL_API
#include <rdma/ib_fmr_pool.h>
+#endif
#define DEBUG_SUBSYSTEM S_LND
enum kib_dev_caps {
IBLND_DEV_CAPS_FASTREG_ENABLED = BIT(0),
IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT = BIT(1),
+#ifdef HAVE_FMR_POOL_API
IBLND_DEV_CAPS_FMR_ENABLED = BIT(2),
+#endif
};
struct kib_dev {
struct list_head fpo_list; /* chain on pool list */
struct kib_hca_dev *fpo_hdev; /* device for this pool */
struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
+#ifdef HAVE_FMR_POOL_API
union {
struct {
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
} fmr;
+#endif
struct { /* For fast registration */
struct list_head fpo_pool_list;
int fpo_pool_size;
} fast_reg;
+#ifdef HAVE_FMR_POOL_API
};
+ bool fpo_is_fmr; /* True if FMR pools allocated */
+#endif
time64_t fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
- bool fpo_is_fmr; /* True if FMR pools allocated */
};
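After preprocessing, the pool descriptor therefore takes one of two shapes
(sketch; unrelated fields omitted):

	/* HAVE_FMR_POOL_API defined: fmr and fast_reg overlay in an
	 * anonymous union, discriminated by fpo_is_fmr */
	union {
		struct {
			struct ib_fmr_pool	*fpo_fmr_pool;
		} fmr;
		struct {
			struct list_head	 fpo_pool_list;
			int			 fpo_pool_size;
		} fast_reg;
	};
	bool	fpo_is_fmr;

	/* HAVE_FMR_POOL_API undefined: fast_reg is a plain member and
	 * the discriminator vanishes along with all of its readers */
	struct {
		struct list_head	 fpo_pool_list;
		int			 fpo_pool_size;
	} fast_reg;

Because the union is anonymous, accesses read fpo->fast_reg.* in both shapes,
which is what keeps the FastReg code paths unchanged.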
struct kib_fmr {
struct kib_fmr_pool *fmr_pool; /* pool of FMR */
+#ifdef HAVE_FMR_POOL_API
struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+#endif /* HAVE_FMR_POOL_API */
struct kib_fast_reg_descriptor *fmr_frd;
u32 fmr_key;
};
+#ifdef HAVE_FMR_POOL_API
+
#ifdef HAVE_ORACLE_OFED_EXTENSIONS
#define kib_fmr_pool_map(pool, pgs, n, iov) \
ib_fmr_pool_map_phys((pool), (pgs), (n), (iov), NULL)
ib_fmr_pool_map_phys((pool), (pgs), (n), (iov))
#endif
+#endif /* HAVE_FMR_POOL_API */
+
struct kib_net {
/* chain on struct kib_dev::ibd_nets */
struct list_head ibn_list;
return -EPROTONOSUPPORT;
}
+#ifdef HAVE_FMR_POOL_API
/*
* FMR does not support gaps; if the tx has gaps, then
* we should make sure that the number of fragments we'll be sending
return -EFBIG;
}
}
+#endif
fps = net->ibn_fmr_ps[cpt];
rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
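The guarded check above follows the reasoning in the comment: FMR maps a page
list, so a tx with gaps cannot be collapsed and every fragment must travel
as-is. In spirit (a sketch; the negotiated-limit name is illustrative, not
the actual field):

	/* A gappy tx under FMR keeps all of its fragments, so refuse it
	 * up front if it cannot fit what the peer negotiated */
	if (tx->tx_gaps &&
	    (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) &&
	    rd->rd_nfrags > conn_negotiated_max_frags)	/* hypothetical */
		return -EFBIG;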
* for FastReg or FMR with no gaps we can accumulate all
* the fragments in one FastReg or FMR fragment.
*/
- if (((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) && !tx->tx_gaps) ||
+ if (
+#ifdef HAVE_FMR_POOL_API
+ ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
+ && !tx->tx_gaps) ||
+#endif
(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
/* FMR requires zero based address */
+#ifdef HAVE_FMR_POOL_API
if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
+#endif
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
} else {
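When one registration covers the whole transfer, the descriptor collapses to
a single logical fragment; worked through with the assignments visible above
(values illustrative):

	/* Three 4 KiB physical fragments, no gaps, nob = 12288 bytes:
	 *
	 *   before: rd->rd_nfrags == 3
	 *   after:  rd->rd_nfrags == 1, rd->rd_frags[0].rf_nob == 12288
	 *
	 * and, on the FMR path only, the start address is first made
	 * zero-based:  rf_addr &= ~hdev->ibh_page_mask;  */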
static void
kiblnd_unmap_tx(struct kib_tx *tx)
{
- if (tx->tx_fmr.fmr_pfmr || tx->tx_fmr.fmr_frd)
+ if (
+#ifdef HAVE_FMR_POOL_API
+ tx->tx_fmr.fmr_pfmr ||
+#endif
+ tx->tx_fmr.fmr_frd)
kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
if (tx->tx_nfrags != 0) {
* dead in the water and fail the operation.
*/
if (tunables->lnd_map_on_demand &&
- (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED ||
- net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED))
+ (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED
+#ifdef HAVE_FMR_POOL_API
+ || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
+#endif
+ ))
return NULL;
/*