if test -f ${O2IBPATH}/include/linux/compat-2.6.h; then
AC_MSG_RESULT([yes])
compatrdma_found=true
- AC_DEFINE(HAVE_COMPAT_RDMA, 1, [compat rdma found])
+ AC_DEFINE(HAVE_OFED_COMPAT_RDMA, 1, [compat rdma found])
EXTRA_OFED_CONFIG="$EXTRA_OFED_CONFIG -include ${O2IBPATH}/include/linux/compat-2.6.h"
if test -f "$O2IBPATH/include/linux/compat_autoconf.h"; then
COMPAT_AUTOCONF="$O2IBPATH/include/linux/compat_autoconf.h"
EXTRA_CHECK_INCLUDE="$EXTRA_OFED_CONFIG $EXTRA_OFED_INCLUDE"
LB_CHECK_COMPILE([whether to enable OpenIB gen2 support],
openib_gen2_support, [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
LB_CHECK_COMPILE([if Linux kernel has kthread_worker],
linux_kthread_worker, [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
# In RHEL 6.2, rdma_create_id() takes the queue-pair type as a fourth argument
AC_DEFUN([LN_SRC_O2IB_RDMA_CREATE_ID_4A], [
LB2_LINUX_TEST_SRC([rdma_create_id_4args], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_RDMA_CREATE_ID_4A], [
LB2_MSG_LINUX_TEST_RESULT([if 'rdma_create_id' wants four args],
[rdma_create_id_4args], [
- AC_DEFINE(HAVE_RDMA_CREATE_ID_4ARG, 1,
+ AC_DEFINE(HAVE_OFED_RDMA_CREATE_ID_4ARG, 1,
[rdma_create_id wants 4 args])
])
])
# 4.4 added network namespace parameter for rdma_create_id()
AC_DEFUN([LN_SRC_O2IB_RDMA_CREATE_ID_5A], [
LB2_LINUX_TEST_SRC([rdma_create_id_5args], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_RDMA_CREATE_ID_5A], [
LB2_MSG_LINUX_TEST_RESULT([if 'rdma_create_id' wants five args],
[rdma_create_id_5args], [
- AC_DEFINE(HAVE_RDMA_CREATE_ID_5ARG, 1,
+ AC_DEFINE(HAVE_OFED_RDMA_CREATE_ID_5ARG, 1,
[rdma_create_id wants 5 args])
])
])
# we always need to run these functionality tests.
AC_DEFUN([LN_SRC_O2IB_IB_CQ_INIT_ATTR], [
LB2_LINUX_TEST_SRC([ib_cq_init_attr], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_CQ_INIT_ATTR], [
LB2_MSG_LINUX_TEST_RESULT([if 'struct ib_cq_init_attr' is used],
[ib_cq_init_attr], [
- AC_DEFINE(HAVE_IB_CQ_INIT_ATTR, 1,
+ AC_DEFINE(HAVE_OFED_IB_CQ_INIT_ATTR, 1,
[struct ib_cq_init_attr is used by ib_create_cq])
])
])
# 4.3 removed ib_alloc_fast_reg_mr()
AC_DEFUN([LN_SRC_O2IB_IB_ALLOC_FAST_REG_MR], [
LB2_LINUX_TEST_SRC([ib_alloc_fast_reg_mr], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_ALLOC_FAST_REG_MR], [
LB2_MSG_LINUX_TEST_RESULT([if 'ib_alloc_fast_reg_mr' exists],
[ib_alloc_fast_reg_mr], [
- AC_DEFINE(HAVE_IB_ALLOC_FAST_REG_MR, 1,
+ AC_DEFINE(HAVE_OFED_IB_ALLOC_FAST_REG_MR, 1,
[ib_alloc_fast_reg_mr is defined])
])
])
# We then have to use FMR/Fastreg for all RDMA.
AC_DEFUN([LN_SRC_O2IB_IB_GET_DMA_MR], [
LB2_LINUX_TEST_SRC([ib_get_dma_mr], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_GET_DMA_MR], [
LB2_MSG_LINUX_TEST_RESULT([if 'ib_get_dma_mr' exists],
[ib_get_dma_mr], [
- AC_DEFINE(HAVE_IB_GET_DMA_MR, 1,
+ AC_DEFINE(HAVE_OFED_IB_GET_DMA_MR, 1,
[ib_get_dma_mr is defined])
])
])
# use their own structure which embeds struct ib_send_wr.
AC_DEFUN([LN_SRC_O2IB_IB_RDMA_WR], [
LB2_LINUX_TEST_SRC([ib_rdma_wr], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_RDMA_WR], [
LB2_MSG_LINUX_TEST_RESULT([if 'struct ib_rdma_wr' is defined],
[ib_rdma_wr], [
- AC_DEFINE(HAVE_IB_RDMA_WR, 1,
+ AC_DEFINE(HAVE_OFED_IB_RDMA_WR, 1,
[struct ib_rdma_wr is defined])
])
])
# new fast registration API introduced in 4.4
AC_DEFUN([LN_SRC_O2IB_IB_MAP_MR_SG_4A], [
LB2_LINUX_TEST_SRC([ib_map_mr_sg_4args], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_MAP_MR_SG_4A], [
LB2_MSG_LINUX_TEST_RESULT([if 4arg 'ib_map_mr_sg' exists],
[ib_map_mr_sg_4args], [
- AC_DEFINE(HAVE_IB_MAP_MR_SG, 1,
+ AC_DEFINE(HAVE_OFED_IB_MAP_MR_SG, 1,
[ib_map_mr_sg exists])
])
])
# in kernel 4.7 (and RHEL 7.3)
AC_DEFUN([LN_SRC_O2IB_IB_MAP_MR_SG_5A], [
LB2_LINUX_TEST_SRC([ib_map_mr_sg_5args], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_MAP_MR_SG_5A], [
LB2_MSG_LINUX_TEST_RESULT([if 5arg 'ib_map_mr_sg' exists],
[ib_map_mr_sg_5args], [
- AC_DEFINE(HAVE_IB_MAP_MR_SG, 1,
+ AC_DEFINE(HAVE_OFED_IB_MAP_MR_SG, 1,
[ib_map_mr_sg exists])
- AC_DEFINE(HAVE_IB_MAP_MR_SG_5ARGS, 1,
+ AC_DEFINE(HAVE_OFED_IB_MAP_MR_SG_5ARGS, 1,
[ib_map_mr_sg has 5 arguments])
])
])
# ib_query_device() removed in 4.5
AC_DEFUN([LN_SRC_O2IB_IB_DEVICE_ATTRS], [
LB2_LINUX_TEST_SRC([ib_device_attrs], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_DEVICE_ATTRS], [
LB2_MSG_LINUX_TEST_RESULT([if 'struct ib_device' has member 'attrs'],
[ib_device_attrs], [
- AC_DEFINE(HAVE_IB_DEVICE_ATTRS, 1,
+ AC_DEFINE(HAVE_OFED_IB_DEVICE_ATTRS, 1,
[struct ib_device.attrs is defined])
])
])
# commit ed082d36a7b2c27d1cda55fdfb28af18040c4a89
AC_DEFUN([LN_SRC_O2IB_IB_ALLOC_PD], [
LB2_LINUX_TEST_SRC([ib_alloc_pd], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_ALLOC_PD], [
LB2_MSG_LINUX_TEST_RESULT([if 2arg 'ib_alloc_pd' exists],
[ib_alloc_pd], [
- AC_DEFINE(HAVE_IB_ALLOC_PD_2ARGS, 1,
+ AC_DEFINE(HAVE_OFED_IB_ALLOC_PD_2ARGS, 1,
[ib_alloc_pd has 2 arguments])
])
])
AC_DEFUN([LN_SRC_O2IB_IB_INC_RKEY], [
LB2_LINUX_TEST_SRC([ib_inc_rkey], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_INC_RKEY], [
LB2_MSG_LINUX_TEST_RESULT([if function 'ib_inc_rkey' is defined],
[ib_inc_rkey], [
- AC_DEFINE(HAVE_IB_INC_RKEY, 1,
+ AC_DEFINE(HAVE_OFED_IB_INC_RKEY, 1,
[function ib_inc_rkey exist])
])
])
# 'const'.
AC_DEFUN([LN_SRC_O2IB_IB_POST_SEND_CONST], [
LB2_LINUX_TEST_SRC([ib_post_send_recv_const], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_IB_POST_SEND_CONST], [
LB2_MSG_LINUX_TEST_RESULT([if 'ib_post_send() and ib_post_recv()' have const parameters],
[ib_post_send_recv_const], [
- AC_DEFINE(HAVE_IB_POST_SEND_RECV_CONST, 1,
+ AC_DEFINE(HAVE_OFED_IB_POST_SEND_RECV_CONST, 1,
[ib_post_send and ib_post_recv have const parameters])
])
])
# See if we have a broken ib_dma_map_sg()
AC_DEFUN([LN_SRC_SANE_IB_DMA_MAP_SG], [
LB2_LINUX_TEST_SRC([sane_ib_dma_map_sg], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_SANE_IB_DMA_MAP_SG], [
LB2_MSG_LINUX_TEST_RESULT([if ib_dma_map_sg() is sane],
[sane_ib_dma_map_sg], [
- AC_DEFINE(HAVE_SANE_IB_DMA_MAP_SG, 1,
+ AC_DEFINE(HAVE_OFED_IB_DMA_MAP_SG_SANE, 1,
[ib_dma_map_sg is sane])
],[],[module])
])
AC_DEFUN([LN_O2IB_IB_SG_DMA_ADDRESS_EXISTS], [
LB2_MSG_LINUX_TEST_RESULT([if ib_sg_dma_address wrapper exists],
[ib_sg_dma_address_test], [
- AC_DEFINE(HAVE_IB_SG_DMA_ADDRESS, 1,
+ AC_DEFINE(HAVE_OFED_IB_SG_DMA_ADDRESS, 1,
[if ib_sg_dma_address wrapper exists])
])
]) # LN_O2IB_IB_SG_DMA_ADDRESS_EXISTS
# commit 8094ba0ace7f6cd1e31ea8b151fba3594cadfa9a
AC_DEFUN([LN_SRC_O2IB_RDMA_REJECT], [
LB2_LINUX_TEST_SRC([rdma_reject], [
- #ifdef HAVE_COMPAT_RDMA
+ #ifdef HAVE_OFED_COMPAT_RDMA
#undef PACKAGE_NAME
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
AC_DEFUN([LN_O2IB_RDMA_REJECT], [
LB2_MSG_LINUX_TEST_RESULT([if 4arg 'rdma_reject' exists],
[rdma_reject], [
- AC_DEFINE(HAVE_RDMA_REJECT_4ARGS, 1,
+ AC_DEFINE(HAVE_OFED_RDMA_REJECT_4ARGS, 1,
[rdma_reject has 4 arguments])
])
]) # LN_O2IB_RDMA_REJECT
AC_DEFUN([LN_O2IB_RDMA_CONNECT_LOCKED], [
LB2_MSG_LINUX_TEST_RESULT([if 'rdma_connect_locked' exists],
[rdma_connect_locked], [
- AC_DEFINE(HAVE_RDMA_CONNECT_LOCKED, 1,
+ AC_DEFINE(HAVE_OFED_RDMA_CONNECT_LOCKED, 1,
[rdma_connect_locked is defined])
])
]) # LN_O2IB_RDMA_CONNECT_LOCKED
- #
- # LN_O2IB_ETHTOOL_LINK_SETTINGS
- #
- # ethtool_link_settings was added in Linux 4.6
- #
- AC_DEFUN([LN_SRC_O2IB_ETHTOOL_LINK_SETTINGS], [
- LB2_LINUX_TEST_SRC([ethtool_link_settings], [
- #include <linux/ethtool.h>
- ],[
- struct ethtool_link_ksettings cmd;
- ],[],[$EXTRA_OFED_CONFIG $EXTRA_OFED_INCLUDE])
- ])
- AC_DEFUN([LN_O2IB_ETHTOOL_LINK_SETTINGS], [
- LB2_MSG_LINUX_TEST_RESULT([if 'ethtool_link_settings' exists],
- [ethtool_link_settings], [
- AC_DEFINE(HAVE_ETHTOOL_LINK_SETTINGS, 1,
- [ethtool_link_settings is defined])
- ])
- ]) # LN_O2IB_ETHTOOL_LINK_SETTINGS
-
EXTRA_CHECK_INCLUDE=""
AC_DEFUN([LN_CONFIG_O2IB_SRC], [
LN_SRC_O2IB_RDMA_REJECT
LN_SRC_O2IB_IB_FMR
LN_SRC_O2IB_RDMA_CONNECT_LOCKED
- LN_SRC_O2IB_ETHTOOL_LINK_SETTINGS
])
AC_DEFUN([LN_CONFIG_O2IB_RESULTS], [
LN_O2IB_RDMA_CREATE_ID_4A
LN_O2IB_RDMA_REJECT
LN_O2IB_IB_FMR
LN_O2IB_RDMA_CONNECT_LOCKED
- LN_O2IB_ETHTOOL_LINK_SETTINGS
])
]) # ENABLEO2IB != "no"
]) # LN_CONFIG_O2IB
]) # LN_CONFIG_SK_DATA_READY
#
+# LN_ETHTOOL_LINK_SETTINGS
+#
+# ethtool_link_settings was added in Linux 4.6
+#
+AC_DEFUN([LN_SRC_ETHTOOL_LINK_SETTINGS], [
+ LB2_LINUX_TEST_SRC([ethtool_link_settings], [
+ #include <linux/ethtool.h>
+ ],[
+ struct ethtool_link_ksettings cmd;
+ ],[],[$EXTRA_OFED_CONFIG $EXTRA_OFED_INCLUDE])
+])
+AC_DEFUN([LN_ETHTOOL_LINK_SETTINGS], [
+ LB2_MSG_LINUX_TEST_RESULT([if 'ethtool_link_settings' exists],
+ [ethtool_link_settings], [
+ AC_DEFINE(HAVE_ETHTOOL_LINK_SETTINGS, 1,
+ [ethtool_link_settings is defined])
+ ])
+]) # LN_ETHTOOL_LINK_SETTINGS
+
+#
# LN_HAVE_HYPERVISOR_IS_TYPE
#
# 4.14 commit 79cc74155218316b9a5d28577c7077b2adba8e58
LN_SRC_CONFIG_SK_DATA_READY
# 4.x
LN_SRC_CONFIG_SOCK_CREATE_KERN
+ # 4.6
+ LN_SRC_ETHTOOL_LINK_SETTINGS
# 4.14
LN_SRC_HAVE_HYPERVISOR_IS_TYPE
LN_SRC_HAVE_ORACLE_OFED_EXTENSIONS
LN_CONFIG_SK_DATA_READY
# 4.x
LN_CONFIG_SOCK_CREATE_KERN
+ # 4.6
+ LN_ETHTOOL_LINK_SETTINGS
# 4.14
LN_HAVE_HYPERVISOR_IS_TYPE
LN_HAVE_ORACLE_OFED_EXTENSIONS
struct kib_dev *dev;
struct ib_qp_init_attr init_qp_attr = {};
struct kib_sched_info *sched;
-#ifdef HAVE_IB_CQ_INIT_ATTR
+#ifdef HAVE_OFED_IB_CQ_INIT_ATTR
struct ib_cq_init_attr cq_attr = {};
#endif
struct kib_conn *conn;
write_unlock_irqrestore(glock, flags);
-#ifdef HAVE_IB_CQ_INIT_ATTR
+#ifdef HAVE_OFED_IB_CQ_INIT_ATTR
cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
cq = ib_create_cq(cmid->device,
{
LASSERT(fpo->fpo_map_count == 0);
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) {
ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
} else
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
{
struct kib_fast_reg_descriptor *frd, *tmp;
int i = 0;
list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
-#ifndef HAVE_IB_MAP_MR_SG
+#ifndef HAVE_OFED_IB_MAP_MR_SG
ib_free_fast_reg_page_list(frd->frd_frpl);
#endif
ib_dereg_mr(frd->frd_mr);
return max(IBLND_FMR_POOL_FLUSH, size);
}
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps,
struct kib_fmr_pool *fpo)
{
return rc;
}
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps,
struct kib_fmr_pool *fpo,
struct kib_fast_reg_descriptor *frd, *tmp;
int i, rc;
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
fpo->fpo_is_fmr = false;
#endif
}
frd->frd_mr = NULL;
-#ifndef HAVE_IB_MAP_MR_SG
+#ifndef HAVE_OFED_IB_MAP_MR_SG
frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
IBLND_MAX_RDMA_FRAGS);
if (IS_ERR(frd->frd_frpl)) {
}
#endif
-#ifdef HAVE_IB_ALLOC_FAST_REG_MR
+#ifdef HAVE_OFED_IB_ALLOC_FAST_REG_MR
frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
IBLND_MAX_RDMA_FRAGS);
#else
out_middle:
if (frd->frd_mr)
ib_dereg_mr(frd->frd_mr);
-#ifndef HAVE_IB_MAP_MR_SG
+#ifndef HAVE_OFED_IB_MAP_MR_SG
if (frd->frd_frpl)
ib_free_fast_reg_page_list(frd->frd_frpl);
#endif
list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
-#ifndef HAVE_IB_MAP_MR_SG
+#ifndef HAVE_OFED_IB_MAP_MR_SG
ib_free_fast_reg_page_list(frd->frd_frpl);
#endif
ib_dereg_mr(frd->frd_mr);
fpo->fpo_hdev = kiblnd_current_hdev(dev);
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
rc = kiblnd_alloc_fmr_pool(fps, fpo);
else
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps);
if (rc)
goto out_fpo;
return now >= fpo->fpo_deadline;
}
-#if defined(HAVE_FMR_POOL_API) || !defined(HAVE_IB_MAP_MR_SG)
+#if defined(HAVE_OFED_FMR_POOL_API) || !defined(HAVE_OFED_IB_MAP_MR_SG)
static int
kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
fps = fpo->fpo_owner;
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
if (fpo->fpo_is_fmr) {
if (fmr->fmr_pfmr) {
ib_fmr_pool_unmap(fmr->fmr_pfmr);
LASSERT(!rc);
}
} else
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
{
struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
if (frd) {
struct kib_fmr_pool *fpo;
__u64 version;
bool is_rx = (rd != tx->tx_rd);
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
__u64 *pages = tx->tx_pages;
bool tx_pages_mapped = false;
int npages = 0;
fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
fpo->fpo_map_count++;
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
fmr->fmr_pfmr = NULL;
if (fpo->fpo_is_fmr) {
struct ib_pool_fmr *pfmr;
}
rc = PTR_ERR(pfmr);
} else
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
{
if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
struct kib_fast_reg_descriptor *frd;
-#ifdef HAVE_IB_MAP_MR_SG
+#ifdef HAVE_OFED_IB_MAP_MR_SG
struct ib_reg_wr *wr;
int n;
#else
list_del(&frd->frd_list);
spin_unlock(&fps->fps_lock);
-#ifndef HAVE_IB_MAP_MR_SG
+#ifndef HAVE_OFED_IB_MAP_MR_SG
frpl = frd->frd_frpl;
#endif
mr = frd->frd_mr;
ib_update_fast_reg_key(mr, key);
}
-#ifdef HAVE_IB_MAP_MR_SG
-#ifdef HAVE_IB_MAP_MR_SG_5ARGS
+#ifdef HAVE_OFED_IB_MAP_MR_SG
+#ifdef HAVE_OFED_IB_MAP_MR_SG_5ARGS
n = ib_map_mr_sg(mr, tx->tx_frags,
rd->rd_nfrags, NULL, PAGE_SIZE);
#else
n = ib_map_mr_sg(mr, tx->tx_frags,
rd->rd_nfrags, PAGE_SIZE);
-#endif /* HAVE_IB_MAP_MR_SG_5ARGS */
+#endif /* HAVE_OFED_IB_MAP_MR_SG_5ARGS */
if (unlikely(n != rd->rd_nfrags)) {
CERROR("Failed to map mr %d/%d elements\n",
n, rd->rd_nfrags);
wr->key = is_rx ? mr->rkey : mr->lkey;
wr->access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
-#else /* HAVE_IB_MAP_MR_SG */
+#else /* HAVE_OFED_IB_MAP_MR_SG */
if (!tx_pages_mapped) {
npages = kiblnd_map_tx_pages(tx, rd);
tx_pages_mapped = true;
wr->wr.wr.fast_reg.access_flags =
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
-#endif /* HAVE_IB_MAP_MR_SG */
+#endif /* HAVE_OFED_IB_MAP_MR_SG */
fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
fmr->fmr_frd = frd;
int ncpts)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
unsigned long flags;
#endif
int cpt;
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/*
* if lnd_map_on_demand is zero then we have effectively disabled
if (i > 0)
LASSERT(i == ncpts);
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
create_tx_pool:
#endif
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
hdev->ibh_page_size = 1 << PAGE_SHIFT;
hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
-#ifndef HAVE_IB_DEVICE_ATTRS
+#ifndef HAVE_OFED_IB_DEVICE_ATTRS
LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr));
if (dev_attr == NULL) {
CERROR("Out of memory\n");
hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
/* Setup device Memory Registration capabilities */
-#ifdef HAVE_FMR_POOL_API
-#ifdef HAVE_IB_DEVICE_OPS
+#ifdef HAVE_OFED_FMR_POOL_API
+#ifdef HAVE_OFED_IB_DEVICE_OPS
if (hdev->ibh_ibdev->ops.alloc_fmr &&
hdev->ibh_ibdev->ops.dealloc_fmr &&
hdev->ibh_ibdev->ops.map_phys_fmr &&
LCONSOLE_INFO("Using FMR for registration\n");
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
} else
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
LCONSOLE_INFO("Using FastReg for registration\n");
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
-#ifndef HAVE_IB_ALLOC_FAST_REG_MR
+#ifndef HAVE_OFED_IB_ALLOC_FAST_REG_MR
#ifdef IB_DEVICE_SG_GAPS_REG
if (dev_attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT;
if (rc != 0)
rc = -EINVAL;
-#ifndef HAVE_IB_DEVICE_ATTRS
+#ifndef HAVE_OFED_IB_DEVICE_ATTRS
out_clean_attr:
LIBCFS_FREE(dev_attr, sizeof(*dev_attr));
#endif
return rc;
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
static void
kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
{
if (hdev->ibh_event_handler.device != NULL)
ib_unregister_event_handler(&hdev->ibh_event_handler);
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
kiblnd_hdev_cleanup_mrs(hdev);
#endif
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
static int
kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
{
hdev->ibh_ibdev = cmid->device;
hdev->ibh_port = cmid->port_num;
-#ifdef HAVE_IB_ALLOC_PD_2ARGS
+#ifdef HAVE_OFED_IB_ALLOC_PD_2ARGS
pd = ib_alloc_pd(cmid->device, 0);
#else
pd = ib_alloc_pd(cmid->device);
goto out;
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
rc = kiblnd_hdev_setup_mrs(hdev);
if (rc != 0) {
CERROR("Can't setup device: %d\n", rc);
#include <linux/module.h>
#include <linux/kernel.h>
-#if defined(MLNX_OFED_BUILD) && !defined(HAVE_SANE_IB_DMA_MAP_SG)
+#if defined(MLNX_OFED_BUILD) && !defined(HAVE_OFED_IB_DMA_MAP_SG_SANE)
#undef CONFIG_INFINIBAND_VIRT_DMA
#endif
lock_is_held((struct lockdep_map *)&(lock)->dep_map)
#endif
-#ifdef HAVE_COMPAT_RDMA
+#ifdef HAVE_OFED_COMPAT_RDMA
#include <linux/compat-2.6.h>
#ifdef LINUX_3_17_COMPAT_H
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
#include <rdma/ib_fmr_pool.h>
#endif
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer_ni credits */
#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */
-#ifdef HAVE_RDMA_CREATE_ID_5ARG
+#ifdef HAVE_OFED_RDMA_CREATE_ID_5ARG
# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
rdma_create_id((ns) ? (ns) : &init_net, cb, dev, ps, qpt)
#else
-# ifdef HAVE_RDMA_CREATE_ID_4ARG
+# ifdef HAVE_OFED_RDMA_CREATE_ID_4ARG
# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
rdma_create_id(cb, dev, ps, qpt)
# else
enum kib_dev_caps {
IBLND_DEV_CAPS_FASTREG_ENABLED = BIT(0),
IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT = BIT(1),
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
IBLND_DEV_CAPS_FMR_ENABLED = BIT(2),
#endif
};
__u64 ibh_page_mask; /* page mask of current HCA */
__u64 ibh_mr_size; /* size of MR */
int ibh_max_qp_wr; /* maximum work requests size */
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *ibh_mrs; /* global MR */
#endif
struct ib_pd *ibh_pd; /* PD */
time64_t fps_next_retry;
};
-#ifndef HAVE_IB_RDMA_WR
+#ifndef HAVE_OFED_IB_RDMA_WR
struct ib_rdma_wr {
struct ib_send_wr wr;
};
struct kib_fast_reg_descriptor { /* For fast registration */
struct list_head frd_list;
struct ib_rdma_wr frd_inv_wr;
-#ifdef HAVE_IB_MAP_MR_SG
+#ifdef HAVE_OFED_IB_MAP_MR_SG
struct ib_reg_wr frd_fastreg_wr;
#else
struct ib_rdma_wr frd_fastreg_wr;
struct list_head fpo_list; /* chain on pool list */
struct kib_hca_dev *fpo_hdev; /* device for this pool */
struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
union {
struct {
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
struct list_head fpo_pool_list;
int fpo_pool_size;
} fast_reg;
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
};
bool fpo_is_fmr; /* True if FMR pools allocated */
#endif
struct kib_fmr {
struct kib_fmr_pool *fmr_pool; /* pool of FMR */
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
struct kib_fast_reg_descriptor *fmr_frd;
u32 fmr_key;
};
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
#ifdef HAVE_ORACLE_OFED_EXTENSIONS
#define kib_fmr_pool_map(pool, pgs, n, iov) \
ib_fmr_pool_map_phys((pool), (pgs), (n), (iov))
#endif
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
struct kib_net {
/* chain on struct kib_dev::ibd_nets */
atomic_t ibp_nconns;
};
-#ifndef HAVE_IB_INC_RKEY
+#ifndef HAVE_OFED_IB_INC_RKEY
/**
* ib_inc_rkey - increments the key portion of the given rkey. Can be used
* for calculating a new rkey for type 2 memory windows.
ib_dma_unmap_sg(hdev->ibh_ibdev, sg, nents, direction);
}
-#ifndef HAVE_IB_SG_DMA_ADDRESS
+#ifndef HAVE_OFED_IB_SG_DMA_ADDRESS
#include <linux/scatterlist.h>
#define ib_sg_dma_address(dev, sg) sg_dma_address(sg)
#define ib_sg_dma_len(dev, sg) sg_dma_len(sg)
return ib_sg_dma_len(dev, sg);
}
-#ifndef HAVE_RDMA_CONNECT_LOCKED
+#ifndef HAVE_OFED_RDMA_CONNECT_LOCKED
#define rdma_connect_locked(cmid, cpp) rdma_connect(cmid, cpp)
#endif
struct kib_conn *conn = rx->rx_conn;
struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
int rc;
LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
LASSERT(mr != NULL);
rx->rx_sge.lkey = mr->lkey;
* own this rx (and rx::rx_conn) anymore, LU-5678.
*/
kiblnd_conn_addref(conn);
-#ifdef HAVE_IB_POST_SEND_RECV_CONST
+#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
(const struct ib_recv_wr **)&bad_wrq);
#else
return -EPROTONOSUPPORT;
}
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
/*
* FMR does not support gaps but the tx has gaps then
* we should make sure that the number of fragments we'll be sending
* the fragments in one FastReg or FMR fragment.
*/
if (
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
&& !tx->tx_gaps) ||
#endif
IS_FAST_REG_DEV(dev)) {
/* FMR requires zero based address */
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
#endif
kiblnd_unmap_tx(struct kib_tx *tx)
{
if (
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
tx->tx_fmr.fmr_pfmr ||
#endif
tx->tx_fmr.fmr_frd)
}
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
static struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
* dead in the water and fail the operation.
*/
if (tunables->lnd_map_on_demand && (IS_FAST_REG_DEV(net->ibn_dev)
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
|| net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
#endif
))
{
struct kib_net *net = ni->ni_data;
struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = NULL;
#endif
__u32 nob;
nob += rd->rd_frags[i].rf_nob;
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
mr = kiblnd_find_rd_dma_mr(ni, rd);
if (mr != NULL) {
/* found pre-mapping MR */
if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
rc = -EINVAL;
else
-#ifdef HAVE_IB_POST_SEND_RECV_CONST
+#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST
rc = ib_post_send(conn->ibc_cmid->qp, wr,
(const struct ib_send_wr **)&bad);
#else
{
struct ib_sge *sge = &tx->tx_sge[tx->tx_nsge];
struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = hdev->ibh_mrs;
#endif
*sge = (struct ib_sge) {
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
.lkey = mr->lkey,
#else
.lkey = hdev->ibh_pd->local_dma_lkey,
wrq->wr.opcode = IB_WR_RDMA_WRITE;
wrq->wr.send_flags = 0;
-#ifdef HAVE_IB_RDMA_WR
+#ifdef HAVE_OFED_IB_RDMA_WR
wrq->remote_addr = kiblnd_rd_frag_addr(dstrd,
dstidx);
wrq->rkey = kiblnd_rd_frag_key(dstrd,
{
int rc;
-#ifdef HAVE_RDMA_REJECT_4ARGS
+#ifdef HAVE_OFED_RDMA_REJECT_4ARGS
rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
#else
rc = rdma_reject(cmid, rej, sizeof(*rej));
* 4. Look at the comments in kiblnd_fmr_map_tx() for an explanation of
* the behavior when transmitting with GAPS versus contiguous.
*/
-#ifdef HAVE_IB_GET_DMA_MR
+
+#ifdef HAVE_OFED_IB_GET_DMA_MR
#define MOD_STR "map on demand"
#else
#define MOD_STR "map on demand (obsolete)"
if (tunables->lnd_map_on_demand == UINT_MAX)
tunables->lnd_map_on_demand = map_on_demand;
-#ifndef HAVE_IB_GET_DMA_MR
+#ifndef HAVE_OFED_IB_GET_DMA_MR
/*
* For kernels which do not support global memory regions, always
* enable map_on_demand