struct kib_tunables {
int *kib_dev_failover; /* HCA failover */
unsigned int *kib_service; /* IB service number */
- int *kib_min_reconnect_interval; /* first failed connection retry... */
- int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
int *kib_cksum; /* checksum struct kib_msg? */
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
- int *kib_ntx; /* # tx descs */
char **kib_default_ipif; /* default IPoIB interface */
	int *kib_retry_count; /* connection retry count */
	int *kib_rnr_retry_count; /* RNR (receiver-not-ready) retry count */
#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */
/* when to eagerly return credits */
-#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
+#define IBLND_CREDITS_HIGHWATER(t, conn) (((conn)->ibc_version) == IBLND_MSG_VERSION_1 ? \
IBLND_CREDIT_HIGHWATER_V1 : \
- t->lnd_peercredits_hiw)
+					min((t)->lnd_peercredits_hiw, (__u32)(conn)->ibc_queue_depth - 1))
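/*
 * Illustrative sketch (not part of the header): how the new clamp
 * behaves. With the tunable asking for a highwater of 64 but a
 * negotiated queue depth of only 16, credits are now returned
 * eagerly from 15 outstanding onward, instead of a threshold the
 * connection could never reach. The stub types below are
 * assumptions standing in for the real structures; the V1 constant
 * matches the tree's IBLND_CREDIT_HIGHWATER_V1 of 7. Buildable as a
 * standalone userspace program.
 */
#include <stdio.h>

#define IBLND_MSG_VERSION_1		0x11
#define IBLND_CREDIT_HIGHWATER_V1	7
#define min(a, b)			((a) < (b) ? (a) : (b))

struct stub_tunables { unsigned int lnd_peercredits_hiw; };
struct stub_conn { int ibc_version; unsigned int ibc_queue_depth; };

#define CREDITS_HIGHWATER(t, conn)				\
	(((conn)->ibc_version) == IBLND_MSG_VERSION_1 ?		\
	 IBLND_CREDIT_HIGHWATER_V1 :				\
	 min((t)->lnd_peercredits_hiw,				\
	     (unsigned int)(conn)->ibc_queue_depth - 1))

int main(void)
{
	struct stub_tunables t = { .lnd_peercredits_hiw = 64 };
	struct stub_conn c = { .ibc_version = 0x12, .ibc_queue_depth = 16 };

	printf("highwater = %u\n", CREDITS_HIGHWATER(&t, &c)); /* prints 15 */
	return 0;
}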
#ifdef HAVE_RDMA_CREATE_ID_5ARG
-# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
- cb, dev, \
- ps, qpt)
+# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) rdma_create_id(ns, cb, \
+ dev, ps, \
+ qpt)
#else
# ifdef HAVE_RDMA_CREATE_ID_4ARG
-# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, \
- ps, qpt)
+# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) rdma_create_id(cb, dev, \
+ ps, qpt)
# else
-# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps)
+# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) rdma_create_id(cb, dev, \
+ ps)
# endif
#endif
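/*
 * Sketch of a call site after the change (illustrative; locking and
 * error paths trimmed). All three kernel variants now take the same
 * argument list, and the caller decides which namespace the cm_id
 * lives in instead of the wrapper reading current->nsproxy->net_ns,
 * which is meaningless from a kernel thread. kiblnd_cm_callback,
 * RDMA_PS_TCP and IB_QPT_RC are the values used elsewhere in the LND.
 */
static int example_listener(struct net *ns, void *arg)
{
	struct rdma_cm_id *cmid;

	cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, arg,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cmid))
		return PTR_ERR(cmid);

	rdma_destroy_id(cmid);
	return 0;
}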
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
-/* 2 = LNet msg + Transfer chain */
-#define IBLND_CQ_ENTRIES(c) \
- (IBLND_RECV_WRS(c) + 2 * kiblnd_concurrent_sends(c->ibc_version, \
- c->ibc_peer->ibp_ni))
+#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + kiblnd_send_wrs(c))
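/*
 * Not the actual helper: a plausible shape for kiblnd_send_wrs(),
 * loudly hedged. It assumes the helper charges one WR for the LNet
 * message plus one per RDMA transfer fragment (the old
 * "2 = LNet msg + Transfer chain" accounting removed above), scaled
 * by the number of in-flight transfers the queue depth allows. The
 * real definition in the tree is authoritative.
 */
static inline int example_send_wrs(struct kib_conn *conn)
{
	/* one WR for the LNet message, plus the transfer chain */
	int wrs = 1 + conn->ibc_max_frags;

	/* up to ibc_queue_depth transfers may be in flight at once */
	return wrs * conn->ibc_queue_depth;
}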
struct kib_hca_dev;
int ibh_page_shift; /* page shift of current HCA */
int ibh_page_size; /* page size of current HCA */
__u64 ibh_page_mask; /* page mask of current HCA */
- int ibh_mr_shift; /* bits shift of max MR size */
__u64 ibh_mr_size; /* size of MR */
	int ibh_max_qp_wr; /* max # of work requests per QP */
#ifdef HAVE_IB_GET_DMA_MR
struct ib_mr *ibh_mrs; /* global MR */
#endif
u32 fmr_key;
};
+#ifdef HAVE_ORACLE_OFED_EXTENSIONS
+#define kib_fmr_pool_map(pool, pgs, n, iov) \
+ ib_fmr_pool_map_phys((pool), (pgs), (n), (iov), NULL)
+#else
+#define kib_fmr_pool_map(pool, pgs, n, iov) \
+ ib_fmr_pool_map_phys((pool), (pgs), (n), (iov))
+#endif
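/*
 * Illustrative use of the wrapper (error handling trimmed; a real
 * caller keeps pfmr and unmaps it later). Oracle's OFED variant of
 * ib_fmr_pool_map_phys() takes one extra trailing argument, which
 * the wrapper passes as NULL, so call sites stay identical on both
 * stacks. The pool/pages/iov names are placeholders.
 */
static inline int example_fmr_map(struct ib_fmr_pool *pool, u64 *pages,
				  int npages, u64 iov)
{
	struct ib_pool_fmr *pfmr;

	pfmr = kib_fmr_pool_map(pool, pages, npages, iov);
	return IS_ERR(pfmr) ? PTR_ERR(pfmr) : 0;
}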
+
struct kib_net {
/* chain on struct kib_dev::ibd_nets */
struct list_head ibn_list;
struct kib_conn *rx_conn;
/* # bytes received (-1 while posted) */
int rx_nob;
- /* completion status */
- enum ib_wc_status rx_status;
/* message buffer (host vaddr) */
struct kib_msg *rx_msg;
/* message buffer (I/O addr) */
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
+ IBLND_CREDITS_HIGHWATER(tunables, conn) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
ib_dma_unmap_sg(dev, sg, nents, direction);
}
+#ifndef HAVE_IB_SG_DMA_ADDRESS
+#include <linux/scatterlist.h>
+#define ib_sg_dma_address(dev, sg) sg_dma_address(sg)
+#define ib_sg_dma_len(dev, sg) sg_dma_len(sg)
+#endif
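/*
 * Illustrative only: with the fallback in place, per-entry DMA
 * addresses and lengths come straight from the scatterlist on
 * kernels that dropped the ib_sg_dma_*() helpers, and behaviour is
 * unchanged on older kernels where those helpers still exist.
 */
static inline void example_dump_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		pr_debug("sg[%d]: dma 0x%llx len %u\n", i,
			 (unsigned long long)ib_sg_dma_address(dev, s),
			 ib_sg_dma_len(dev, s));
}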
+
static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
struct scatterlist *sg)
{
struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);
-int kiblnd_dev_failover(struct kib_dev *dev);
+int kiblnd_dev_failover(struct kib_dev *dev, struct net *ns);
int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
lnet_nid_t nid);
void kiblnd_destroy_peer(struct kib_peer_ni *peer);
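/*
 * Sketch of the updated failover call (illustrative). The namespace
 * now travels with the NI rather than being taken from `current`, so
 * a failover triggered from an LND kernel thread still lands in the
 * namespace the NI was configured in. ni_net_ns is an assumption here:
 * the namespace recorded when the NI was brought up.
 */
static void example_failover(struct kib_dev *dev, struct lnet_ni *ni)
{
	if (dev->ibd_can_failover)
		(void)kiblnd_dev_failover(dev, ni->ni_net_ns);
}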