#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */
/* when eagerly to return credits */
/* NOTE(review): this hunk changes the macro to take the connection rather
 * than a bare protocol version, and clamps the tunable high-water mark to
 * the connection's queue depth - 1 so credits are returned to the peer
 * before its send queue can fill. */
-#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
+#define IBLND_CREDITS_HIGHWATER(t, conn) ((conn->ibc_version) == IBLND_MSG_VERSION_1 ? \
					IBLND_CREDIT_HIGHWATER_V1 : \
-					t->lnd_peercredits_hiw)
+					min(t->lnd_peercredits_hiw, (__u32)conn->ibc_queue_depth - 1))
/* Compatibility wrapper around rdma_create_id(), whose signature varies by
 * kernel version: 5 args (with network namespace), 4 args (with QP type),
 * or the legacy 3-arg form.  The '+' lines additionally default a NULL
 * namespace to &init_net on 5-arg kernels. */
#ifdef HAVE_RDMA_CREATE_ID_5ARG
-# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) rdma_create_id(ns, cb, \
-							     dev, ps, \
-							     qpt)
+# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
+	 rdma_create_id((ns) ? (ns) : &init_net, cb, dev, ps, qpt)
#else
# ifdef HAVE_RDMA_CREATE_ID_4ARG
-#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) rdma_create_id(cb, dev, \
-							      ps, qpt)
+#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
+	  rdma_create_id(cb, dev, ps, qpt)
# else
-#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) rdma_create_id(cb, dev, \
-							      ps)
+#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
+	  rdma_create_id(cb, dev, ps)
# endif
#endif
int ibh_page_size; /* page size of current HCA */
__u64 ibh_page_mask; /* page mask of current HCA */
__u64 ibh_mr_size; /* size of MR */
+ int ibh_max_qp_wr; /* maximum work requests size */
#ifdef HAVE_IB_GET_DMA_MR
struct ib_mr *ibh_mrs; /* global MR */
#endif
struct ib_pd *ibh_pd; /* PD */
+ u8 ibh_port; /* port number */
+ struct ib_event_handler
+ ibh_event_handler; /* IB event handler */
+ int ibh_state; /* device status */
+#define IBLND_DEV_PORT_DOWN 0
+#define IBLND_DEV_PORT_ACTIVE 1
+#define IBLND_DEV_FATAL 2
struct kib_dev *ibh_dev; /* owner */
atomic_t ibh_ref; /* refcount */
};
struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */
struct kib_dev *ibn_dev; /* underlying IB device */
+ struct lnet_ni *ibn_ni; /* LNet interface */
};
#define KIB_THREAD_SHIFT 16
int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
+/* Transaction timeout in seconds: the o2iblnd "timeout" module parameter
+ * when set (non-zero), otherwise the LNet-wide LND timeout default. */
+static inline int kiblnd_timeout(void)
+{
+	return *kiblnd_tunables.kib_timeout ? *kiblnd_tunables.kib_timeout :
+		lnet_get_lnd_timeout();
+}
+
static inline int
kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
{
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
+ IBLND_CREDITS_HIGHWATER(tunables, conn) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
return rd->rd_frags[index].rf_addr;
}
-static inline __u32
+static inline int
kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
{
return rd->rd_frags[index].rf_nob;
/* Shim for kernels that removed ib_sg_dma_address()/ib_sg_dma_len(): map
 * them to the generic scatterlist accessors.  Fix in this hunk:
 * sg_dma_address()/sg_dma_len() take only the sg entry — the old '-'
 * definitions wrongly forwarded the device pointer as the first argument. */
#ifndef HAVE_IB_SG_DMA_ADDRESS
#include <linux/scatterlist.h>
-#define ib_sg_dma_address(dev, sg) sg_dma_address((dev), (sg))
-#define ib_sg_dma_len(dev, sg) sg_dma_len((dev), (sg))
+#define ib_sg_dma_address(dev, sg) sg_dma_address(sg)
+#define ib_sg_dma_len(dev, sg) sg_dma_len(sg)
#endif
static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
/* NOTE(review): prototype updated for the LNet API change that replaced the
 * separate kvec/lnet_kiov_t buffer pair with a single bio_vec array —
 * confirm the matching kiblnd_recv() definition (o2iblnd_cb.c) is converted
 * in the same series. */
int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
-		int delayed, unsigned int niov, struct kvec *iov,
-		lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+		int delayed, unsigned int niov,
+		struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
		unsigned int rlen);