#endif
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)
-#include <linux/pci-dma.h>
-#endif
#include <net/sock.h>
#include <linux/in.h>
#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4
-#define IBLND_DEV_CAPS_FASTREG_ENABLED 0x1
-#define IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT 0x2
-#define IBLND_DEV_CAPS_FMR_ENABLED 0x4
-
struct kib_tunables {
int *kib_dev_failover; /* HCA failover */
unsigned int *kib_service; /* IB service number */
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
/* 2 = LNet msg + Transfer chain */
-#define IBLND_CQ_ENTRIES(c) \
- (IBLND_RECV_WRS(c) + 2 * c->ibc_queue_depth)
+#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + kiblnd_send_wrs(c))
struct kib_hca_dev;
#define KIB_IFNAME_SIZE 256
#endif
+/* Per-HCA memory-registration capabilities, discovered at device setup.
+ * A typed enum (rather than bare #define bit masks) so the debugger and
+ * compiler can see the flag type on struct kib_dev::ibd_dev_caps. */
+enum kib_dev_caps {
+	IBLND_DEV_CAPS_FASTREG_ENABLED		= BIT(0),
+	IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT	= BIT(1),
+	IBLND_DEV_CAPS_FMR_ENABLED		= BIT(2),
+};
+
struct kib_dev {
	struct list_head ibd_list; /* chain on kib_devs */
	struct list_head ibd_fail_list; /* chain on kib_failed_devs */
	unsigned int ibd_can_failover;
	struct list_head ibd_nets;
	struct kib_hca_dev *ibd_hdev;
-	__u32 ibd_dev_caps;
+	/* IBLND_DEV_CAPS_* flags; typed as the enum instead of a raw __u32
+	 * so the field's legal values are self-documenting */
+	enum kib_dev_caps ibd_dev_caps;
};
struct kib_hca_dev {
/* message buffer (I/O addr) */
__u64 rx_msgaddr;
/* for dma_unmap_single() */
- DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);
+ DEFINE_DMA_UNMAP_ADDR(rx_msgunmap);
/* receive work item... */
struct ib_recv_wr rx_wrq;
/* ...and its memory */
/* message buffer (I/O addr) */
__u64 tx_msgaddr;
/* for dma_unmap_single() */
- DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);
+ DEFINE_DMA_UNMAP_ADDR(tx_msgunmap);
/** sge for tx_msgaddr */
struct ib_sge tx_msgsge;
/* # send work items */
struct list_head ibc_tx_queue_rsrvd;
/* active tx awaiting completion */
struct list_head ibc_active_txs;
+ /* zombie tx awaiting done */
+ struct list_head ibc_zombie_txs;
/* serialise */
spinlock_t ibc_lock;
/* the rx descs */
int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
+/* Return the number of concurrent sends allowed on @ni for a peer that
+ * negotiated protocol @version.
+ *
+ * The tunable lnd_concurrent_sends is taken as-is for V2 peers.  V1
+ * peers have a fixed queue depth of IBLND_MSG_QUEUE_SIZE_V1, so the
+ * value is clamped to [IBLND_MSG_QUEUE_SIZE_V1 / 2,
+ * IBLND_MSG_QUEUE_SIZE_V1 * 2] to stay in proportion with what the
+ * peer can actually accept. */
+static inline int
+kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
+{
+	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+	int concurrent_sends;
+
+	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+	concurrent_sends = tunables->lnd_concurrent_sends;
+
+	if (version == IBLND_MSG_VERSION_1) {
+		/* V1 queue depth is fixed: clamp to [size/2, size*2] */
+		if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+			return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+		if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+			return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+	}
+
+	return concurrent_sends;
+}
+
static inline void
kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
{
ib_dma_unmap_sg(dev, sg, nents, direction);
}
+/* Kernels >= 5.1 removed ib_sg_dma_address()/ib_sg_dma_len() from the
+ * RDMA core; fall back to the generic scatterlist accessors.  The @dev
+ * argument is unused and must NOT be forwarded: sg_dma_address() and
+ * sg_dma_len() are single-argument macros in <linux/scatterlist.h>, so
+ * passing (dev, sg) would be a "macro passed 2 arguments" build error. */
+#ifndef HAVE_IB_SG_DMA_ADDRESS
+#include <linux/scatterlist.h>
+#define ib_sg_dma_address(dev, sg) sg_dma_address(sg)
+#define ib_sg_dma_len(dev, sg) sg_dma_len(sg)
+#endif
+
static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
struct scatterlist *sg)
{
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
+void kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs);
void kiblnd_map_rx_descs(struct kib_conn *conn);
void kiblnd_unmap_rx_descs(struct kib_conn *conn);
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);