#include <linux/module.h>
#include <linux/kernel.h>
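+/*
+ * When building against an external OFED stack that lacks a sane
+ * ib_dma_map_sg() (HAVE_OFED_IB_DMA_MAP_SG_SANE), force the virt-DMA
+ * support off.
+ */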
+#if defined(EXTERNAL_OFED_BUILD) && !defined(HAVE_OFED_IB_DMA_MAP_SG_SANE)
+#undef CONFIG_INFINIBAND_VIRT_DMA
+#endif
+
#if defined(NEED_LOCKDEP_IS_HELD_DISCARD_CONST) \
&& defined(CONFIG_LOCKDEP) \
&& defined(lockdep_is_held)
#undef lockdep_is_held
#define lockdep_is_held(lock) \
	lock_is_held((struct lockdep_map *)&(lock)->dep_map)
#endif
-#ifdef HAVE_COMPAT_RDMA
+#ifdef HAVE_OFED_COMPAT_RDMA
#include <linux/compat-2.6.h>
#ifdef LINUX_3_17_COMPAT_H
#define HAVE_NLA_PARSE_6_PARAMS 1
#define HAVE_NETLINK_EXTACK 1
-
/* MOFED has its own bitmap_alloc backport */
#define HAVE_BITMAP_ALLOC 1
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
#include <rdma/ib_fmr_pool.h>
#endif
#define DEBUG_SUBSYSTEM S_LND
#include <lnet/lib-lnet.h>
+#include <lnet/lnet_rdma.h>
#include "o2iblnd-idl.h"
-#define IBLND_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */
+enum kiblnd_ni_lnd_tunables_attr {
+ LNET_NET_O2IBLND_TUNABLES_ATTR_UNSPEC = 0,
+
+ LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_NTX,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT,
+ LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TOS,
+ __LNET_NET_O2IBLND_TUNABLES_ATTR_MAX_PLUS_ONE,
+};
+#define LNET_NET_O2IBLND_TUNABLES_ATTR_MAX (__LNET_NET_O2IBLND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
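+/* i.e. _ATTR_MAX names the last real attribute (LND_TOS above) */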
+
+#define IBLND_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */
#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4
};
extern struct kib_tunables kiblnd_tunables;
+extern struct lnet_ioctl_config_o2iblnd_tunables kib_default_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer_ni credits */
#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */
-/* when eagerly to return credits */
-#define IBLND_CREDITS_HIGHWATER(t, conn) ((conn->ibc_version) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- min(t->lnd_peercredits_hiw, (__u32)conn->ibc_queue_depth - 1))
-
-#ifdef HAVE_RDMA_CREATE_ID_5ARG
+#ifdef HAVE_OFED_RDMA_CREATE_ID_5ARG
# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
rdma_create_id((ns) ? (ns) : &init_net, cb, dev, ps, qpt)
#else
-# ifdef HAVE_RDMA_CREATE_ID_4ARG
+# ifdef HAVE_OFED_RDMA_CREATE_ID_4ARG
# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
rdma_create_id(cb, dev, ps, qpt)
# else
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
-#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
-#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
+/* max size of queued messages (inc hdr) */
+#define IBLND_MSG_SIZE (4<<10)
+/* max # of fragments supported. + 1 for unaligned case */
+#define IBLND_MAX_RDMA_FRAGS (LNET_MAX_IOV + 1)
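+/*
+ * e.g. LNET_MAX_IOV pages worth of data that starts part-way into the
+ * first page spills over into one extra page, hence the + 1 above.
+ */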
/************************/
/* derived constants... */
enum kib_dev_caps {
IBLND_DEV_CAPS_FASTREG_ENABLED = BIT(0),
IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT = BIT(1),
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
IBLND_DEV_CAPS_FMR_ENABLED = BIT(2),
#endif
};
+#define IS_FAST_REG_DEV(dev) \
+ ((dev)->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
+
struct kib_dev {
struct list_head ibd_list; /* chain on kib_devs */
struct list_head ibd_fail_list; /* chain on kib_failed_devs */
__u64 ibh_page_mask; /* page mask of current HCA */
__u64 ibh_mr_size; /* size of MR */
int ibh_max_qp_wr; /* maximum work requests size */
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *ibh_mrs; /* global MR */
#endif
struct ib_pd *ibh_pd; /* PD */
time64_t fps_next_retry;
};
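+/*
+ * Minimal stand-in for kernels without struct ib_rdma_wr, so common
+ * code can always reference the embedded ib_send_wr uniformly as ->wr.
+ */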
-#ifndef HAVE_IB_RDMA_WR
+#ifndef HAVE_OFED_IB_RDMA_WR
struct ib_rdma_wr {
struct ib_send_wr wr;
};
struct kib_fast_reg_descriptor { /* For fast registration */
struct list_head frd_list;
struct ib_rdma_wr frd_inv_wr;
-#ifdef HAVE_IB_MAP_MR_SG
+#ifdef HAVE_OFED_IB_MAP_MR_SG
struct ib_reg_wr frd_fastreg_wr;
#else
struct ib_rdma_wr frd_fastreg_wr;
#endif
struct ib_mr *frd_mr;
bool frd_valid;
+ bool frd_posted;
};
struct kib_fmr_pool {
struct list_head fpo_list; /* chain on pool list */
struct kib_hca_dev *fpo_hdev; /* device for this pool */
struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
union {
struct {
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
struct list_head fpo_pool_list;
int fpo_pool_size;
} fast_reg;
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
};
bool fpo_is_fmr; /* True if FMR pools allocated */
#endif
struct kib_fmr {
struct kib_fmr_pool *fmr_pool; /* pool of FMR */
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
struct kib_fast_reg_descriptor *fmr_frd;
u32 fmr_key;
};
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
#ifdef HAVE_ORACLE_OFED_EXTENSIONS
#define kib_fmr_pool_map(pool, pgs, n, iov) \
ib_fmr_pool_map_phys((pool), (pgs), (n), (iov))
#endif
-#endif /* HAVE_FMR_POOL_API */
+#endif /* HAVE_OFED_FMR_POOL_API */
struct kib_net {
/* chain on struct kib_dev::ibd_nets */
/* # tx callbacks outstanding */
short tx_sending;
/* queued for sending */
- short tx_queued;
+ unsigned long tx_queued:1,
/* waiting for peer_ni */
- short tx_waiting;
+ tx_waiting:1,
+ /* force RDMA */
+ tx_gpu:1;
/* LNET completion status */
int tx_status;
/* health status of the transmit */
enum lnet_msg_hstatus tx_hstatus;
/* message buffer (I/O addr) */
__u64 tx_msgaddr;
/* for dma_unmap_single() */
DEFINE_DMA_UNMAP_ADDR(tx_msgunmap);
- /** sge for tx_msgaddr */
- struct ib_sge tx_msgsge;
/* # send work items */
int tx_nwrq;
/* # used scatter/gather elements */
/* when (in seconds) I was last alive */
time64_t ibp_last_alive;
/* # users */
- atomic_t ibp_refcount;
+ struct kref ibp_kref;
/* version of peer_ni */
__u16 ibp_version;
/* current passive connection attempts */
__u16 ibp_queue_depth;
/* reduced value which allows conn to be created if max fails */
__u16 ibp_queue_depth_mod;
+ /* Number of connections allocated. */
+ atomic_t ibp_nconns;
};
-#ifndef HAVE_IB_INC_RKEY
+#ifndef HAVE_OFED_IB_INC_RKEY
/**
* ib_inc_rkey - increments the key portion of the given rkey. Can be used
* for calculating a new rkey for type 2 memory windows.
return dev->ibd_can_failover;
}
-#define kiblnd_conn_addref(conn) \
-do { \
- CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- atomic_inc(&(conn)->ibc_refcount); \
-} while (0)
-
-#define kiblnd_conn_decref(conn) \
-do { \
- unsigned long flags; \
- \
- CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
- if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- list_add_tail(&(conn)->ibc_list, \
- &kiblnd_data.kib_connd_zombies); \
- wake_up(&kiblnd_data.kib_connd_waitq); \
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
- } \
-} while (0)
-
-#define kiblnd_peer_addref(peer_ni) \
-do { \
- CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)++\n", \
- (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid), \
- atomic_read (&(peer_ni)->ibp_refcount)); \
- atomic_inc(&(peer_ni)->ibp_refcount); \
-} while (0)
-
-#define kiblnd_peer_decref(peer_ni) \
-do { \
- CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)--\n", \
- (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid), \
- atomic_read (&(peer_ni)->ibp_refcount)); \
- LASSERT_ATOMIC_POS(&(peer_ni)->ibp_refcount); \
- if (atomic_dec_and_test(&(peer_ni)->ibp_refcount)) \
- kiblnd_destroy_peer(peer_ni); \
-} while (0)
+static inline void kiblnd_conn_addref(struct kib_conn *conn)
+{
+#ifdef O2IBLND_CONN_REFCOUNT_DEBUG
+ CDEBUG(D_NET, "conn[%p] (%d)++\n",
+ (conn), atomic_read(&(conn)->ibc_refcount));
+#endif
+ atomic_inc(&(conn)->ibc_refcount);
+}
+
+static inline void kiblnd_conn_decref(struct kib_conn *conn)
+{
+ unsigned long flags;
+#ifdef O2IBLND_CONN_REFCOUNT_DEBUG
+ CDEBUG(D_NET, "conn[%p] (%d)--\n",
+ (conn), atomic_read(&(conn)->ibc_refcount));
+#endif
+ LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);
+ if (atomic_dec_and_test(&(conn)->ibc_refcount)) {
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ list_add_tail(&(conn)->ibc_list,
+ &kiblnd_data.kib_connd_zombies);
+ wake_up(&kiblnd_data.kib_connd_waitq);
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ }
+}
+
+void kiblnd_destroy_peer(struct kref *kref);
+
+static inline void kiblnd_peer_addref(struct kib_peer_ni *peer_ni)
+{
+ CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)++\n",
+ peer_ni, libcfs_nid2str(peer_ni->ibp_nid),
+ kref_read(&peer_ni->ibp_kref));
+ kref_get(&(peer_ni)->ibp_kref);
+}
+
+static inline void kiblnd_peer_decref(struct kib_peer_ni *peer_ni)
+{
+ CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)--\n",
+ peer_ni, libcfs_nid2str(peer_ni->ibp_nid),
+ kref_read(&peer_ni->ibp_kref));
+ kref_put(&peer_ni->ibp_kref, kiblnd_destroy_peer);
+}
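+/*
+ * Usage sketch: kiblnd_create_peer() starts ibp_kref at 1; every
+ * lookup that hands out the peer_ni pairs with a kiblnd_peer_decref(),
+ * and the final put releases through kiblnd_destroy_peer().
+ */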
static inline bool
kiblnd_peer_connecting(struct kib_peer_ni *peer_ni)
ktime_add_ns(conn->ibc_last_send, keepalive_ns));
}
+/* when to return credits eagerly */
+static inline int
+kiblnd_credits_highwater(struct lnet_ioctl_config_o2iblnd_tunables *t,
+ struct lnet_ioctl_config_lnd_cmn_tunables *nt,
+ struct kib_conn *conn)
+{
+ int credits_hiw = IBLND_CREDIT_HIGHWATER_V1;
+
+	if (conn->ibc_version == IBLND_MSG_VERSION_1)
+ return credits_hiw;
+
+ /* if queue depth is negotiated down, calculate hiw proportionally */
+ credits_hiw = (conn->ibc_queue_depth * t->lnd_peercredits_hiw) /
+ nt->lct_peer_tx_credits;
+
+ return credits_hiw;
+}
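+/*
+ * e.g. with lct_peer_tx_credits == 32, lnd_peercredits_hiw == 16 and
+ * a queue depth negotiated down to 16, hiw = 16 * 16 / 32 = 8.
+ */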
+
static inline int
kiblnd_need_noop(struct kib_conn *conn)
{
struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ net_tunables = &ni->ni_net->net_tunables;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(tunables, conn) &&
+ kiblnd_credits_highwater(tunables, net_tunables, conn) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a) (a)
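+/*
+ * DMA-map the tx fragment list: GPU buffers (tx_gpu) go through the
+ * lnet_rdma_* helpers, everything else through the usual ib_dma_*
+ * calls.  As with ib_dma_map_sg(), the map call returns the number of
+ * mapped entries and 0 on failure.
+ */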
-static inline int kiblnd_dma_map_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static inline
+int kiblnd_dma_map_sg(struct kib_hca_dev *hdev, struct kib_tx *tx)
{
- return ib_dma_map_sg(dev, sg, nents, direction);
+ struct scatterlist *sg = tx->tx_frags;
+ int nents = tx->tx_nfrags;
+ enum dma_data_direction direction = tx->tx_dmadir;
+
+ if (tx->tx_gpu)
+ return lnet_rdma_map_sg_attrs(hdev->ibh_ibdev->dma_device,
+ sg, nents, direction);
+
+ return ib_dma_map_sg(hdev->ibh_ibdev, sg, nents, direction);
}
-static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static inline
+void kiblnd_dma_unmap_sg(struct kib_hca_dev *hdev, struct kib_tx *tx)
{
- ib_dma_unmap_sg(dev, sg, nents, direction);
+ struct scatterlist *sg = tx->tx_frags;
+ int nents = tx->tx_nfrags;
+ enum dma_data_direction direction = tx->tx_dmadir;
+
+ if (tx->tx_gpu)
+ lnet_rdma_unmap_sg(hdev->ibh_ibdev->dma_device,
+ sg, nents, direction);
+ else
+ ib_dma_unmap_sg(hdev->ibh_ibdev, sg, nents, direction);
}
-#ifndef HAVE_IB_SG_DMA_ADDRESS
+#ifndef HAVE_OFED_IB_SG_DMA_ADDRESS
#include <linux/scatterlist.h>
#define ib_sg_dma_address(dev, sg) sg_dma_address(sg)
#define ib_sg_dma_len(dev, sg) sg_dma_len(sg)
return ib_sg_dma_len(dev, sg);
}
-#ifndef HAVE_RDMA_CONNECT_LOCKED
+#ifndef HAVE_OFED_RDMA_CONNECT_LOCKED
#define rdma_connect_locked(cmid, cpp) rdma_connect(cmid, cpp)
#endif
int kiblnd_connd (void *arg);
int kiblnd_scheduler(void *arg);
-int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
+#define kiblnd_thread_start(fn, data, namefmt, arg...) \
+ ({ \
+ struct task_struct *__task = kthread_run(fn, data, \
+ namefmt, ##arg); \
+ if (!IS_ERR(__task)) \
+ atomic_inc(&kiblnd_data.kib_nthreads); \
+ PTR_ERR_OR_ZERO(__task); \
+ })
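+/* e.g.: rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); */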
+
int kiblnd_failover_thread (void *arg);
int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);
int kiblnd_dev_failover(struct kib_dev *dev, struct net *ns);
int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
lnet_nid_t nid);
-void kiblnd_destroy_peer(struct kib_peer_ni *peer);
bool kiblnd_reconnect_peer(struct kib_peer_ni *peer);
void kiblnd_destroy_dev(struct kib_dev *dev);
void kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni);
int delayed, unsigned int niov,
struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen);
+unsigned int kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx);
+
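+/*
+ * struct netdev_notifier_info only exists from kernel 3.11 on; older
+ * kernels pass the net_device itself, so the accessor is an identity
+ * mapping there.
+ */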
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+#undef netdev_notifier_info_to_dev
+#define netdev_notifier_info_to_dev(ndev) ndev
+#endif
+#define kiblnd_dump_conn_dbg(conn) \
+({ \
+ if (conn && conn->ibc_cmid) \
+ CDEBUG(D_NET, "conn %p state %d nposted %d/%d c/o/r %d/%d/%d ce %d : cm_id %p qp_num 0x%x device_name %s\n", \
+ conn, \
+ conn->ibc_state, \
+ conn->ibc_noops_posted, \
+ conn->ibc_nsends_posted, \
+ conn->ibc_credits, \
+ conn->ibc_outstanding_credits, \
+ conn->ibc_reserved_credits, \
+ conn->ibc_comms_error, \
+ conn->ibc_cmid, \
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0, \
+ conn->ibc_cmid->qp ? (conn->ibc_cmid->qp->device ? dev_name(&conn->ibc_cmid->qp->device->dev) : "NULL") : "NULL"); \
+ else if (conn) \
+ CDEBUG(D_NET, "conn %p state %d nposted %d/%d c/o/r %d/%d/%d ce %d : cm_id NULL\n", \
+ conn, \
+ conn->ibc_state, \
+ conn->ibc_noops_posted, \
+ conn->ibc_nsends_posted, \
+ conn->ibc_credits, \
+ conn->ibc_outstanding_credits, \
+ conn->ibc_reserved_credits, \
+ conn->ibc_comms_error \
+ ); \
+})