* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a registered trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd.h
*
* Author: Eric Barton <eric@bartonsoftware.com>
*/
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
-#include <linux/random.h>
#include <linux/pci.h>
#include <net/sock.h>
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
+#include <lnet/lnet-sysctl.h>
#if !HAVE_GFP_T
typedef int gfp_t;
#endif
/* tunables fixed at compile time */
#ifdef CONFIG_SMP
-# define IBLND_N_SCHED num_online_cpus() /* # schedulers */
+# define IBLND_N_SCHED cfs_num_online_cpus() /* # schedulers */
#else
# define IBLND_N_SCHED 1 /* # schedulers */
#endif
#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
#define IBLND_RESCHED 100 /* # scheduler loops before reschedule */
-#define IBLND_MSG_QUEUE_SIZE 8 /* # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER 7 /* when eagerly to return credits */
-#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
-
-#define IBLND_MAP_ON_DEMAND 0
-#if IBLND_MAP_ON_DEMAND
-# define IBLND_MAX_RDMA_FRAGS 1
-#else
-# define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV
-#endif
-
-/************************/
-/* derived constants... */
-
-/* TX messages (shared by all connections) */
-#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
-#define IBLND_TX_MSG_BYTES() (IBLND_TX_MSGS() * IBLND_MSG_SIZE)
-#define IBLND_TX_MSG_PAGES() ((IBLND_TX_MSG_BYTES() + PAGE_SIZE - 1)/PAGE_SIZE)
-
-/* RX messages (per connection) */
-#define IBLND_RX_MSGS (IBLND_MSG_QUEUE_SIZE * 2)
-#define IBLND_RX_MSG_BYTES (IBLND_RX_MSGS * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES ((IBLND_RX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
-
-/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS IBLND_RX_MSGS
-#define IBLND_SEND_WRS ((*kiblnd_tunables.kib_concurrent_sends) * \
- (1 + IBLND_MAX_RDMA_FRAGS))
-#define IBLND_CQ_ENTRIES() (IBLND_RECV_WRS + IBLND_SEND_WRS)
typedef struct
{
+ int *kib_dev_failover; /* HCA failover */
unsigned int *kib_service; /* IB service number */
int *kib_min_reconnect_interval; /* first failed connection retry... */
int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
int *kib_credits; /* # concurrent sends */
- int *kib_peercredits; /* # concurrent sends to 1 peer */
+ int *kib_peertxcredits; /* # concurrent sends to 1 peer */
+ int *kib_peerrtrcredits; /* # per-peer router buffer credits */
+ int *kib_peercredits_hiw; /* # when eagerly to return credits */
+ int *kib_peertimeout; /* seconds to consider peer dead */
char **kib_default_ipif; /* default IPoIB interface */
int *kib_retry_count;
int *kib_rnr_retry_count;
int *kib_concurrent_sends; /* send work queue sizing */
int *kib_ib_mtu; /* IB MTU */
-#if IBLND_MAP_ON_DEMAND
+ int *kib_map_on_demand; /* map-on-demand if RD has more fragments
+ * than this value; 0 disables map-on-demand */
+ int *kib_pmr_pool_size; /* # physical MR in pool */
int *kib_fmr_pool_size; /* # FMRs in pool */
int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
int *kib_fmr_cache; /* enable FMR pool cache? */
-#endif
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kib_sysctl; /* sysctl interface */
#endif
+ int *kib_require_priv_port;/* accept only privileged ports */
+ int *kib_use_priv_port; /* use privileged port for active connect */
} kib_tunables_t;
-typedef struct
+extern kib_tunables_t kiblnd_tunables;
+
+#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
+#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
+
+#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
+#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */
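+/* NB: "((typeof(f)) - 1)" parses as a cast, converting -1 to the unsigned
+ * wire type of ibm_credits, i.e. the largest value that field can carry */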
+
+#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_MSG_QUEUE_SIZE_V1 : \
+ *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
+#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_CREDIT_HIGHWATER_V1 : \
+ *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+
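+/* newer kernels/OFED add a QP type argument to rdma_create_id(); hide the
+ * difference behind one wrapper so callers stay #ifdef-free */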
+#ifdef HAVE_RDMA_CREATE_ID_4ARG
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
+#else
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps)
+#endif
+
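+/* V1 peers assume the fixed V1 queue depth, so clamp the configured
+ * concurrent_sends to [IBLND_MSG_QUEUE_SIZE_V1 / 2,
+ * IBLND_MSG_QUEUE_SIZE_V1 * 2], i.e. [4, 16], when talking to them */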
+static inline int
+kiblnd_concurrent_sends_v1(void)
{
- int ibp_npages; /* # pages */
- struct page *ibp_pages[0];
-} kib_pages_t;
+ if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+ if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+
+ return *kiblnd_tunables.kib_concurrent_sends;
+}
+
+#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ kiblnd_concurrent_sends_v1() : \
+ *kiblnd_tunables.kib_concurrent_sends)
+/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
+#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
+#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
+
+#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
+#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+ *kiblnd_tunables.kib_map_on_demand : \
+ IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
+#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
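+/* V1 peers can't negotiate the fragment count, so they always assume
+ * IBLND_MAX_RDMA_FRAGS; V2 peers use the (usually smaller) map-on-demand
+ * value, which shrinks the send work queue sized below */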
+
+/************************/
+/* derived constants... */
+
+/* TX messages (shared by all connections) */
+#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
+
+/* RX messages (per connection) */
+#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
+#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
-typedef struct
+/* WRs and CQEs (per connection) */
+#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
+#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
+#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
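+/* e.g. a V2 connection with the default 8 peer tx credits posts
+ * IBLND_RX_MSGS = 8 * 2 + 2 = 18 receive buffers, i.e. 18 * 4K = 72K of
+ * message buffers, or 18 pages on a 4K-page host */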
+
+struct kib_hca_dev;
+
+/* o2iblnd can run over aliased interface */
+#ifdef IFALIASZ
+#define KIB_IFNAME_SIZE IFALIASZ
+#else
+#define KIB_IFNAME_SIZE 256
+#endif
+
+typedef struct
{
- struct list_head ibd_list; /* chain on kib_devs */
+ cfs_list_t ibd_list; /* chain on kib_devs */
+ cfs_list_t ibd_fail_list; /* chain on kib_failed_devs */
__u32 ibd_ifip; /* IPoIB interface IP */
- char ibd_ifname[32]; /* IPoIB interface name */
+ /** IPoIB interface name */
+ char ibd_ifname[KIB_IFNAME_SIZE];
int ibd_nnets; /* # nets extant */
- struct rdma_cm_id *ibd_cmid; /* IB listener (bound to 1 device) */
- struct ib_pd *ibd_pd; /* PD for the device */
- struct ib_mr *ibd_mr; /* MR for non RDMA I/O */
+ cfs_time_t ibd_next_failover;
+ int ibd_failed_failover; /* # failover failures */
+ unsigned int ibd_failover; /* failover in progress */
+ unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
+ cfs_list_t ibd_nets;
+ struct kib_hca_dev *ibd_hdev;
} kib_dev_t;
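+/* on failover the device is rebound to a fresh kib_hca_dev_t and ibd_hdev
+ * is re-pointed; connections pin the old hdev via ibh_ref until their QPs
+ * are torn down */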
+typedef struct kib_hca_dev
+{
+ struct rdma_cm_id *ibh_cmid; /* listener cmid */
+ struct ib_device *ibh_ibdev; /* IB device */
+ int ibh_page_shift; /* page shift of current HCA */
+ int ibh_page_size; /* page size of current HCA */
+ __u64 ibh_page_mask; /* page mask of current HCA */
+ int ibh_mr_shift; /* bits shift of max MR size */
+ __u64 ibh_mr_size; /* size of MR */
+ int ibh_nmrs; /* # of global MRs */
+ struct ib_mr **ibh_mrs; /* global MR */
+ struct ib_pd *ibh_pd; /* PD */
+ kib_dev_t *ibh_dev; /* owner */
+ cfs_atomic_t ibh_ref; /* refcount */
+} kib_hca_dev_t;
+
+/** # of seconds to keep pool alive */
+#define IBLND_POOL_DEADLINE 300
+/** # of seconds to retry if allocation failed */
+#define IBLND_POOL_RETRY 1
+
typedef struct
{
- __u64 ibn_incarnation; /* my epoch */
- int ibn_init; /* initialisation state */
- int ibn_shutdown; /* shutting down? */
+ int ibp_npages; /* # pages */
+ struct page *ibp_pages[0]; /* page array */
+} kib_pages_t;
- atomic_t ibn_npeers; /* # peers extant */
- atomic_t ibn_nconns; /* # connections extant */
+struct kib_pmr_pool;
- struct kib_tx *ibn_tx_descs; /* all the tx descriptors */
- kib_pages_t *ibn_tx_pages; /* premapped tx msg pages */
- struct list_head ibn_idle_txs; /* idle tx descriptors */
- spinlock_t ibn_tx_lock; /* serialise */
+typedef struct {
+ cfs_list_t pmr_list; /* chain node */
+ struct ib_phys_buf *pmr_ipb; /* physical buffer */
+ struct ib_mr *pmr_mr; /* IB MR */
+ struct kib_pmr_pool *pmr_pool; /* owner of this MR */
+ __u64 pmr_iova; /* Virtual I/O address */
+ int pmr_refcount; /* reference count */
+} kib_phys_mr_t;
-#if IBLND_MAP_ON_DEMAND
- struct ib_fmr_pool *ibn_fmrpool; /* FMR pool for RDMA I/O */
-#endif
+struct kib_pool;
+struct kib_poolset;
- kib_dev_t *ibn_dev; /* underlying IB device */
-} kib_net_t;
+typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps, int inc, struct kib_pool **pp_po);
+typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po,
+ cfs_list_t *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po,
+ cfs_list_t *node);
+
+struct kib_net;
+
+#define IBLND_POOL_NAME_LEN 32
+
+typedef struct kib_poolset
+{
+ cfs_spinlock_t ps_lock; /* serialize */
+ struct kib_net *ps_net; /* network it belongs to */
+ char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
+ cfs_list_t ps_pool_list; /* list of pools */
+ cfs_list_t ps_failed_pool_list; /* failed pool list */
+ cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
+ int ps_increasing; /* is allocating new pool */
+ int ps_pool_size; /* new pool size */
+
+ kib_ps_pool_create_t ps_pool_create; /* create a new pool */
+ kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
+ kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
+ kib_ps_node_fini_t ps_node_fini; /* finalize node */
+} kib_poolset_t;
+
+typedef struct kib_pool
+{
+ cfs_list_t po_list; /* chain on pool list */
+ cfs_list_t po_free_list; /* pre-allocated node */
+ kib_poolset_t *po_owner; /* pool_set of this pool */
+ cfs_time_t po_deadline; /* deadline of this pool */
+ int po_allocated; /* # of elements in use */
+ int po_failed; /* pool is created on failed HCA */
+ int po_size; /* # of pre-allocated elements */
+} kib_pool_t;
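+/* a pool-set grows a new pool when every po_free_list is empty (throttled
+ * by IBLND_POOL_RETRY on allocation failure) and retires pools idle past
+ * IBLND_POOL_DEADLINE; tx and pmr pools embed kib_pool_t, while FMR pools
+ * below wrap OFED's own struct ib_fmr_pool */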
+
+typedef struct {
+ kib_poolset_t tps_poolset; /* pool-set */
+ __u64 tps_next_tx_cookie; /* cookie of TX */
+} kib_tx_poolset_t;
+
+typedef struct {
+ kib_pool_t tpo_pool; /* pool */
+ struct kib_hca_dev *tpo_hdev; /* device for this pool */
+ struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
+ kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
+} kib_tx_pool_t;
+
+typedef struct {
+ kib_poolset_t pps_poolset; /* pool-set */
+} kib_pmr_poolset_t;
+
+typedef struct kib_pmr_pool {
+ struct kib_hca_dev *ppo_hdev; /* device for this pool */
+ kib_pool_t ppo_pool; /* pool */
+} kib_pmr_pool_t;
typedef struct
{
- int kib_init; /* initialisation state */
- int kib_shutdown; /* shut down? */
- struct list_head kib_devs; /* IB devices extant */
- atomic_t kib_nthreads; /* # live threads */
- rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
+ cfs_spinlock_t fps_lock; /* serialize */
+ struct kib_net *fps_net; /* IB network */
+ cfs_list_t fps_pool_list; /* FMR pool list */
+ cfs_list_t fps_failed_pool_list; /* failed FMR pool list */
+ __u64 fps_version; /* validity stamp */
+ int fps_increasing; /* is allocating new pool */
+ cfs_time_t fps_next_retry; /* time stamp for retry if failed to allocate */
+} kib_fmr_poolset_t;
- struct list_head *kib_peers; /* hash table of all my known peers */
- int kib_peer_hash_size; /* size of kib_peers */
+typedef struct
+{
+ cfs_list_t fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
+ kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ cfs_time_t fpo_deadline; /* deadline of this pool */
+ int fpo_failed; /* FMR pool has failed */
+ int fpo_map_count; /* # of mapped FMR */
+} kib_fmr_pool_t;
- void *kib_connd; /* the connd task (serialisation assertions) */
- struct list_head kib_connd_conns; /* connections to setup/teardown */
- struct list_head kib_connd_zombies; /* connections with zero refcount */
- wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
- spinlock_t kib_connd_lock; /* serialise */
+typedef struct {
+ struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+ kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+} kib_fmr_t;
- wait_queue_head_t kib_sched_waitq; /* schedulers sleep here */
- struct list_head kib_sched_conns; /* conns to check for rx completions */
- spinlock_t kib_sched_lock; /* serialise */
+typedef struct kib_net
+{
+ cfs_list_t ibn_list; /* chain on kib_dev_t::ibd_nets */
+ __u64 ibn_incarnation; /* my epoch */
+ int ibn_init; /* initialisation state */
+ int ibn_shutdown; /* shutting down? */
+ unsigned int ibn_with_fmr:1; /* FMR? */
+ unsigned int ibn_with_pmr:1; /* PMR? */
+
+ cfs_atomic_t ibn_npeers; /* # peers extant */
+ cfs_atomic_t ibn_nconns; /* # connections extant */
+
+ kib_tx_poolset_t ibn_tx_ps; /* tx pool-set */
+ kib_fmr_poolset_t ibn_fmr_ps; /* fmr pool-set */
+ kib_pmr_poolset_t ibn_pmr_ps; /* pmr pool-set */
- __u64 kib_next_tx_cookie; /* RDMA completion cookie */
- struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
+ kib_dev_t *ibn_dev; /* underlying IB device */
+} kib_net_t;
+
+typedef struct
+{
+ int kib_init; /* initialisation state */
+ int kib_shutdown; /* shut down? */
+ cfs_list_t kib_devs; /* IB devices extant */
+ cfs_list_t kib_failed_devs; /* list head of failed devices */
+ cfs_atomic_t kib_nthreads; /* # live threads */
+ cfs_rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
+
+ cfs_list_t *kib_peers; /* hash table of all my known peers */
+ int kib_peer_hash_size;/* size of kib_peers */
+
+ void *kib_connd; /* the connd task (serialisation assertions) */
+ cfs_list_t kib_connd_conns; /* connections to setup/teardown */
+ cfs_list_t kib_connd_zombies;/* connections with zero refcount */
+ cfs_waitq_t kib_connd_waitq; /* connection daemon sleeps here */
+ cfs_spinlock_t kib_connd_lock; /* serialise */
+
+ cfs_waitq_t kib_sched_waitq; /* schedulers sleep here */
+ cfs_list_t kib_sched_conns; /* conns to check for rx completions */
+ cfs_spinlock_t kib_sched_lock; /* serialise */
+ cfs_waitq_t kib_failover_waitq; /* failover thread sleeps here */
+
+ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
} kib_data_t;
#define IBLND_INIT_NOTHING 0
char ibim_payload[0]; /* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;
-#if IBLND_MAP_ON_DEMAND
-typedef struct
-{
- __u64 rd_addr; /* IO VMA address */
- __u32 rd_nob; /* # of bytes */
- __u32 rd_key; /* remote key */
-} WIRE_ATTR kib_rdma_desc_t;
-#else
typedef struct
{
__u32 rf_nob; /* # bytes this frag */
__u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct
{
__u32 rd_key; /* local/remote key */
__u32 rd_nfrags; /* # fragments */
kib_rdma_frag_t rd_frags[0]; /* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;
-#endif
-
+
typedef struct
{
lnet_hdr_t ibprm_hdr; /* portals header */
typedef struct
{
/* First 2 fields fixed FOR ALL TIME */
- __u32 ibm_magic; /* I'm an openibnal message */
+ __u32 ibm_magic; /* I'm an ibnal message */
__u16 ibm_version; /* this is my version number */
__u8 ibm_type; /* msg type */
#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
-#define IBLND_MSG_VERSION 0x11
+#define IBLND_MSG_VERSION_1 0x11
+#define IBLND_MSG_VERSION_2 0x12
+#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2
#define IBLND_MSG_CONNREQ 0xc0 /* connection request */
#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */
__u32 ibr_magic; /* sender's magic */
__u16 ibr_version; /* sender's version */
__u8 ibr_why; /* reject reason */
+ __u8 ibr_padding; /* padding */
+ __u64 ibr_incarnation; /* incarnation of peer */
+ kib_connparams_t ibr_cp; /* connection parameters */
} WIRE_ATTR kib_rej_t;
-
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */
#define IBLND_REJECT_FATAL 3 /* Anything else */
+#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
+#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
+
+#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */
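+/* on IBLND_REJECT_CONN_UNCOMPAT the active side retries the connection at
+ * the version the peer advertised in ibr_version */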
+
/***********************************************************************/
typedef struct kib_rx /* receive message */
{
- struct list_head rx_list; /* queue for attention */
+ cfs_list_t rx_list; /* queue for attention */
struct kib_conn *rx_conn; /* owning conn */
int rx_nob; /* # bytes received (-1 while posted) */
enum ib_wc_status rx_status; /* completion status */
typedef struct kib_tx /* transmit message */
{
- struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
+ cfs_list_t tx_list; /* queue on idle_txs ibc_tx_queue etc. */
+ kib_tx_pool_t *tx_pool; /* pool I'm from */
struct kib_conn *tx_conn; /* owning conn */
- int tx_sending; /* # tx callbacks outstanding */
- int tx_queued; /* queued for sending */
- int tx_waiting; /* waiting for peer */
+ short tx_sending; /* # tx callbacks outstanding */
+ short tx_queued; /* queued for sending */
+ short tx_waiting; /* waiting for peer */
int tx_status; /* LNET completion status */
unsigned long tx_deadline; /* completion deadline */
__u64 tx_cookie; /* completion cookie */
__u64 tx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
int tx_nwrq; /* # send work items */
-#if IBLND_MAP_ON_DEMAND
- struct ib_send_wr tx_wrq[2]; /* send work items... */
- struct ib_sge tx_sge[2]; /* ...and their memory */
- kib_rdma_desc_t tx_rd[1]; /* rdma descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
- struct ib_pool_fmr *tx_fmr; /* rdma mapping (mapped if != NULL) */
-#else
struct ib_send_wr *tx_wrq; /* send work items... */
struct ib_sge *tx_sge; /* ...and their memory */
kib_rdma_desc_t *tx_rd; /* rdma descriptor */
int tx_nfrags; /* # entries in... */
struct scatterlist *tx_frags; /* dma_map_sg descriptor */
+ __u64 *tx_pages; /* rdma phys page addrs */
+ union {
+ kib_phys_mr_t *pmr; /* MR for physical buffer */
+ kib_fmr_t fmr; /* FMR */
+ } tx_u;
int tx_dmadir; /* dma direction */
-#endif
} kib_tx_t;
typedef struct kib_connvars
typedef struct kib_conn
{
- struct kib_peer *ibc_peer; /* owning peer */
- struct list_head ibc_list; /* stash on peer's conn list */
- struct list_head ibc_sched_list; /* schedule for attention */
- __u64 ibc_incarnation; /* which instance of the peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits;/* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- int ibc_nrx:8; /* receive buffers owned */
- int ibc_scheduled:1; /* scheduled for attention */
- int ibc_ready:1; /* CQ callback fired */
- unsigned long ibc_last_send; /* time of last send */
- struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
- struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs */
- struct list_head ibc_tx_queue; /* sends that need a credit */
- struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */
- struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
- struct list_head ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
+ struct kib_peer *ibc_peer; /* owning peer */
+ kib_hca_dev_t *ibc_hdev; /* HCA bound on */
+ cfs_list_t ibc_list; /* stash on peer's conn list */
+ cfs_list_t ibc_sched_list; /* schedule for attention */
+ __u16 ibc_version; /* version of connection */
+ __u64 ibc_incarnation; /* which instance of the peer */
+ cfs_atomic_t ibc_refcount; /* # users */
+ int ibc_state; /* what's happening */
+ int ibc_nsends_posted; /* # uncompleted sends */
+ int ibc_noops_posted; /* # uncompleted NOOPs */
+ int ibc_credits; /* # credits I have */
+ int ibc_outstanding_credits; /* # credits to return */
+ int ibc_reserved_credits;/* # ACK/DONE msg credits */
+ int ibc_comms_error; /* set on comms error */
+ int ibc_nrx:16; /* receive buffers owned */
+ int ibc_scheduled:1; /* scheduled for attention */
+ int ibc_ready:1; /* CQ callback fired */
+ unsigned long ibc_last_send; /* time of last send */
+ cfs_list_t ibc_early_rxs; /* rxs completed before ESTABLISHED */
+ cfs_list_t ibc_tx_noops; /* IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
+ cfs_list_t ibc_tx_queue; /* sends that need a credit */
+ cfs_list_t ibc_tx_queue_nocred;/* sends that don't need a credit */
+ cfs_list_t ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
+ cfs_list_t ibc_active_txs; /* active tx awaiting completion */
+ cfs_spinlock_t ibc_lock; /* serialise */
+ kib_rx_t *ibc_rxs; /* the rx descs */
+ kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+
+ struct rdma_cm_id *ibc_cmid; /* CM id */
+ struct ib_cq *ibc_cq; /* completion queue */
+
+ kib_connvars_t *ibc_connvars; /* in-progress connection state */
} kib_conn_t;
#define IBLND_CONN_INIT 0 /* being initialised */
typedef struct kib_peer
{
- struct list_head ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
- struct list_head ibp_conns; /* all active connections */
- struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- int ibp_connecting; /* current active connection attempts */
- int ibp_accepting; /* current passive connection attempts */
- int ibp_error; /* errno on closing this peer */
- cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
+ cfs_list_t ibp_list; /* stash on global peer list */
+ lnet_nid_t ibp_nid; /* who's on the other end(s) */
+ lnet_ni_t *ibp_ni; /* LNet interface */
+ cfs_atomic_t ibp_refcount; /* # users */
+ cfs_list_t ibp_conns; /* all active connections */
+ cfs_list_t ibp_tx_queue; /* msgs waiting for a conn */
+ __u16 ibp_version; /* version of peer */
+ __u64 ibp_incarnation; /* incarnation of peer */
+ int ibp_connecting; /* current active connection attempts */
+ int ibp_accepting; /* current passive connection attempts */
+ int ibp_error; /* errno on closing this peer */
+ cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
} kib_peer_t;
-
extern kib_data_t kiblnd_data;
-extern kib_tunables_t kiblnd_tunables;
+
+extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+
+static inline void
+kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+{
+ LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
+ cfs_atomic_inc(&hdev->ibh_ref);
+}
+
+static inline void
+kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+{
+ LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
+ if (cfs_atomic_dec_and_test(&hdev->ibh_ref))
+ kiblnd_hdev_destroy(hdev);
+}
+
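+/* kib_dev_failover semantics: 0 disables failover, 1 permits it only when
+ * the IPoIB interface is a bonding master (ibd_can_failover), and any
+ * larger value forces it */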
+static inline int
+kiblnd_dev_can_failover(kib_dev_t *dev)
+{
+ if (!cfs_list_empty(&dev->ibd_fail_list)) /* already scheduled */
+ return 0;
+
+ if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+ return 0;
+
+ if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
+ return 1;
+
+ return dev->ibd_can_failover;
+}
#define kiblnd_conn_addref(conn) \
do { \
CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT(atomic_read(&(conn)->ibc_refcount) > 0); \
- atomic_inc(&(conn)->ibc_refcount); \
+ (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ cfs_atomic_inc(&(conn)->ibc_refcount); \
} while (0)
-#define kiblnd_conn_decref(conn) \
-do { \
- unsigned long flags; \
- \
- CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT(atomic_read(&(conn)->ibc_refcount) > 0); \
- if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- list_add_tail(&(conn)->ibc_list, \
- &kiblnd_data.kib_connd_zombies); \
- wake_up(&kiblnd_data.kib_connd_waitq); \
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); \
- } \
+#define kiblnd_conn_decref(conn) \
+do { \
+ unsigned long flags; \
+ \
+ CDEBUG(D_NET, "conn[%p] (%d)--\n", \
+ (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
+ if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
+ cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
+ cfs_list_add_tail(&(conn)->ibc_list, \
+ &kiblnd_data.kib_connd_zombies); \
+ cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \
+ cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+ } \
} while (0)
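+/* the final ref can drop in CM/CQ callback context, so teardown is punted
+ * to the connd thread via kib_connd_zombies rather than done inline */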
#define kiblnd_peer_addref(peer) \
do { \
CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read (&(peer)->ibp_refcount)); \
- LASSERT(atomic_read(&(peer)->ibp_refcount) > 0); \
- atomic_inc(&(peer)->ibp_refcount); \
+ cfs_atomic_read (&(peer)->ibp_refcount)); \
+ cfs_atomic_inc(&(peer)->ibp_refcount); \
} while (0)
#define kiblnd_peer_decref(peer) \
do { \
CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read (&(peer)->ibp_refcount)); \
- LASSERT(atomic_read(&(peer)->ibp_refcount) > 0); \
- if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
+ cfs_atomic_read (&(peer)->ibp_refcount)); \
+ LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
+ if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount)) \
kiblnd_destroy_peer(peer); \
} while (0)
-static inline struct list_head *
+static inline cfs_list_t *
kiblnd_nid2peerlist (lnet_nid_t nid)
{
- unsigned int hash = ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+ unsigned int hash =
+ ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
return (&kiblnd_data.kib_peers [hash]);
}
kiblnd_peer_active (kib_peer_t *peer)
{
/* Am I in the peer hash table? */
- return (!list_empty(&peer->ibp_list));
+ return (!cfs_list_empty(&peer->ibp_list));
}
static inline kib_conn_t *
kiblnd_get_conn_locked (kib_peer_t *peer)
{
- LASSERT (!list_empty(&peer->ibp_conns));
+ LASSERT (!cfs_list_empty(&peer->ibp_conns));
/* just return the first connection */
- return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+ return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}
static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
- time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*HZ);
+ cfs_time_after(jiffies, conn->ibc_last_send +
+ *kiblnd_tunables.kib_keepalive*CFS_HZ);
}
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- if (conn->ibc_outstanding_credits < IBLND_CREDIT_HIGHWATER &&
+ if (conn->ibc_outstanding_credits <
+ IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
- if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
- !list_empty(&conn->ibc_tx_queue_nocred) || /* can be piggybacked */
+ if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
+ if (!cfs_list_empty(&conn->ibc_tx_queue_nocred))
+ return 0; /* NOOP can be piggybacked */
+
+ /* No tx to piggyback NOOP onto or no credit to send a tx */
+ return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
+ }
+
+ if (!cfs_list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
+ !cfs_list_empty(&conn->ibc_tx_queue_nocred) || /* can be piggybacked */
conn->ibc_credits == 0) /* no credit */
return 0;
if (conn->ibc_credits == 1 && /* last credit reserved for */
conn->ibc_outstanding_credits == 0) /* giving back credits */
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
+ return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
static inline void
kiblnd_abort_receives (kib_conn_t *conn)
{
ib_modify_qp(conn->ibc_cmid->qp,
&kiblnd_data.kib_error_qpa, IB_QP_STATE);
}
+static inline const char *
+kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q)
+{
+ if (q == &conn->ibc_tx_queue)
+ return "tx_queue";
+
+ if (q == &conn->ibc_tx_queue_rsrvd)
+ return "tx_queue_rsrvd";
+
+ if (q == &conn->ibc_tx_queue_nocred)
+ return "tx_queue_nocred";
+
+ if (q == &conn->ibc_active_txs)
+ return "active_txs";
+
+ LBUG();
+ return NULL;
+}
+
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
* lowest bits of the work request id to stash the work item type. */
kiblnd_set_conn_state (kib_conn_t *conn, int state)
{
conn->ibc_state = state;
- mb();
+ cfs_mb();
}
-#if IBLND_MAP_ON_DEMAND
-static inline int
-kiblnd_rd_size (kib_rdma_desc_t *rd)
+static inline void
+kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
{
- return rd->rd_nob;
+ msg->ibm_type = type;
+ msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}
-#else
+
static inline int
kiblnd_rd_size (kib_rdma_desc_t *rd)
{
int i;
int size;
-
+
for (i = size = 0; i < rd->rd_nfrags; i++)
size += rd->rd_frags[i].rf_nob;
-
+
return size;
}
-#endif
+
+static inline __u64
+kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
+{
+ return rd->rd_frags[index].rf_addr;
+}
+
+static inline __u32
+kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
+{
+ return rd->rd_frags[index].rf_nob;
+}
+
+static inline __u32
+kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
+{
+ return rd->rd_key;
+}
+
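+/* consume 'nob' bytes of frag 'index': a partially-transferred frag is
+ * trimmed in place, otherwise step on to the next fragment */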
+static inline int
+kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
+{
+ if (nob < rd->rd_frags[index].rf_nob) {
+ rd->rd_frags[index].rf_addr += nob;
+ rd->rd_frags[index].rf_nob -= nob;
+ } else {
+ index ++;
+ }
+
+ return index;
+}
+
+static inline int
+kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
+{
+ LASSERT (msgtype == IBLND_MSG_GET_REQ ||
+ msgtype == IBLND_MSG_PUT_ACK);
+
+ return msgtype == IBLND_MSG_GET_REQ ?
+ offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
+ offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
+}
#ifdef HAVE_OFED_IB_DMA_MAP
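+/* newer OFED/kernels supply ib_dma_* helpers that honour the device's DMA
+ * mapping ops; older stacks fall back to the raw dma_* API (note the old
+ * single-argument dma_mapping_error()) */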
+static inline __u64
+kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return ib_dma_mapping_error(dev, dma_addr);
+}
+
static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
void *msg, size_t size,
enum dma_data_direction direction)
#else
+static inline __u64
+kiblnd_dma_mapping_error(struct ib_device *dev, dma_addr_t dma_addr)
+{
+ return dma_mapping_error(dma_addr);
+}
+
static inline dma_addr_t kiblnd_dma_map_single(struct ib_device *dev,
void *msg, size_t size,
enum dma_data_direction direction)
#endif
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd);
+struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
+ __u64 addr, __u64 size);
+void kiblnd_map_rx_descs(kib_conn_t *conn);
+void kiblnd_unmap_rx_descs(kib_conn_t *conn);
+int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, int nfrags);
+void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node);
+cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
+ int npages, __u64 iov, kib_fmr_t *fmr);
+void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+
+int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
+void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
+
int kiblnd_startup (lnet_ni_t *ni);
void kiblnd_shutdown (lnet_ni_t *ni);
int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
+void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
int kiblnd_connd (void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start (int (*fn)(void *arg), void *arg);
+int kiblnd_failover_thread (void *arg);
int kiblnd_alloc_pages (kib_pages_t **pp, int npages);
void kiblnd_free_pages (kib_pages_t *p);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
+int kiblnd_translate_mtu(int value);
+int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer (kib_peer_t *peer);
void kiblnd_destroy_dev (kib_dev_t *dev);
void kiblnd_peer_alive (kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
-int kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation);
+int kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+ int version, __u64 incarnation);
+int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state);
+ int state, int version);
void kiblnd_destroy_conn (kib_conn_t *conn);
void kiblnd_close_conn (kib_conn_t *conn, int error);
void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
-int kiblnd_init_rdma (lnet_ni_t *ni, kib_tx_t *tx, int type,
+int kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status);
+void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
+ int status);
void kiblnd_check_sends (kib_conn_t *conn);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-void kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob);
-void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
+void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int kiblnd_post_rx (kib_rx_t *rx, int credit);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
+