* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Eric Barton <eric@bartonsoftware.com>
*/
+#ifdef HAVE_COMPAT_RDMA
+#include <linux/compat-2.6.h>
+#endif
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <net/sock.h>
#include <linux/in.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_fmr_pool.h>
+
#define DEBUG_SUBSYSTEM S_LND
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
-#include <lnet/lnet-sysctl.h>
-
-#ifdef HAVE_COMPAT_RDMA
-#include <linux/compat-2.6.h>
-#endif
-#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
/* # scheduler loops before reschedule */
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
- int *kib_credits; /* # concurrent sends */
- int *kib_peertxcredits; /* # concurrent sends to 1 peer */
- int *kib_peerrtrcredits; /* # per-peer router buffer credits */
- int *kib_peercredits_hiw; /* # when eagerly to return credits */
- int *kib_peertimeout; /* seconds to consider peer dead */
char **kib_default_ipif; /* default IPoIB interface */
int *kib_retry_count;
int *kib_rnr_retry_count;
- int *kib_concurrent_sends; /* send work queue sizing */
int *kib_ib_mtu; /* IB MTU */
- int *kib_map_on_demand; /* map-on-demand if RD has more fragments
- * than this value, 0 disable map-on-demand */
- int *kib_pmr_pool_size; /* # physical MR in pool */
- int *kib_fmr_pool_size; /* # FMRs in pool */
- int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
- int *kib_fmr_cache; /* enable FMR pool cache? */
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
struct ctl_table_header *kib_sysctl; /* sysctl interface */
#endif
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */
-#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MSG_QUEUE_SIZE_V1 : \
- *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
-#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+/* when to eagerly return credits */
+#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_CREDIT_HIGHWATER_V1 : \
+ (t)->lnd_peercredits_hiw)
-#ifdef HAVE_RDMA_CREATE_ID_4ARG
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
+#ifdef HAVE_RDMA_CREATE_ID_5ARG
+# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
+ cb, dev, \
+ ps, qpt)
#else
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps)
+# ifdef HAVE_RDMA_CREATE_ID_4ARG
+# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, \
+ ps, qpt)
+# else
+# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps)
+# endif
#endif
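+/*
+ * Illustrative call site (a sketch): callers pass the same four
+ * arguments on every kernel and the ladder above maps them onto
+ * whatever signature the resident rdma_create_id() has, e.g.
+ *
+ *	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev,
+ *				     RDMA_PS_TCP, IB_QPT_RC);
+ */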
-static inline int
-kiblnd_concurrent_sends_v1(void)
-{
- if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
- return IBLND_MSG_QUEUE_SIZE_V1 * 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
- return IBLND_MSG_QUEUE_SIZE_V1 / 2;
-
- return *kiblnd_tunables.kib_concurrent_sends;
-}
-
-#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- kiblnd_concurrent_sends_v1() : \
- *kiblnd_tunables.kib_concurrent_sends)
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
- *kiblnd_tunables.kib_map_on_demand : \
- IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
-#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so don't need give a very large value */
#define IBLND_TX_POOL 256
-#define IBLND_PMR_POOL 256
#define IBLND_FMR_POOL 256
#define IBLND_FMR_POOL_FLUSH 192
-/* TX messages (shared by all connections) */
-#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
-
/* RX messages (per connection) */
-#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
-#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define IBLND_RX_MSGS(c) \
+ ((c)->ibc_queue_depth * 2 + IBLND_OOB_MSGS((c)->ibc_version))
+#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(c) \
+ ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)
/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
-#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
-#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
+#define IBLND_SEND_WRS(c) \
+ (((c)->ibc_max_frags + 1) * kiblnd_concurrent_sends((c)->ibc_version, \
+ (c)->ibc_peer->ibp_ni))
+#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
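+/*
+ * Worked example (illustrative, version-2 connection, 4 KiB pages):
+ * with ibc_queue_depth = 8 and ibc_max_frags = 16, IBLND_RX_MSGS is
+ * 8 * 2 + 2 OOB = 18 receives, i.e. 18 * 4 KiB = 18 pages of
+ * premapped buffers; with concurrent sends tuned to 8,
+ * IBLND_SEND_WRS is (16 + 1) * 8 = 136, so IBLND_CQ_ENTRIES sizes
+ * the CQ for 18 + 136 = 154 completions.
+ */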
struct kib_hca_dev;
typedef struct
{
- cfs_list_t ibd_list; /* chain on kib_devs */
- cfs_list_t ibd_fail_list; /* chain on kib_failed_devs */
- __u32 ibd_ifip; /* IPoIB interface IP */
- /** IPoIB interface name */
- char ibd_ifname[KIB_IFNAME_SIZE];
- int ibd_nnets; /* # nets extant */
-
- cfs_time_t ibd_next_failover;
- int ibd_failed_failover; /* # failover failures */
- unsigned int ibd_failover; /* failover in progress */
- unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
- cfs_list_t ibd_nets;
- struct kib_hca_dev *ibd_hdev;
+ struct list_head ibd_list; /* chain on kib_devs */
+ struct list_head ibd_fail_list; /* chain on kib_failed_devs */
+ __u32 ibd_ifip; /* IPoIB interface IP */
+ /** IPoIB interface name */
+ char ibd_ifname[KIB_IFNAME_SIZE];
+ int ibd_nnets; /* # nets extant */
+
+ cfs_time_t ibd_next_failover;
+ /* # failover failures */
+ int ibd_failed_failover;
+ /* failover in progress */
+ unsigned int ibd_failover;
+ /* IPoIB interface is a bonding master */
+ unsigned int ibd_can_failover;
+ struct list_head ibd_nets;
+ struct kib_hca_dev *ibd_hdev;
} kib_dev_t;
typedef struct kib_hca_dev
{
- struct rdma_cm_id *ibh_cmid; /* listener cmid */
- struct ib_device *ibh_ibdev; /* IB device */
- int ibh_page_shift; /* page shift of current HCA */
- int ibh_page_size; /* page size of current HCA */
- __u64 ibh_page_mask; /* page mask of current HCA */
- int ibh_mr_shift; /* bits shift of max MR size */
- __u64 ibh_mr_size; /* size of MR */
- int ibh_nmrs; /* # of global MRs */
- struct ib_mr **ibh_mrs; /* global MR */
- struct ib_pd *ibh_pd; /* PD */
- kib_dev_t *ibh_dev; /* owner */
- cfs_atomic_t ibh_ref; /* refcount */
+ struct rdma_cm_id *ibh_cmid; /* listener cmid */
+ struct ib_device *ibh_ibdev; /* IB device */
+ int ibh_page_shift; /* page shift of current HCA */
+ int ibh_page_size; /* page size of current HCA */
+ __u64 ibh_page_mask; /* page mask of current HCA */
+ int ibh_mr_shift; /* bits shift of max MR size */
+ __u64 ibh_mr_size; /* size of MR */
+ struct ib_mr *ibh_mrs; /* global MR */
+ struct ib_pd *ibh_pd; /* PD */
+ kib_dev_t *ibh_dev; /* owner */
+ atomic_t ibh_ref; /* refcount */
} kib_hca_dev_t;
/** # of seconds to keep pool alive */
struct page *ibp_pages[0]; /* page array */
} kib_pages_t;
-struct kib_pmr_pool;
-
-typedef struct {
- cfs_list_t pmr_list; /* chain node */
- struct ib_phys_buf *pmr_ipb; /* physical buffer */
- struct ib_mr *pmr_mr; /* IB MR */
- struct kib_pmr_pool *pmr_pool; /* owner of this MR */
- __u64 pmr_iova; /* Virtual I/O address */
- int pmr_refcount; /* reference count */
-} kib_phys_mr_t;
-
struct kib_pool;
struct kib_poolset;
typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
-typedef void (*kib_ps_node_init_t)(struct kib_pool *po, cfs_list_t *node);
-typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, cfs_list_t *node);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
struct kib_net;
typedef struct kib_poolset
{
- spinlock_t ps_lock; /* serialize */
- struct kib_net *ps_net; /* network it belongs to */
- char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
- cfs_list_t ps_pool_list; /* list of pools */
- cfs_list_t ps_failed_pool_list; /* failed pool list */
- cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
- int ps_increasing; /* is allocating new pool */
- int ps_pool_size; /* new pool size */
- int ps_cpt; /* CPT id */
-
- kib_ps_pool_create_t ps_pool_create; /* create a new pool */
- kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
- kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
- kib_ps_node_fini_t ps_node_fini; /* finalize node */
+ /* serialize */
+ spinlock_t ps_lock;
+ /* network it belongs to */
+ struct kib_net *ps_net;
+ /* pool set name */
+ char ps_name[IBLND_POOL_NAME_LEN];
+ /* list of pools */
+ struct list_head ps_pool_list;
+ /* failed pool list */
+ struct list_head ps_failed_pool_list;
+ /* time stamp for retry if failed to allocate */
+ cfs_time_t ps_next_retry;
+ /* is allocating new pool */
+ int ps_increasing;
+ /* new pool size */
+ int ps_pool_size;
+ /* CPT id */
+ int ps_cpt;
+
+ /* create a new pool */
+ kib_ps_pool_create_t ps_pool_create;
+ /* destroy a pool */
+ kib_ps_pool_destroy_t ps_pool_destroy;
+ /* initialize new allocated node */
+ kib_ps_node_init_t ps_node_init;
+ /* finalize node */
+ kib_ps_node_fini_t ps_node_fini;
} kib_poolset_t;
typedef struct kib_pool
{
- cfs_list_t po_list; /* chain on pool list */
- cfs_list_t po_free_list; /* pre-allocated node */
- kib_poolset_t *po_owner; /* pool_set of this pool */
- cfs_time_t po_deadline; /* deadline of this pool */
- int po_allocated; /* # of elements in use */
- int po_failed; /* pool is created on failed HCA */
- int po_size; /* # of pre-allocated elements */
+ /* chain on pool list */
+ struct list_head po_list;
+ /* pre-allocated node */
+ struct list_head po_free_list;
+ /* pool_set of this pool */
+ kib_poolset_t *po_owner;
+ /* deadline of this pool */
+ cfs_time_t po_deadline;
+ /* # of elements in use */
+ int po_allocated;
+ /* pool is created on failed HCA */
+ int po_failed;
+ /* # of pre-allocated elements */
+ int po_size;
} kib_pool_t;
typedef struct {
kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
} kib_tx_pool_t;
-typedef struct {
- kib_poolset_t pps_poolset; /* pool-set */
-} kib_pmr_poolset_t;
-
-typedef struct kib_pmr_pool {
- struct kib_hca_dev *ppo_hdev; /* device for this pool */
- kib_pool_t ppo_pool; /* pool */
-} kib_pmr_pool_t;
-
typedef struct
{
spinlock_t fps_lock; /* serialize */
- struct kib_net *fps_net; /* IB network */
- cfs_list_t fps_pool_list; /* FMR pool list */
- cfs_list_t fps_failed_pool_list; /* FMR pool list */
- __u64 fps_version; /* validity stamp */
+ struct kib_net *fps_net; /* IB network */
+ struct list_head fps_pool_list; /* FMR pool list */
+ struct list_head fps_failed_pool_list; /* FMR pool list */
+ __u64 fps_version; /* validity stamp */
int fps_cpt; /* CPT id */
int fps_pool_size;
int fps_flush_trigger;
+ int fps_cache;
/* is allocating new pool */
int fps_increasing;
/* time stamp for retry if failed to allocate */
cfs_time_t fps_next_retry;
} kib_fmr_poolset_t;
+struct kib_fast_reg_descriptor { /* For fast registration */
+ struct list_head frd_list;
+ struct ib_send_wr frd_inv_wr;
+ struct ib_send_wr frd_fastreg_wr;
+ struct ib_mr *frd_mr;
+ struct ib_fast_reg_page_list *frd_frpl;
+ bool frd_valid;
+};
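+/*
+ * Illustrative mapping flow: the fast-reg path takes an idle
+ * descriptor off frd_list, bumps the MR key with ib_inc_rkey()
+ * (defined below), loads the fragment addresses into frd_frpl and
+ * posts frd_fastreg_wr, chained behind frd_inv_wr whenever the stale
+ * registration must first be invalidated (frd_valid tracks this).
+ */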
+
typedef struct
{
- cfs_list_t fpo_list; /* chain on pool list */
- struct kib_hca_dev *fpo_hdev; /* device for this pool */
- kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
- cfs_time_t fpo_deadline; /* deadline of this pool */
- int fpo_failed; /* fmr pool is failed */
- int fpo_map_count; /* # of mapped FMR */
+ struct list_head fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
+ kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
+ union {
+ struct {
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ } fmr;
+ struct { /* For fast registration */
+ struct list_head fpo_pool_list;
+ int fpo_pool_size;
+ } fast_reg;
+ };
+ cfs_time_t fpo_deadline; /* deadline of this pool */
+ int fpo_failed; /* fmr pool is failed */
+ int fpo_map_count; /* # of mapped FMR */
+ int fpo_is_fmr;
} kib_fmr_pool_t;
typedef struct {
- struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+ struct kib_fast_reg_descriptor *fmr_frd;
+ u32 fmr_key;
} kib_fmr_t;
typedef struct kib_net
{
- cfs_list_t ibn_list; /* chain on kib_dev_t::ibd_nets */
- __u64 ibn_incarnation; /* my epoch */
- int ibn_init; /* initialisation state */
- int ibn_shutdown; /* shutting down? */
+ /* chain on kib_dev_t::ibd_nets */
+ struct list_head ibn_list;
+ __u64 ibn_incarnation; /* my epoch */
+ int ibn_init; /* initialisation state */
+ int ibn_shutdown; /* shutting down? */
- cfs_atomic_t ibn_npeers; /* # peers extant */
- cfs_atomic_t ibn_nconns; /* # connections extant */
+ atomic_t ibn_npeers; /* # peers extant */
+ atomic_t ibn_nconns; /* # connections extant */
kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
- kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */
kib_dev_t *ibn_dev; /* underlying IB device */
} kib_net_t;
/* serialise */
spinlock_t ibs_lock;
/* schedulers sleep here */
- wait_queue_head_t ibs_waitq;
+ wait_queue_head_t ibs_waitq;
/* conns to check for rx completions */
- cfs_list_t ibs_conns;
+ struct list_head ibs_conns;
/* number of scheduler threads */
int ibs_nthreads;
/* max allowed scheduler threads */
{
int kib_init; /* initialisation state */
int kib_shutdown; /* shut down? */
- cfs_list_t kib_devs; /* IB devices extant */
+ struct list_head kib_devs; /* IB devices extant */
/* list head of failed devices */
- cfs_list_t kib_failed_devs;
+ struct list_head kib_failed_devs;
/* schedulers sleep here */
- wait_queue_head_t kib_failover_waitq;
- cfs_atomic_t kib_nthreads; /* # live threads */
+ wait_queue_head_t kib_failover_waitq;
+ atomic_t kib_nthreads; /* # live threads */
/* stabilize net/dev/peer/conn ops */
rwlock_t kib_global_lock;
/* hash table of all my known peers */
- cfs_list_t *kib_peers;
+ struct list_head *kib_peers;
/* size of kib_peers */
int kib_peer_hash_size;
/* the connd task (serialisation assertions) */
void *kib_connd;
/* connections to setup/teardown */
- cfs_list_t kib_connd_conns;
+ struct list_head kib_connd_conns;
/* connections with zero refcount */
- cfs_list_t kib_connd_zombies;
+ struct list_head kib_connd_zombies;
+ /* connections to reconnect */
+ struct list_head kib_reconn_list;
+ /* peers wait for reconnection */
+ struct list_head kib_reconn_wait;
+ /*
+ * The second (wallclock) at which peers were last pulled off
+ * \a kib_reconn_wait for reconnection; the sweep runs at most once
+ * per second (see the sketch below).
+ */
+ unsigned int kib_reconn_sec;
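+ /*
+ * Sketch of the consumer (connd, illustrative):
+ *
+ *	if (now != kiblnd_data.kib_reconn_sec) {
+ *		kiblnd_data.kib_reconn_sec = now;
+ *		list_splice_init(&kiblnd_data.kib_reconn_wait,
+ *				 &kiblnd_data.kib_reconn_list);
+ *	}
+ */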
/* connection daemon sleeps here */
- wait_queue_head_t kib_connd_waitq;
+ wait_queue_head_t kib_connd_waitq;
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
/* percpt data for schedulers */
#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
-#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */
+/* peer's rdma frags don't match mine */
+#define IBLND_REJECT_RDMA_FRAGS 6
+/* peer's msg queue size doesn't match mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7
/***********************************************************************/
typedef struct kib_rx /* receive message */
{
- cfs_list_t rx_list; /* queue for attention */
- struct kib_conn *rx_conn; /* owning conn */
- int rx_nob; /* # bytes received (-1 while posted) */
- enum ib_wc_status rx_status; /* completion status */
- kib_msg_t *rx_msg; /* message buffer (host vaddr) */
- __u64 rx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
- struct ib_recv_wr rx_wrq; /* receive work item... */
- struct ib_sge rx_sge; /* ...and its memory */
+ /* queue for attention */
+ struct list_head rx_list;
+ /* owning conn */
+ struct kib_conn *rx_conn;
+ /* # bytes received (-1 while posted) */
+ int rx_nob;
+ /* completion status */
+ enum ib_wc_status rx_status;
+ /* message buffer (host vaddr) */
+ kib_msg_t *rx_msg;
+ /* message buffer (I/O addr) */
+ __u64 rx_msgaddr;
+ /* for dma_unmap_single() */
+ DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);
+ /* receive work item... */
+ struct ib_recv_wr rx_wrq;
+ /* ...and its memory */
+ struct ib_sge rx_sge;
} kib_rx_t;
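+/*
+ * Illustrative posting sequence (a sketch; lkey setup and error
+ * handling omitted): rx_sge covers the premapped message buffer and
+ * rx_wrq carries a type-tagged work request id so the completion
+ * handler can recover this descriptor:
+ *
+ *	rx->rx_sge.addr    = rx->rx_msgaddr;
+ *	rx->rx_sge.length  = IBLND_MSG_SIZE;
+ *	rx->rx_wrq.next    = NULL;
+ *	rx->rx_wrq.sg_list = &rx->rx_sge;
+ *	rx->rx_wrq.num_sge = 1;
+ *	rx->rx_wrq.wr_id   = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
+ *	rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+ */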
#define IBLND_POSTRX_DONT_POST 0 /* don't post */
typedef struct kib_tx /* transmit message */
{
- cfs_list_t tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
- struct kib_conn *tx_conn; /* owning conn */
- short tx_sending; /* # tx callbacks outstanding */
- short tx_queued; /* queued for sending */
- short tx_waiting; /* waiting for peer */
- int tx_status; /* LNET completion status */
- unsigned long tx_deadline; /* completion deadline */
- __u64 tx_cookie; /* completion cookie */
- lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
- __u64 tx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
- int tx_nwrq; /* # send work items */
- struct ib_send_wr *tx_wrq; /* send work items... */
- struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
- int tx_nfrags; /* # entries in... */
- struct scatterlist *tx_frags; /* dma_map_sg descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
- union {
- kib_phys_mr_t *pmr; /* MR for physical buffer */
- kib_fmr_t fmr; /* FMR */
- } tx_u;
- int tx_dmadir; /* dma direction */
+ /* queue on idle_txs ibc_tx_queue etc. */
+ struct list_head tx_list;
+ /* pool I'm from */
+ kib_tx_pool_t *tx_pool;
+ /* owning conn */
+ struct kib_conn *tx_conn;
+ /* # tx callbacks outstanding */
+ short tx_sending;
+ /* queued for sending */
+ short tx_queued;
+ /* waiting for peer */
+ short tx_waiting;
+ /* LNET completion status */
+ int tx_status;
+ /* completion deadline */
+ unsigned long tx_deadline;
+ /* completion cookie */
+ __u64 tx_cookie;
+ /* lnet msgs to finalize on completion */
+ lnet_msg_t *tx_lntmsg[2];
+ /* message buffer (host vaddr) */
+ kib_msg_t *tx_msg;
+ /* message buffer (I/O addr) */
+ __u64 tx_msgaddr;
+ /* for dma_unmap_single() */
+ DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);
+ /* # send work items */
+ int tx_nwrq;
+ /* send work items... */
+ struct ib_send_wr *tx_wrq;
+ /* ...and their memory */
+ struct ib_sge *tx_sge;
+ /* rdma descriptor */
+ kib_rdma_desc_t *tx_rd;
+ /* # entries in... */
+ int tx_nfrags;
+ /* dma_map_sg descriptor */
+ struct scatterlist *tx_frags;
+ /* rdma phys page addrs */
+ __u64 *tx_pages;
+ /* FMR */
+ kib_fmr_t fmr;
+ /* dma direction */
+ int tx_dmadir;
} kib_tx_t;
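+/*
+ * Illustrative lifecycle: a tx leaves its pool's free list, waits on
+ * one of the ibc_tx_queue* lists (tx_queued set), moves to
+ * ibc_active_txs once posted (tx_sending counts unfinished send
+ * completions, tx_waiting is set while a peer reply is outstanding)
+ * and is only returned to tx_pool when tx_queued, tx_waiting and
+ * tx_sending have all dropped to zero.
+ */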
typedef struct kib_connvars
typedef struct kib_conn
{
- struct kib_sched_info *ibc_sched; /* scheduler information */
- struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
- cfs_list_t ibc_list; /* stash on peer's conn list */
- cfs_list_t ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- __u64 ibc_incarnation; /* which instance of the peer */
- cfs_atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits;/* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- unsigned int ibc_nrx:16; /* receive buffers owned */
- unsigned int ibc_scheduled:1; /* scheduled for attention */
- unsigned int ibc_ready:1; /* CQ callback fired */
- /* time of last send */
- unsigned long ibc_last_send;
- /** link chain for kiblnd_check_conns only */
- cfs_list_t ibc_connd_list;
- /** rxs completed before ESTABLISHED */
- cfs_list_t ibc_early_rxs;
- /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
- cfs_list_t ibc_tx_noops;
- cfs_list_t ibc_tx_queue; /* sends that need a credit */
- cfs_list_t ibc_tx_queue_nocred;/* sends that don't need a credit */
- cfs_list_t ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
- cfs_list_t ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
+ /* scheduler information */
+ struct kib_sched_info *ibc_sched;
+ /* owning peer */
+ struct kib_peer *ibc_peer;
+ /* HCA bound on */
+ kib_hca_dev_t *ibc_hdev;
+ /* stash on peer's conn list */
+ struct list_head ibc_list;
+ /* schedule for attention */
+ struct list_head ibc_sched_list;
+ /* version of connection */
+ __u16 ibc_version;
+ /* reconnect later */
+ __u16 ibc_reconnect:1;
+ /* which instance of the peer */
+ __u64 ibc_incarnation;
+ /* # users */
+ atomic_t ibc_refcount;
+ /* what's happening */
+ int ibc_state;
+ /* # uncompleted sends */
+ int ibc_nsends_posted;
+ /* # uncompleted NOOPs */
+ int ibc_noops_posted;
+ /* # credits I have */
+ int ibc_credits;
+ /* # credits to return */
+ int ibc_outstanding_credits;
+ /* # ACK/DONE msg credits */
+ int ibc_reserved_credits;
+ /* set on comms error */
+ int ibc_comms_error;
+ /* connection's queue depth */
+ __u16 ibc_queue_depth;
+ /* connection's max # of RDMA fragments */
+ __u16 ibc_max_frags;
+ /* receive buffers owned */
+ unsigned int ibc_nrx:16;
+ /* scheduled for attention */
+ unsigned int ibc_scheduled:1;
+ /* CQ callback fired */
+ unsigned int ibc_ready:1;
+ /* time of last send */
+ unsigned long ibc_last_send;
+ /** link chain for kiblnd_check_conns only */
+ struct list_head ibc_connd_list;
+ /** rxs completed before ESTABLISHED */
+ struct list_head ibc_early_rxs;
+ /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
+ struct list_head ibc_tx_noops;
+ /* sends that need a credit */
+ struct list_head ibc_tx_queue;
+ /* sends that don't need a credit */
+ struct list_head ibc_tx_queue_nocred;
+ /* sends that need to reserve an ACK/DONE msg */
+ struct list_head ibc_tx_queue_rsrvd;
+ /* active tx awaiting completion */
+ struct list_head ibc_active_txs;
+ /* serialise */
+ spinlock_t ibc_lock;
+ /* the rx descs */
+ kib_rx_t *ibc_rxs;
+ /* premapped rx msg pages */
+ kib_pages_t *ibc_rx_pages;
+
+ /* CM id */
+ struct rdma_cm_id *ibc_cmid;
+ /* completion queue */
+ struct ib_cq *ibc_cq;
+
+ /* in-progress connection state */
+ kib_connvars_t *ibc_connvars;
} kib_conn_t;
#define IBLND_CONN_INIT 0 /* being initialised */
typedef struct kib_peer
{
- cfs_list_t ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- cfs_atomic_t ibp_refcount; /* # users */
- cfs_list_t ibp_conns; /* all active connections */
- cfs_list_t ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
- __u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts */
- int ibp_accepting; /* current passive connection attempts */
- int ibp_error; /* errno on closing this peer */
- cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
+ /* stash on global peer list */
+ struct list_head ibp_list;
+ /* who's on the other end(s) */
+ lnet_nid_t ibp_nid;
+ /* LNet interface */
+ lnet_ni_t *ibp_ni;
+ /* all active connections */
+ struct list_head ibp_conns;
+ /* msgs waiting for a conn */
+ struct list_head ibp_tx_queue;
+ /* incarnation of peer */
+ __u64 ibp_incarnation;
+ /* when (in jiffies) I was last alive */
+ cfs_time_t ibp_last_alive;
+ /* # users */
+ atomic_t ibp_refcount;
+ /* version of peer */
+ __u16 ibp_version;
+ /* current passive connection attempts */
+ unsigned short ibp_accepting;
+ /* current active connection attempts */
+ unsigned short ibp_connecting;
+ /* reconnect this peer later */
+ unsigned short ibp_reconnecting:1;
+ /* # consecutive reconnection attempts to this peer */
+ unsigned int ibp_reconnected;
+ /* errno on closing this peer */
+ int ibp_error;
+ /* max map_on_demand */
+ __u16 ibp_max_frags;
+ /* max_peer_credits */
+ __u16 ibp_queue_depth;
} kib_peer_t;
+#ifndef HAVE_IB_INC_RKEY
+/**
+ * ib_inc_rkey - increments the key portion of the given rkey. Can be used
+ * for calculating a new rkey for type 2 memory windows.
+ * @rkey - the rkey to increment.
+ */
+static inline u32 ib_inc_rkey(u32 rkey)
+{
+ const u32 mask = 0x000000ff;
+ return ((rkey + 1) & mask) | (rkey & ~mask);
+}
+#endif
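+/*
+ * Worked example: only the low eight key bits change and they wrap,
+ * so the MR index held in the upper bits is preserved:
+ *
+ *	ib_inc_rkey(0x12345600) == 0x12345601
+ *	ib_inc_rkey(0x123456ff) == 0x12345600
+ */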
+
extern kib_data_t kiblnd_data;
extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
+
+/* max # of fragments configured by user */
+static inline int
+kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ return mod != 0 ? mod : IBLND_MAX_RDMA_FRAGS;
+}
+
+static inline int
+kiblnd_rdma_frags(int version, struct lnet_ni *ni)
+{
+ return version == IBLND_MSG_VERSION_1 ?
+ IBLND_MAX_RDMA_FRAGS :
+ kiblnd_cfg_rdma_frags(ni);
+}
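+
+/*
+ * Worked example (illustrative): with lnd_map_on_demand = 32, a
+ * version-2 peer advertises at most 32 RDMA fragments and larger
+ * transfers are collapsed through FMR/fast-reg mapping; with
+ * lnd_map_on_demand = 0 the full IBLND_MAX_RDMA_FRAGS (LNET_MAX_IOV)
+ * is advertised.  Version-1 peers always use IBLND_MAX_RDMA_FRAGS.
+ */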
+
+static inline int
+kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int concurrent_sends;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ concurrent_sends = tunables->lnd_concurrent_sends;
+
+ if (version == IBLND_MSG_VERSION_1) {
+ if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+ if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+ }
+
+ return concurrent_sends;
+}
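+
+/*
+ * Worked example (assuming IBLND_MSG_QUEUE_SIZE_V1 == 8): on a
+ * version-1 connection lnd_concurrent_sends = 64 is clamped down to
+ * 16 and lnd_concurrent_sends = 2 is raised to 4; version-2
+ * connections use the tunable as configured.
+ */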
+
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
- LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
- cfs_atomic_inc(&hdev->ibh_ref);
+ LASSERT(atomic_read(&hdev->ibh_ref) > 0);
+ atomic_inc(&hdev->ibh_ref);
}
static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
- LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
- if (cfs_atomic_dec_and_test(&hdev->ibh_ref))
- kiblnd_hdev_destroy(hdev);
+ LASSERT(atomic_read(&hdev->ibh_ref) > 0);
+ if (atomic_dec_and_test(&hdev->ibh_ref))
+ kiblnd_hdev_destroy(hdev);
}
static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
- if (!cfs_list_empty(&dev->ibd_fail_list)) /* already scheduled */
+ if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
#define kiblnd_conn_addref(conn) \
do { \
CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
- cfs_atomic_inc(&(conn)->ibc_refcount); \
+ (conn), atomic_read(&(conn)->ibc_refcount)); \
+ atomic_inc(&(conn)->ibc_refcount); \
} while (0)
#define kiblnd_conn_decref(conn) \
unsigned long flags; \
\
CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ (conn), atomic_read(&(conn)->ibc_refcount)); \
LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
- if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
+ if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- cfs_list_add_tail(&(conn)->ibc_list, \
+ list_add_tail(&(conn)->ibc_list, \
&kiblnd_data.kib_connd_zombies); \
wake_up(&kiblnd_data.kib_connd_waitq); \
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
#define kiblnd_peer_addref(peer) \
do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- cfs_atomic_read (&(peer)->ibp_refcount)); \
- cfs_atomic_inc(&(peer)->ibp_refcount); \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
+ atomic_read(&(peer)->ibp_refcount)); \
+ atomic_inc(&(peer)->ibp_refcount); \
} while (0)
#define kiblnd_peer_decref(peer) \
do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- cfs_atomic_read (&(peer)->ibp_refcount)); \
- LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
- if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount)) \
- kiblnd_destroy_peer(peer); \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
+ atomic_read(&(peer)->ibp_refcount)); \
+ LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
+ if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
+ kiblnd_destroy_peer(peer); \
} while (0)
-static inline cfs_list_t *
+static inline bool
+kiblnd_peer_connecting(kib_peer_t *peer)
+{
+ return peer->ibp_connecting != 0 ||
+ peer->ibp_reconnecting != 0 ||
+ peer->ibp_accepting != 0;
+}
+
+static inline bool
+kiblnd_peer_idle(kib_peer_t *peer)
+{
+ return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
+}
+
+static inline struct list_head *
kiblnd_nid2peerlist (lnet_nid_t nid)
{
- unsigned int hash =
- ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+ unsigned int hash =
+ ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
- return (&kiblnd_data.kib_peers [hash]);
+ return &kiblnd_data.kib_peers[hash];
}
static inline int
kiblnd_peer_active (kib_peer_t *peer)
{
- /* Am I in the peer hash table? */
- return (!cfs_list_empty(&peer->ibp_list));
+ /* Am I in the peer hash table? */
+ return !list_empty(&peer->ibp_list);
}
static inline kib_conn_t *
kiblnd_get_conn_locked (kib_peer_t *peer)
{
- LASSERT (!cfs_list_empty(&peer->ibp_conns));
+ LASSERT(!list_empty(&peer->ibp_conns));
/* just return the first connection */
- return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+ return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}
static inline int
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
cfs_time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*HZ);
+ msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
+ MSEC_PER_SEC));
}
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
- LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
+ IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
- if (!cfs_list_empty(&conn->ibc_tx_queue_nocred))
+ if (!list_empty(&conn->ibc_tx_queue_nocred))
return 0; /* NOOP can be piggybacked */
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (cfs_list_empty(&conn->ibc_tx_queue) ||
+ return (list_empty(&conn->ibc_tx_queue) ||
conn->ibc_credits == 0);
}
- if (!cfs_list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
- !cfs_list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
+ if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
+ !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
conn->ibc_credits == 0) /* no credit */
return 0;
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
+ return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
static inline void
}
static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q)
+kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
- if (q == &conn->ibc_tx_queue)
- return "tx_queue";
+ if (q == &conn->ibc_tx_queue)
+ return "tx_queue";
- if (q == &conn->ibc_tx_queue_rsrvd)
- return "tx_queue_rsrvd";
+ if (q == &conn->ibc_tx_queue_rsrvd)
+ return "tx_queue_rsrvd";
- if (q == &conn->ibc_tx_queue_nocred)
- return "tx_queue_nocred";
+ if (q == &conn->ibc_tx_queue_nocred)
+ return "tx_queue_nocred";
- if (q == &conn->ibc_active_txs)
- return "active_txs";
+ if (q == &conn->ibc_active_txs)
+ return "active_txs";
- LBUG();
- return NULL;
+ LBUG();
+ return NULL;
}
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
* lowest bits of the work request id to stash the work item type. */
-#define IBLND_WID_TX 0
-#define IBLND_WID_RDMA 1
-#define IBLND_WID_RX 2
-#define IBLND_WID_MASK 3UL
+#define IBLND_WID_INVAL 0
+#define IBLND_WID_TX 1
+#define IBLND_WID_RX 2
+#define IBLND_WID_RDMA 3
+#define IBLND_WID_MR 4
+#define IBLND_WID_MASK 7UL
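+/*
+ * Sketch of the tagging scheme: descriptors are at least 8-byte
+ * aligned, so the low three bits of a work request id are free to
+ * carry one of the IBLND_WID_* types and a completion handler can
+ * split them apart again:
+ *
+ *	wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
+ *	type  = (int)(wr_id & IBLND_WID_MASK);
+ *	tx    = (kib_tx_t *)(unsigned long)(wr_id & ~IBLND_WID_MASK);
+ */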
static inline __u64
kiblnd_ptr2wreqid (void *ptr, int type)
static inline void
kiblnd_set_conn_state (kib_conn_t *conn, int state)
{
- conn->ibc_state = state;
- cfs_mb();
+ conn->ibc_state = state;
+ smp_mb();
}
static inline void
offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
-#ifdef HAVE_OFED_IB_DMA_MAP
-
static inline __u64
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-#else
-
-static inline __u64
-kiblnd_dma_mapping_error(struct ib_device *dev, dma_addr_t dma_addr)
-{
- return dma_mapping_error(dma_addr);
-}
-
-static inline dma_addr_t kiblnd_dma_map_single(struct ib_device *dev,
- void *msg, size_t size,
- enum dma_data_direction direction)
-{
- return dma_map_single(dev->dma_device, msg, size, direction);
-}
-
-static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction direction)
-{
- dma_unmap_single(dev->dma_device, addr, size, direction);
-}
-
-#define KIBLND_UNMAP_ADDR_SET(p, m, a) pci_unmap_addr_set(p, m, a)
-#define KIBLND_UNMAP_ADDR(p, m, a) pci_unmap_addr(p, m)
-
-static inline int kiblnd_dma_map_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- return dma_map_sg(dev->dma_device, sg, nents, direction);
-}
-
-static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- return dma_unmap_sg(dev->dma_device, sg, nents, direction);
-}
-
-
-static inline dma_addr_t kiblnd_sg_dma_address(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return sg_dma_address(sg);
-}
-
-
-static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return sg_dma_len(sg);
-}
-
-#define KIBLND_CONN_PARAM(e) ((e)->private_data)
-#define KIBLND_CONN_PARAM_LEN(e) ((e)->private_data_len)
-
-#endif
-
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd);
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
- __u64 addr, __u64 size);
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+ int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
- kib_rdma_desc_t *rd, int nfrags);
-void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
-void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node);
-cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps);
-
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
- int npages, __u64 iov, kib_fmr_t *fmr);
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
+struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
-int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
-void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
-
-int kiblnd_startup (lnet_ni_t *ni);
-void kiblnd_shutdown (lnet_ni_t *ni);
-int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
+ __u32 nob, __u64 iov, bool is_rx, kib_fmr_t *fmr);
+void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+int kiblnd_tunables_setup(struct lnet_ni *ni);
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
int kiblnd_failover_thread (void *arg);
int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages (kib_pages_t *p);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);
int kiblnd_dev_failover(kib_dev_t *dev);
-int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
+int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer (kib_peer_t *peer);
+bool kiblnd_reconnect_peer(kib_peer_t *peer);
void kiblnd_destroy_dev (kib_dev_t *dev);
void kiblnd_unlink_peer_locked (kib_peer_t *peer);
-void kiblnd_peer_alive (kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
-void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
int kiblnd_close_stale_conns_locked (kib_peer_t *peer,
int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
-void kiblnd_connreq_done(kib_conn_t *conn, int status);
-kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn (kib_conn_t *conn);
+kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+ int state, int version);
+void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
void kiblnd_close_conn (kib_conn_t *conn, int error);
void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
-int kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
- int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
- int status);
-void kiblnd_check_sends (kib_conn_t *conn);
+void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);