LU-1346 gnilnd: remove libcfs abstractions
[fs/lustre-release.git] / lnet / klnds / o2iblnd / o2iblnd.h
index ed5bacf..661756c 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Author: Eric Barton <eric@bartonsoftware.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
+#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/stat.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
 #include <linux/unistd.h>
 #include <linux/uio.h>
 
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
 #include <linux/list.h>
 #include <linux/kmod.h>
 #include <linux/sysctl.h>
-#include <linux/random.h>
 #include <linux/pci.h>
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)
+#include <linux/pci-dma.h>
+#endif
 
 #include <net/sock.h>
 #include <linux/in.h>
 #include <lnet/lib-lnet.h>
 #include <lnet/lnet-sysctl.h>
 
-#if !HAVE_GFP_T
-typedef int gfp_t;
+#ifdef HAVE_COMPAT_RDMA
+#include <linux/compat-2.6.h>
 #endif
-
 #include <rdma/rdma_cm.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_fmr_pool.h>
 
-/* tunables fixed at compile time */
-#ifdef CONFIG_SMP
-# define IBLND_N_SCHED      num_online_cpus()   /* # schedulers */
-#else
-# define IBLND_N_SCHED      1                   /* # schedulers */
-#endif
+#define IBLND_PEER_HASH_SIZE           101     /* # peer lists */
+/* # scheduler loops before reschedule */
+#define IBLND_RESCHED                  100
 
-#define IBLND_PEER_HASH_SIZE         101        /* # peer lists */
-#define IBLND_RESCHED                100        /* # scheduler loops before reschedule */
+#define IBLND_N_SCHED                  2
+#define IBLND_N_SCHED_HIGH             4
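
Note that IBLND_N_SCHED no longer tracks num_online_cpus(): schedulers are
now created per CPU partition (see struct kib_sched_info below), with these
constants as the normal and high-load per-CPT thread counts. A minimal
sketch of the sizing this implies, assuming the nscheds tunable
(kib_nscheds below) overrides the default; the actual clamping in
o2iblnd.c may differ:

        /* default IBLND_N_SCHED threads per CPT, tunable override,
         * but never more threads than CPUs in the partition */
        int nthreads = *kiblnd_tunables.kib_nscheds ?: IBLND_N_SCHED;
        nthreads = min(nthreads, cfs_cpt_weight(lnet_cpt_table(), cpt));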
 
 typedef struct
 {
-        unsigned int     *kib_service;          /* IB service number */
-        int              *kib_min_reconnect_interval; /* first failed connection retry... */
-        int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
-        int              *kib_cksum;            /* checksum kib_msg_t? */
-        int              *kib_timeout;          /* comms timeout (seconds) */
-        int              *kib_keepalive;        /* keepalive timeout (seconds) */
-        int              *kib_ntx;              /* # tx descs */
-        int              *kib_credits;          /* # concurrent sends */
-        int              *kib_peercredits;      /* # concurrent sends to 1 peer */
-        int              *kib_peercredits_hiw;  /* # when eagerly to return credits */
-        int              *kib_peertimeout;      /* seconds to consider peer dead */
-        char            **kib_default_ipif;     /* default IPoIB interface */
-        int              *kib_retry_count;
-        int              *kib_rnr_retry_count;
-        int              *kib_concurrent_sends; /* send work queue sizing */
-        int             *kib_ib_mtu;           /* IB MTU */
-        int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
-                                                 * than this value, 0 disable map-on-demand */
-        int              *kib_pmr_pool_size;    /* # physical MR in pool */
-        int              *kib_fmr_pool_size;    /* # FMRs in pool */
-        int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
-        int              *kib_fmr_cache;        /* enable FMR pool cache? */
+       int              *kib_dev_failover;     /* HCA failover */
+       unsigned int     *kib_service;          /* IB service number */
+       int              *kib_min_reconnect_interval; /* first failed connection retry... */
+       int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
+       int              *kib_cksum;            /* checksum kib_msg_t? */
+       int              *kib_timeout;          /* comms timeout (seconds) */
+       int              *kib_keepalive;        /* keepalive timeout (seconds) */
+       int              *kib_ntx;              /* # tx descs */
+       int              *kib_credits;          /* # concurrent sends */
+       int              *kib_peertxcredits;    /* # concurrent sends to 1 peer */
+       int              *kib_peerrtrcredits;   /* # per-peer router buffer credits */
+       int              *kib_peercredits_hiw;  /* # when eagerly to return credits */
+       int              *kib_peertimeout;      /* seconds to consider peer dead */
+       char            **kib_default_ipif;     /* default IPoIB interface */
+       int              *kib_retry_count;
+       int              *kib_rnr_retry_count;
+       int              *kib_concurrent_sends; /* send work queue sizing */
+       int              *kib_ib_mtu;           /* IB MTU */
+       int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
+                                                * than this value; 0 disables map-on-demand */
+       int              *kib_pmr_pool_size;    /* # physical MR in pool */
+       int              *kib_fmr_pool_size;    /* # FMRs in pool */
+       int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
+       int              *kib_fmr_cache;        /* enable FMR pool cache? */
 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-        cfs_sysctl_table_header_t *kib_sysctl;  /* sysctl interface */
+       struct ctl_table_header *kib_sysctl;  /* sysctl interface */
 #endif
+       int              *kib_require_priv_port;/* accept only privileged ports */
+       int              *kib_use_priv_port;    /* use privileged port for active connect */
+       /* # threads on each CPT */
+       int              *kib_nscheds;
 } kib_tunables_t;
 
 extern kib_tunables_t  kiblnd_tunables;
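
The members of kib_tunables_t are all pointers because each is wired to a
ko2iblnd module parameter. An illustrative configuration line (a sketch;
only a few parameters are shown and the values are examples only):

        /* in /etc/modprobe.d/ko2iblnd.conf:
         *      options ko2iblnd peer_credits=16 map_on_demand=32 dev_failover=1
         */
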
@@ -132,15 +129,21 @@ extern kib_tunables_t  kiblnd_tunables;
 #define IBLND_CREDIT_HIGHWATER_V1    7          /* V1 only : when eagerly to return credits */
 
 #define IBLND_CREDITS_DEFAULT        8          /* default # of peer credits */
-#define IBLND_CREDITS_MAX            4096       /* Max # of peer credits */
+#define IBLND_CREDITS_MAX          ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
 #define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
                                      IBLND_MSG_QUEUE_SIZE_V1 :   \
-                                     *kiblnd_tunables.kib_peercredits) /* # messages/RDMAs in-flight */
+                                     *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
 #define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
                                      IBLND_CREDIT_HIGHWATER_V1 : \
                                      *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
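
IBLND_CREDITS_MAX above replaces the hard-coded 4096 with -1 cast to the
type of ibm_credits, which for an unsigned field is that field's maximum
value. A self-contained sketch of the idiom, assuming ibm_credits is a
__u16 (so the macro yields 65535):

        /* ((T)-1) == maximum value of an unsigned type T (GNU C typeof) */
        #include <stdio.h>

        struct msg { unsigned short credits; };  /* stand-in for kib_msg_t */
        #define CREDITS_MAX ((typeof(((struct msg *)0)->credits)) - 1)

        int main(void)
        {
                printf("%u\n", (unsigned)CREDITS_MAX);  /* prints 65535 */
                return 0;
        }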
 
+#ifdef HAVE_RDMA_CREATE_ID_4ARG
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
+#else
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps)
+#endif
+
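The HAVE_RDMA_CREATE_ID_4ARG wrapper papers over the kernel API change
that added a QP type argument to rdma_create_id(). A usage sketch (error
handling elided; the cmid creation in o2iblnd.c is the real consumer):

        struct rdma_cm_id *cmid;

        cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
                                     IB_QPT_RC);
        if (IS_ERR(cmid))
                CERROR("Can't create CMID: %ld\n", PTR_ERR(cmid));
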
 static inline int
 kiblnd_concurrent_sends_v1(void)
 {
@@ -170,11 +173,15 @@ kiblnd_concurrent_sends_v1(void)
 
 /************************/
 /* derived constants... */
+/* Pools (shared by connections on each CPT) */
+/* These pools can grow at runtime, so there is no need for a large initial value */
+#define IBLND_TX_POOL                  256
+#define IBLND_PMR_POOL                 256
+#define IBLND_FMR_POOL                 256
+#define IBLND_FMR_POOL_FLUSH           192
 
 /* TX messages (shared by all connections) */
 #define IBLND_TX_MSGS()            (*kiblnd_tunables.kib_ntx)
-#define IBLND_TX_MSG_BYTES()        (IBLND_TX_MSGS() * IBLND_MSG_SIZE)
-#define IBLND_TX_MSG_PAGES()       ((IBLND_TX_MSG_BYTES() + PAGE_SIZE - 1) / PAGE_SIZE)
 
 /* RX messages (per connection) */
 #define IBLND_RX_MSGS(v)            (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
@@ -186,88 +193,229 @@ kiblnd_concurrent_sends_v1(void)
 #define IBLND_SEND_WRS(v)          ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
 #define IBLND_CQ_ENTRIES(v)         (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
 
+struct kib_hca_dev;
+
+/* o2iblnd can run over an aliased interface */
+#ifdef IFALIASZ
+#define KIB_IFNAME_SIZE              IFALIASZ
+#else
+#define KIB_IFNAME_SIZE              256
+#endif
+
 typedef struct
 {
-        struct ib_device *ibp_device;           /* device for mapping */
-        int               ibp_npages;           /* # pages */
-        struct page      *ibp_pages[0];
+        cfs_list_t           ibd_list;          /* chain on kib_devs */
+        cfs_list_t           ibd_fail_list;     /* chain on kib_failed_devs */
+        __u32                ibd_ifip;          /* IPoIB interface IP */
+        /** IPoIB interface name */
+        char                 ibd_ifname[KIB_IFNAME_SIZE];
+        int                  ibd_nnets;         /* # nets extant */
+
+        cfs_time_t           ibd_next_failover;
+        int                  ibd_failed_failover; /* # failover failures */
+        unsigned int         ibd_failover;      /* failover in progress */
+        unsigned int         ibd_can_failover;  /* IPoIB interface is a bonding master */
+        cfs_list_t           ibd_nets;
+        struct kib_hca_dev  *ibd_hdev;
+} kib_dev_t;
+
+typedef struct kib_hca_dev
+{
+        struct rdma_cm_id   *ibh_cmid;          /* listener cmid */
+        struct ib_device    *ibh_ibdev;         /* IB device */
+        int                  ibh_page_shift;    /* page shift of current HCA */
+        int                  ibh_page_size;     /* page size of current HCA */
+        __u64                ibh_page_mask;     /* page mask of current HCA */
+        int                  ibh_mr_shift;      /* bits shift of max MR size */
+        __u64                ibh_mr_size;       /* size of MR */
+        int                  ibh_nmrs;          /* # of global MRs */
+        struct ib_mr       **ibh_mrs;           /* global MR */
+        struct ib_pd        *ibh_pd;            /* PD */
+        kib_dev_t           *ibh_dev;           /* owner */
+        cfs_atomic_t         ibh_ref;           /* refcount */
+} kib_hca_dev_t;
+
+/** # of seconds to keep pool alive */
+#define IBLND_POOL_DEADLINE     300
+/** # of seconds to retry if allocation failed */
+#define IBLND_POOL_RETRY        1
+
+typedef struct
+{
+        int                     ibp_npages;             /* # pages */
+        struct page            *ibp_pages[0];           /* page array */
 } kib_pages_t;
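
ibp_pages[0] is the zero-length-array idiom: the descriptor and its page
array are allocated in one shot. A sketch of the matching allocation, in
the spirit of the reworked kiblnd_alloc_pages() and its new cpt argument
(treat the exact call as an assumption):

        kib_pages_t *p;

        LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
                         offsetof(kib_pages_t, ibp_pages[npages]));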
 
-typedef struct {
-        spinlock_t              ibmp_lock;      /* serialize */
-        int                     ibmp_allocated; /* MR in use */
-        struct list_head        ibmp_free_list; /* pre-allocated MR */
-} kib_phys_mr_pool_t;
+struct kib_pmr_pool;
 
 typedef struct {
-        struct list_head        ibpm_link;      /* link node */
-        struct ib_mr           *ibpm_mr;        /* MR */
-        __u64                   ibpm_iova;      /* Virtual I/O address */
-        int                     ibpm_refcount;  /* reference count */
+        cfs_list_t              pmr_list;               /* chain node */
+        struct ib_phys_buf     *pmr_ipb;                /* physical buffer */
+        struct ib_mr           *pmr_mr;                 /* IB MR */
+        struct kib_pmr_pool    *pmr_pool;               /* owner of this MR */
+        __u64                   pmr_iova;               /* Virtual I/O address */
+        int                     pmr_refcount;           /* reference count */
 } kib_phys_mr_t;
 
-typedef struct
+struct kib_pool;
+struct kib_poolset;
+
+typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps,
+                                    int inc, struct kib_pool **pp_po);
+typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po, cfs_list_t *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, cfs_list_t *node);
+
+struct kib_net;
+
+#define IBLND_POOL_NAME_LEN     32
+
+typedef struct kib_poolset
 {
-        struct list_head     ibd_list;          /* chain on kib_devs */
-        __u32                ibd_ifip;          /* IPoIB interface IP */
-        char                 ibd_ifname[32];    /* IPoIB interface name */
-        int                  ibd_nnets;         /* # nets extant */
+       spinlock_t              ps_lock;                /* serialize */
+        struct kib_net         *ps_net;                 /* network it belongs to */
+        char                    ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
+        cfs_list_t              ps_pool_list;           /* list of pools */
+        cfs_list_t              ps_failed_pool_list;    /* failed pool list */
+        cfs_time_t              ps_next_retry;          /* time stamp for retry if failed to allocate */
+        int                     ps_increasing;          /* is allocating new pool */
+        int                     ps_pool_size;           /* new pool size */
+       int                     ps_cpt;                 /* CPT id */
+
+        kib_ps_pool_create_t    ps_pool_create;         /* create a new pool */
+        kib_ps_pool_destroy_t   ps_pool_destroy;        /* destroy a pool */
+        kib_ps_node_init_t      ps_node_init;           /* initialize new allocated node */
+        kib_ps_node_fini_t      ps_node_fini;           /* finalize node */
+} kib_poolset_t;
+
+typedef struct kib_pool
+{
+        cfs_list_t              po_list;                /* chain on pool list */
+        cfs_list_t              po_free_list;           /* pre-allocated node */
+        kib_poolset_t          *po_owner;               /* pool_set of this pool */
+        cfs_time_t              po_deadline;            /* deadline of this pool */
+        int                     po_allocated;           /* # of elements in use */
+        int                     po_failed;              /* pool is created on failed HCA */
+        int                     po_size;                /* # of pre-allocated elements */
+} kib_pool_t;
 
-        struct rdma_cm_id   *ibd_cmid;          /* IB listener (bound to 1 device) */
-        struct ib_pd        *ibd_pd;            /* PD for the device */
-        int                  ibd_page_shift;    /* page shift of current HCA */
-        int                  ibd_page_size;     /* page size of current HCA */
-        __u64                ibd_page_mask;     /* page mask of current HCA */
-        int                  ibd_mr_shift;      /* bits shift of max MR size */
-        __u64                ibd_mr_size;       /* size of MR */
+typedef struct {
+        kib_poolset_t           tps_poolset;            /* pool-set */
+        __u64                   tps_next_tx_cookie;     /* cookie of TX */
+} kib_tx_poolset_t;
 
-        int                  ibd_nmrs;          /* # of global MRs */
-        struct ib_mr       **ibd_mrs;           /* MR for non RDMA I/O */
-} kib_dev_t;
+typedef struct {
+        kib_pool_t              tpo_pool;               /* pool */
+        struct kib_hca_dev     *tpo_hdev;               /* device for this pool */
+        struct kib_tx          *tpo_tx_descs;           /* all the tx descriptors */
+        kib_pages_t            *tpo_tx_pages;           /* premapped tx msg pages */
+} kib_tx_pool_t;
+
+typedef struct {
+        kib_poolset_t           pps_poolset;            /* pool-set */
+} kib_pmr_poolset_t;
+
+typedef struct kib_pmr_pool {
+        struct kib_hca_dev     *ppo_hdev;               /* device for this pool */
+        kib_pool_t              ppo_pool;               /* pool */
+} kib_pmr_pool_t;
+
+typedef struct
+{
+       spinlock_t              fps_lock;               /* serialize */
+        struct kib_net         *fps_net;                /* IB network */
+        cfs_list_t              fps_pool_list;          /* FMR pool list */
+        cfs_list_t              fps_failed_pool_list;   /* FMR pool list */
+        __u64                   fps_version;            /* validity stamp */
+       int                     fps_cpt;                /* CPT id */
+       int                     fps_pool_size;
+       int                     fps_flush_trigger;
+       /* is allocating new pool */
+       int                     fps_increasing;
+       /* time stamp for retry if failed to allocate */
+       cfs_time_t              fps_next_retry;
+} kib_fmr_poolset_t;
 
 typedef struct
 {
+        cfs_list_t              fpo_list;               /* chain on pool list */
+        struct kib_hca_dev     *fpo_hdev;               /* device for this pool */
+        kib_fmr_poolset_t      *fpo_owner;              /* owner of this pool */
+        struct ib_fmr_pool     *fpo_fmr_pool;           /* IB FMR pool */
+        cfs_time_t              fpo_deadline;           /* deadline of this pool */
+        int                     fpo_failed;             /* fmr pool is failed */
+        int                     fpo_map_count;          /* # of mapped FMR */
+} kib_fmr_pool_t;
+
+typedef struct {
+        struct ib_pool_fmr     *fmr_pfmr;               /* IB pool fmr */
+        kib_fmr_pool_t         *fmr_pool;               /* pool of FMR */
+} kib_fmr_t;
+
+typedef struct kib_net
+{
+        cfs_list_t           ibn_list;          /* chain on kib_dev_t::ibd_nets */
         __u64                ibn_incarnation;   /* my epoch */
         int                  ibn_init;          /* initialisation state */
         int                  ibn_shutdown;      /* shutting down? */
 
-        atomic_t             ibn_npeers;        /* # peers extant */
-        atomic_t             ibn_nconns;        /* # connections extant */
+       cfs_atomic_t            ibn_npeers;     /* # peers extant */
+       cfs_atomic_t            ibn_nconns;     /* # connections extant */
 
-        __u64                ibn_tx_next_cookie; /* RDMA completion cookie */
-        struct kib_tx       *ibn_tx_descs;      /* all the tx descriptors */
-        kib_pages_t         *ibn_tx_pages;      /* premapped tx msg pages */
-        struct list_head     ibn_idle_txs;      /* idle tx descriptors */
-        spinlock_t           ibn_tx_lock;       /* serialise */
+       kib_tx_poolset_t        **ibn_tx_ps;    /* tx pool-set */
+       kib_fmr_poolset_t       **ibn_fmr_ps;   /* fmr pool-set */
+       kib_pmr_poolset_t       **ibn_pmr_ps;   /* pmr pool-set */
 
-        struct ib_fmr_pool  *ibn_fmrpool;       /* FMR pool for RDMA I/O */
-        kib_phys_mr_pool_t  *ibn_pmrpool;       /* Physical MR pool for RDMA I/O */
-
-        kib_dev_t           *ibn_dev;           /* underlying IB device */
+       kib_dev_t               *ibn_dev;       /* underlying IB device */
 } kib_net_t;
 
+#define KIB_THREAD_SHIFT               16
+#define KIB_THREAD_ID(cpt, tid)                ((cpt) << KIB_THREAD_SHIFT | (tid))
+#define KIB_THREAD_CPT(id)             ((id) >> KIB_THREAD_SHIFT)
+#define KIB_THREAD_TID(id)             ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
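
The scheduler thread id packs the CPT into the high bits and the per-CPT
thread index into the low 16 bits. A worked example:

        /* scheduler thread 3 on CPT 2 */
        long id = KIB_THREAD_ID(2, 3);  /* (2 << 16) | 3 == 0x20003 */
        KIB_THREAD_CPT(id);             /* == 2 */
        KIB_THREAD_TID(id);             /* == 3 */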
+
+struct kib_sched_info {
+       /* serialise */
+       spinlock_t              ibs_lock;
+       /* schedulers sleep here */
+       wait_queue_head_t               ibs_waitq;
+       /* conns to check for rx completions */
+       cfs_list_t              ibs_conns;
+       /* number of scheduler threads */
+       int                     ibs_nthreads;
+       /* max allowed scheduler threads */
+       int                     ibs_nthreads_max;
+       int                     ibs_cpt;        /* CPT id */
+};
+
 typedef struct
 {
-        int                  kib_init;          /* initialisation state */
-        int                  kib_shutdown;      /* shut down? */
-        struct list_head     kib_devs;          /* IB devices extant */
-        atomic_t             kib_nthreads;      /* # live threads */
-        rwlock_t             kib_global_lock;   /* stabilize net/dev/peer/conn ops */
-
-        struct list_head    *kib_peers;         /* hash table of all my known peers */
-        int                  kib_peer_hash_size; /* size of kib_peers */
-
-        void                *kib_connd;         /* the connd task (serialisation assertions) */
-        struct list_head     kib_connd_conns;   /* connections to setup/teardown */
-        struct list_head     kib_connd_zombies; /* connections with zero refcount */
-        wait_queue_head_t    kib_connd_waitq;   /* connection daemon sleeps here */
-        spinlock_t           kib_connd_lock;    /* serialise */
-
-        wait_queue_head_t    kib_sched_waitq;   /* schedulers sleep here */
-        struct list_head     kib_sched_conns;   /* conns to check for rx completions */
-        spinlock_t           kib_sched_lock;    /* serialise */
-
-        struct ib_qp_attr    kib_error_qpa;      /* QP->ERROR */
+       int                     kib_init;       /* initialisation state */
+       int                     kib_shutdown;   /* shut down? */
+       cfs_list_t              kib_devs;       /* IB devices extant */
+       /* list head of failed devices */
+       cfs_list_t              kib_failed_devs;
+       /* failover thread sleeps here */
+       wait_queue_head_t               kib_failover_waitq;
+       cfs_atomic_t            kib_nthreads;   /* # live threads */
+       /* stabilize net/dev/peer/conn ops */
+       rwlock_t                kib_global_lock;
+       /* hash table of all my known peers */
+       cfs_list_t              *kib_peers;
+       /* size of kib_peers */
+       int                     kib_peer_hash_size;
+       /* the connd task (serialisation assertions) */
+       void                    *kib_connd;
+       /* connections to setup/teardown */
+       cfs_list_t              kib_connd_conns;
+       /* connections with zero refcount */
+       cfs_list_t              kib_connd_zombies;
+       /* connection daemon sleeps here */
+       wait_queue_head_t               kib_connd_waitq;
+       spinlock_t              kib_connd_lock; /* serialise */
+       struct ib_qp_attr       kib_error_qpa;  /* QP->ERROR */
+       /* percpt data for schedulers */
+       struct kib_sched_info   **kib_scheds;
 } kib_data_t;
 
 #define IBLND_INIT_NOTHING         0
@@ -334,7 +482,7 @@ typedef struct
 typedef struct
 {
         /* First 2 fields fixed FOR ALL TIME */
-        __u32             ibm_magic;            /* I'm an openibnal message */
+        __u32             ibm_magic;            /* I'm an ibnal message */
         __u16             ibm_version;          /* this is my version number */
 
         __u8              ibm_type;             /* msg type */
@@ -397,7 +545,7 @@ typedef struct {
 
 typedef struct kib_rx                           /* receive message */
 {
-        struct list_head          rx_list;      /* queue for attention */
+        cfs_list_t                rx_list;      /* queue for attention */
         struct kib_conn          *rx_conn;      /* owning conn */
         int                       rx_nob;       /* # bytes received (-1 while posted) */
         enum ib_wc_status         rx_status;    /* completion status */
@@ -415,11 +563,12 @@ typedef struct kib_rx                           /* receive message */
 
 typedef struct kib_tx                           /* transmit message */
 {
-        struct list_head          tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
+        cfs_list_t                tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
+        kib_tx_pool_t            *tx_pool;      /* pool I'm from */
         struct kib_conn          *tx_conn;      /* owning conn */
-        int                       tx_sending;   /* # tx callbacks outstanding */
-        int                       tx_queued;    /* queued for sending */
-        int                       tx_waiting;   /* waiting for peer */
+        short                     tx_sending;   /* # tx callbacks outstanding */
+        short                     tx_queued;    /* queued for sending */
+        short                     tx_waiting;   /* waiting for peer */
         int                       tx_status;    /* LNET completion status */
         unsigned long             tx_deadline;  /* completion deadline */
         __u64                     tx_cookie;    /* completion cookie */
@@ -433,11 +582,10 @@ typedef struct kib_tx                           /* transmit message */
         kib_rdma_desc_t          *tx_rd;        /* rdma descriptor */
         int                       tx_nfrags;    /* # entries in... */
         struct scatterlist       *tx_frags;     /* dma_map_sg descriptor */
-        struct ib_phys_buf       *tx_ipb;       /* physical buffer (for iWARP) */
         __u64                    *tx_pages;     /* rdma phys page addrs */
         union {
-                kib_phys_mr_t      *pmr;         /* MR for physical buffer */
-                struct ib_pool_fmr *fmr;         /* rdma mapping (mapped if != NULL) */
+                kib_phys_mr_t      *pmr;        /* MR for physical buffer */
+                kib_fmr_t           fmr;        /* FMR */
         }                         tx_u;
         int                       tx_dmadir;    /* dma direction */
 } kib_tx_t;
@@ -450,39 +598,47 @@ typedef struct kib_connvars
 
 typedef struct kib_conn
 {
-        struct kib_peer    *ibc_peer;           /* owning peer */
-        struct list_head    ibc_list;           /* stash on peer's conn list */
-        struct list_head    ibc_sched_list;     /* schedule for attention */
-        __u16               ibc_version;        /* version of connection */
-        __u64               ibc_incarnation;    /* which instance of the peer */
-        atomic_t            ibc_refcount;       /* # users */
-        int                 ibc_state;          /* what's happening */
-        int                 ibc_nsends_posted;  /* # uncompleted sends */
-        int                 ibc_noops_posted;   /* # uncompleted NOOPs */
-        int                 ibc_credits;        /* # credits I have */
-        int                 ibc_outstanding_credits; /* # credits to return */
-        int                 ibc_reserved_credits;/* # ACK/DONE msg credits */
-        int                 ibc_comms_error;    /* set on comms error */
-        int                 ibc_nrx:16;         /* receive buffers owned */
-        int                 ibc_scheduled:1;    /* scheduled for attention */
-        int                 ibc_ready:1;        /* CQ callback fired */
-        unsigned long       ibc_last_send;      /* time of last send */
-        struct list_head    ibc_early_rxs;      /* rxs completed before ESTABLISHED */
-        struct list_head    ibc_tx_queue;       /* sends that need a credit */
-        struct list_head    ibc_tx_queue_nocred;/* sends that don't need a credit */
-        struct list_head    ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
-        struct list_head    ibc_active_txs;     /* active tx awaiting completion */
-        spinlock_t          ibc_lock;           /* serialise */
-        kib_rx_t           *ibc_rxs;            /* the rx descs */
-        kib_pages_t        *ibc_rx_pages;       /* premapped rx msg pages */
-
-        struct rdma_cm_id  *ibc_cmid;           /* CM id */
-        struct ib_cq       *ibc_cq;             /* completion queue */
-
-        kib_connvars_t     *ibc_connvars;       /* in-progress connection state */
+       struct kib_sched_info *ibc_sched;       /* scheduler information */
+        struct kib_peer     *ibc_peer;          /* owning peer */
+        kib_hca_dev_t       *ibc_hdev;          /* HCA bound on */
+        cfs_list_t           ibc_list;          /* stash on peer's conn list */
+        cfs_list_t           ibc_sched_list;    /* schedule for attention */
+        __u16                ibc_version;       /* version of connection */
+        __u64                ibc_incarnation;   /* which instance of the peer */
+        cfs_atomic_t         ibc_refcount;      /* # users */
+        int                  ibc_state;         /* what's happening */
+        int                  ibc_nsends_posted; /* # uncompleted sends */
+        int                  ibc_noops_posted;  /* # uncompleted NOOPs */
+        int                  ibc_credits;       /* # credits I have */
+        int                  ibc_outstanding_credits; /* # credits to return */
+        int                  ibc_reserved_credits;/* # ACK/DONE msg credits */
+        int                  ibc_comms_error;   /* set on comms error */
+       unsigned int         ibc_nrx:16;        /* receive buffers owned */
+       unsigned int         ibc_scheduled:1;   /* scheduled for attention */
+       unsigned int         ibc_ready:1;       /* CQ callback fired */
+        /* time of last send */
+        unsigned long        ibc_last_send;
+        /** link chain for kiblnd_check_conns only */
+        cfs_list_t           ibc_connd_list;
+        /** rxs completed before ESTABLISHED */
+        cfs_list_t           ibc_early_rxs;
+        /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
+        cfs_list_t           ibc_tx_noops;
+        cfs_list_t           ibc_tx_queue;       /* sends that need a credit */
+        cfs_list_t           ibc_tx_queue_nocred;/* sends that don't need a credit */
+        cfs_list_t           ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
+        cfs_list_t           ibc_active_txs;     /* active tx awaiting completion */
+       spinlock_t           ibc_lock;           /* serialise */
+        kib_rx_t            *ibc_rxs;            /* the rx descs */
+        kib_pages_t         *ibc_rx_pages;       /* premapped rx msg pages */
+
+        struct rdma_cm_id   *ibc_cmid;           /* CM id */
+        struct ib_cq        *ibc_cq;             /* completion queue */
+
+        kib_connvars_t      *ibc_connvars;       /* in-progress connection state */
 } kib_conn_t;
 
-#define IBLND_CONN_INIT               0         /* being intialised */
+#define IBLND_CONN_INIT               0         /* being initialised */
 #define IBLND_CONN_ACTIVE_CONNECT     1         /* active sending req */
 #define IBLND_CONN_PASSIVE_WAIT       2         /* passive waiting for rtu */
 #define IBLND_CONN_ESTABLISHED        3         /* connection established */
@@ -491,69 +647,100 @@ typedef struct kib_conn
 
 typedef struct kib_peer
 {
-        struct list_head    ibp_list;           /* stash on global peer list */
-        lnet_nid_t          ibp_nid;            /* who's on the other end(s) */
-        lnet_ni_t          *ibp_ni;             /* LNet interface */
-        atomic_t            ibp_refcount;       /* # users */
-        struct list_head    ibp_conns;          /* all active connections */
-        struct list_head    ibp_tx_queue;       /* msgs waiting for a conn */
-        __u16               ibp_version;        /* version of peer */
-        __u64               ibp_incarnation;    /* incarnation of peer */
-        int                 ibp_connecting;     /* current active connection attempts */
-        int                 ibp_accepting;      /* current passive connection attempts */
-        int                 ibp_error;          /* errno on closing this peer */
-        cfs_time_t          ibp_last_alive;     /* when (in jiffies) I was last alive */
+        cfs_list_t           ibp_list;           /* stash on global peer list */
+        lnet_nid_t           ibp_nid;            /* who's on the other end(s) */
+        lnet_ni_t           *ibp_ni;             /* LNet interface */
+        cfs_atomic_t         ibp_refcount;       /* # users */
+        cfs_list_t           ibp_conns;          /* all active connections */
+        cfs_list_t           ibp_tx_queue;       /* msgs waiting for a conn */
+        __u16                ibp_version;        /* version of peer */
+        __u64                ibp_incarnation;    /* incarnation of peer */
+        int                  ibp_connecting;     /* current active connection attempts */
+        int                  ibp_accepting;      /* current passive connection attempts */
+        int                  ibp_error;          /* errno on closing this peer */
+        cfs_time_t           ibp_last_alive;     /* when (in jiffies) I was last alive */
 } kib_peer_t;
 
 extern kib_data_t      kiblnd_data;
 
+extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+
+static inline void
+kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+{
+        LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
+        cfs_atomic_inc(&hdev->ibh_ref);
+}
+
+static inline void
+kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+{
+        LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
+        if (cfs_atomic_dec_and_test(&hdev->ibh_ref))
+                kiblnd_hdev_destroy(hdev);
+}
+
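A usage sketch for the new HCA refcounting (an assumed pattern, not
copied from the source): the global rwlock pins ibd_hdev long enough to
take a reference, after which the snapshot survives a failover swap:

        read_lock(&kiblnd_data.kib_global_lock);
        hdev = dev->ibd_hdev;
        kiblnd_hdev_addref_locked(hdev);
        read_unlock(&kiblnd_data.kib_global_lock);
        /* ... use hdev safely here ... */
        kiblnd_hdev_decref(hdev);
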
+static inline int
+kiblnd_dev_can_failover(kib_dev_t *dev)
+{
+        if (!cfs_list_empty(&dev->ibd_fail_list)) /* already scheduled */
+                return 0;
+
+        if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+                return 0;
+
+        if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
+                return 1;
+
+        return dev->ibd_can_failover;
+}
+
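Read back from the checks above, the dev_failover tunable is effectively
tri-state:

        /*  0 - failover disabled
         *  1 - failover only when the IPoIB interface is a bonding
         *      master (ibd_can_failover)
         * >1 - failover forced, regardless of bonding */
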
 #define kiblnd_conn_addref(conn)                                \
 do {                                                            \
         CDEBUG(D_NET, "conn[%p] (%d)++\n",                      \
-               (conn), atomic_read(&(conn)->ibc_refcount));     \
-        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);        \
-        atomic_inc(&(conn)->ibc_refcount);                      \
+               (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+        cfs_atomic_inc(&(conn)->ibc_refcount);                  \
 } while (0)
 
-#define kiblnd_conn_decref(conn)                                              \
-do {                                                                          \
-        unsigned long   flags;                                                \
-                                                                              \
-        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                    \
-               (conn), atomic_read(&(conn)->ibc_refcount));                   \
-        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);                      \
-        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {                     \
-                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);        \
-                list_add_tail(&(conn)->ibc_list,                              \
-                              &kiblnd_data.kib_connd_zombies);                \
-                wake_up(&kiblnd_data.kib_connd_waitq);                        \
-                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);   \
-        }                                                                     \
+#define kiblnd_conn_decref(conn)                                       \
+do {                                                                   \
+       unsigned long flags;                                            \
+                                                                       \
+       CDEBUG(D_NET, "conn[%p] (%d)--\n",                              \
+              (conn), cfs_atomic_read(&(conn)->ibc_refcount));         \
+       LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);                      \
+       if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) {           \
+               spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
+               cfs_list_add_tail(&(conn)->ibc_list,                    \
+                                 &kiblnd_data.kib_connd_zombies);      \
+               wake_up(&kiblnd_data.kib_connd_waitq);          \
+               spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+       }                                                               \
 } while (0)
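
Note the final-decref path: the conn is not destroyed in place but queued
on kib_connd_zombies for the connd thread to tear down, and the
spin_lock_irqsave() pairing suggests the macro must stay safe to call
from interrupt (CQ callback) context.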
 
 #define kiblnd_peer_addref(peer)                                \
 do {                                                            \
         CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
                (peer), libcfs_nid2str((peer)->ibp_nid),         \
-               atomic_read (&(peer)->ibp_refcount));            \
-        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
-        atomic_inc(&(peer)->ibp_refcount);                      \
+               cfs_atomic_read (&(peer)->ibp_refcount));        \
+        cfs_atomic_inc(&(peer)->ibp_refcount);                  \
 } while (0)
 
 #define kiblnd_peer_decref(peer)                                \
 do {                                                            \
         CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
                (peer), libcfs_nid2str((peer)->ibp_nid),         \
-               atomic_read (&(peer)->ibp_refcount));            \
-        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
-        if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
+               cfs_atomic_read (&(peer)->ibp_refcount));        \
+        LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);              \
+        if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount))     \
                 kiblnd_destroy_peer(peer);                      \
 } while (0)
 
-static inline struct list_head *
+static inline cfs_list_t *
 kiblnd_nid2peerlist (lnet_nid_t nid)
 {
-        unsigned int hash = ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+        unsigned int hash =
+                ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
 
         return (&kiblnd_data.kib_peers [hash]);
 }
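
A worked example of the peer hash (illustrative arithmetic only): for an
o2ib NID whose low 32 bits encode 10.0.2.18, i.e. 0x0a000212, the default
table size of 101 puts the peer in bucket

        0x0a000212 % 101 == 167772690 % 101 == 75
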
@@ -562,28 +749,28 @@ static inline int
 kiblnd_peer_active (kib_peer_t *peer)
 {
         /* Am I in the peer hash table? */
-        return (!list_empty(&peer->ibp_list));
+        return (!cfs_list_empty(&peer->ibp_list));
 }
 
 static inline kib_conn_t *
 kiblnd_get_conn_locked (kib_peer_t *peer)
 {
-        LASSERT (!list_empty(&peer->ibp_conns));
+        LASSERT (!cfs_list_empty(&peer->ibp_conns));
 
         /* just return the first connection */
-        return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+        return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
 }
 
 static inline int
 kiblnd_send_keepalive(kib_conn_t *conn)
 {
-        return (*kiblnd_tunables.kib_keepalive > 0) &&
-                time_after(jiffies, conn->ibc_last_send +
-                           *kiblnd_tunables.kib_keepalive*HZ);
+       return (*kiblnd_tunables.kib_keepalive > 0) &&
+               cfs_time_after(jiffies, conn->ibc_last_send +
+                              *kiblnd_tunables.kib_keepalive*HZ);
 }
 
 static inline int
-kiblnd_send_noop(kib_conn_t *conn)
+kiblnd_need_noop(kib_conn_t *conn)
 {
         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -592,14 +779,26 @@ kiblnd_send_noop(kib_conn_t *conn)
             !kiblnd_send_keepalive(conn))
                 return 0; /* No need to send NOOP */
 
-        if (!list_empty(&conn->ibc_tx_queue_nocred))
-                return 0; /* NOOP can be piggybacked */
+        if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
+                if (!cfs_list_empty(&conn->ibc_tx_queue_nocred))
+                        return 0; /* NOOP can be piggybacked */
 
-        if (!IBLND_OOB_CAPABLE(conn->ibc_version))
-                return list_empty(&conn->ibc_tx_queue); /* can't piggyback? */
+                /* No tx to piggyback NOOP onto or no credit to send a tx */
+                return (cfs_list_empty(&conn->ibc_tx_queue) ||
+                        conn->ibc_credits == 0);
+        }
+
+        if (!cfs_list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
+            !cfs_list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
+            conn->ibc_credits == 0)                    /* no credit */
+                return 0;
+
+        if (conn->ibc_credits == 1 &&      /* last credit reserved for */
+            conn->ibc_outstanding_credits == 0) /* giving back credits */
+                return 0;
 
         /* No tx to piggyback NOOP onto or no credit to send a tx */
-        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
+        return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
 }
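
A summary of the rewritten decision, read directly from the branches
above:

        /* OOB-capable (V2) peers: piggyback on a queued no-credit tx
         * when possible; otherwise a NOOP is needed if there is nothing
         * to piggyback on or no credit to send a queued tx.
         * V1 peers: never queue a second NOOP, and the last send credit
         * stays in reserve unless credits are owed back to the peer. */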
 
 static inline void
@@ -610,7 +809,7 @@ kiblnd_abort_receives(kib_conn_t *conn)
 }
 
 static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q)
 {
         if (q == &conn->ibc_tx_queue)
                 return "tx_queue";
@@ -662,7 +861,7 @@ static inline void
 kiblnd_set_conn_state (kib_conn_t *conn, int state)
 {
         conn->ibc_state = state;
-        mb();
+        cfs_mb();
 }
 
 static inline void
@@ -842,40 +1041,47 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
 
 #endif
 
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_net_t *net,
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
                                     kib_rdma_desc_t *rd);
-struct ib_mr *kiblnd_find_dma_mr(kib_net_t *net,
+struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
                                  __u64 addr, __u64 size);
 void kiblnd_map_rx_descs(kib_conn_t *conn);
 void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-void kiblnd_map_tx_descs (lnet_ni_t *ni);
-void kiblnd_unmap_tx_descs(lnet_ni_t *ni);
 int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                   kib_rdma_desc_t *rd, int nfrags);
 void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
-kib_phys_mr_t *kiblnd_phys_mr_map(kib_net_t *net, kib_rdma_desc_t *rd,
-                                  struct ib_phys_buf *ipb, __u64 *iova);
-void kiblnd_phys_mr_unmap(kib_net_t *net, kib_phys_mr_t *pmr);
+void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node);
+cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+
+int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
+                         int npages, __u64 iov, kib_fmr_t *fmr);
+void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+
+int  kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
+                         kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
+void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
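
A sketch of the FMR mapping flow these declarations imply (an assumed
call sequence, error handling elided; fmr_pfmr->fmr->rkey relies on the
standard struct ib_pool_fmr layout):

        rc = kiblnd_fmr_pool_map(fps, tx->tx_pages, npages, iov,
                                 &tx->tx_u.fmr);
        if (rc == 0)
                rd->rd_key = tx->tx_u.fmr.fmr_pfmr->fmr->rkey;
        /* ... post the RDMA ... */
        kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, status);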
 
 int  kiblnd_startup (lnet_ni_t *ni);
 void kiblnd_shutdown (lnet_ni_t *ni);
 int  kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, time_t *when);
+void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
 
 int  kiblnd_tunables_init(void);
 void kiblnd_tunables_fini(void);
 
 int  kiblnd_connd (void *arg);
 int  kiblnd_scheduler(void *arg);
-int  kiblnd_thread_start (int (*fn)(void *arg), void *arg);
+int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
+int  kiblnd_failover_thread (void *arg);
 
-int  kiblnd_alloc_pages (kib_pages_t **pp, int npages);
+int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
 void kiblnd_free_pages (kib_pages_t *p);
 
 int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
                         struct rdma_cm_event *event);
 int  kiblnd_translate_mtu(int value);
 
+int  kiblnd_dev_failover(kib_dev_t *dev);
 int  kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
 void kiblnd_destroy_peer (kib_peer_t *peer);
 void kiblnd_destroy_dev (kib_dev_t *dev);
@@ -901,7 +1107,8 @@ void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
 void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
 void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
 void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status);
+void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
+                         int status);
 void kiblnd_check_sends (kib_conn_t *conn);
 
 void kiblnd_qp_event(struct ib_event *event, void *arg);