}
static void
-kiblnd_setup_mtu(struct rdma_cm_id *cmid)
+kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
{
- unsigned long flags;
int mtu;
/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
if (cmid->route.path_rec == NULL)
return;
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
LASSERT (mtu >= 0);
if (mtu != 0)
cmid->route.path_rec->mtu = mtu;
-
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
kib_conn_t *
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
* its ref on 'cmid'). */
+ cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_dev_t *dev = net->ibn_dev;
struct ib_qp_init_attr *init_qp_attr;
kib_conn_t *conn;
struct ib_cq *cq;
}
memset(conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars));
+ cfs_write_lock_irqsave(glock, flags);
+ if (dev->ibd_failover) {
+ cfs_write_unlock_irqrestore(glock, flags);
+ CERROR("%s: failover in progress\n", dev->ibd_ifname);
+ goto failed_2;
+ }
+
+ if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+ /* wake up the failover thread and tear down this connection */
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
+ }
+
+ cfs_write_unlock_irqrestore(glock, flags);
+ CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+ cmid->device->name, dev->ibd_ifname);
+ goto failed_2;
+ }
+
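+ /* the connection takes its own reference on the device's current HCA;
+ * it is dropped when the connection is destroyed */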
+ kiblnd_hdev_addref_locked(dev->ibd_hdev);
+ conn->ibc_hdev = dev->ibd_hdev;
+
+ kiblnd_setup_mtu_locked(cmid);
+
+ cfs_write_unlock_irqrestore(glock, flags);
+
LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
if (conn->ibc_rxs == NULL) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
}
- kiblnd_setup_mtu(cmid);
-
memset(init_qp_attr, 0, sizeof(*init_qp_attr));
init_qp_attr->event_handler = kiblnd_qp_event;
init_qp_attr->qp_context = conn;
init_qp_attr->send_cq = cq;
init_qp_attr->recv_cq = cq;
- rc = rdma_create_qp(cmid, net->ibn_dev->ibd_pd, init_qp_attr);
+ rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
if (rc != 0) {
CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
rc, init_qp_attr->cap.max_send_wr,
if (conn->ibc_connvars != NULL)
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ if (conn->ibc_hdev != NULL)
+ kiblnd_hdev_decref(conn->ibc_hdev);
+
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
kib_net_t *net = peer->ibp_ni->ni_data;
int npages = p->ibp_npages;
int i;
- LASSERT (p->ibp_device == NULL);
-
for (i = 0; i < npages; i++)
if (p->ibp_pages[i] != NULL)
__free_page(p->ibp_pages[i]);
int i;
LASSERT (conn->ibc_rxs != NULL);
- LASSERT (conn->ibc_rx_pages->ibp_device != NULL);
+ LASSERT (conn->ibc_hdev != NULL);
for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
rx = &conn->ibc_rxs[i];
LASSERT (rx->rx_nob >= 0); /* not posted */
- kiblnd_dma_unmap_single(conn->ibc_rx_pages->ibp_device,
+ kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
rx->rx_msgaddr),
IBLND_MSG_SIZE, DMA_FROM_DEVICE);
}
- conn->ibc_rx_pages->ibp_device = NULL;
-
kiblnd_free_pages(conn->ibc_rx_pages);
conn->ibc_rx_pages = NULL;
rx->rx_conn = conn;
rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
- rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_cmid->device,
+ rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
rx->rx_msg, IBLND_MSG_SIZE,
DMA_FROM_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(conn->ibc_cmid->device,
+ LASSERT (!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
}
}
-
- conn->ibc_rx_pages->ibp_device = conn->ibc_cmid->device;
}
static void
kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
{
+ kib_hca_dev_t *hdev = tpo->tpo_hdev;
kib_tx_t *tx;
int i;
LASSERT (tpo->tpo_pool.po_allocated == 0);
- LASSERT (tpo->tpo_tx_pages->ibp_device != NULL);
+
+ if (hdev == NULL)
+ return;
for (i = 0; i < tpo->tpo_pool.po_size; i++) {
tx = &tpo->tpo_tx_descs[i];
- kiblnd_dma_unmap_single(tpo->tpo_tx_pages->ibp_device,
+ kiblnd_dma_unmap_single(hdev->ibh_ibdev,
KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
tx->tx_msgaddr),
IBLND_MSG_SIZE, DMA_TO_DEVICE);
}
- tpo->tpo_tx_pages->ibp_device = NULL;
+
+ kiblnd_hdev_decref(hdev);
+ tpo->tpo_hdev = NULL;
+}
+
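+/* Return the device's current HCA with a reference held, waiting
+ * (with the global lock dropped) for any failover in progress to
+ * complete first. */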
+static kib_hca_dev_t *
+kiblnd_current_hdev(kib_dev_t *dev)
+{
+ kib_hca_dev_t *hdev;
+ unsigned long flags;
+ int i = 0;
+
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (dev->ibd_failover) {
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ if (i++ % 50 == 0)
+ CDEBUG(D_NET, "Wait for dev(%s) failover\n", dev->ibd_ifname);
+ cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ }
+
+ kiblnd_hdev_addref_locked(dev->ibd_hdev);
+ hdev = dev->ibd_hdev;
+
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ return hdev;
}
static void
kib_pages_t *txpgs = tpo->tpo_tx_pages;
kib_pool_t *pool = &tpo->tpo_pool;
kib_net_t *net = pool->po_owner->ps_net;
+ kib_dev_t *dev = net->ibn_dev;
struct page *page;
kib_tx_t *tx;
int page_offset;
int i;
LASSERT (net != NULL);
- LASSERT (net->ibn_dev->ibd_cmid != NULL &&
- net->ibn_dev->ibd_cmid->device != NULL);
/* pre-mapped messages are not bigger than 1 page */
CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);
/* No fancy arithmetic when we do the buffer calculations */
CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
- txpgs->ibp_device = net->ibn_dev->ibd_cmid->device;
+ tpo->tpo_hdev = kiblnd_current_hdev(dev);
for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
page = txpgs->ibp_pages[ipage];
page_offset);
tx->tx_msgaddr = kiblnd_dma_map_single(
- txpgs->ibp_device, tx->tx_msg,
+ tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
IBLND_MSG_SIZE, DMA_TO_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(txpgs->ibp_device,
+ LASSERT (!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
tx->tx_msgaddr));
KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
}
struct ib_mr *
-kiblnd_find_dma_mr(kib_net_t *net, __u64 addr, __u64 size)
+kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
{
__u64 index;
- LASSERT (net->ibn_dev->ibd_mrs[0] != NULL);
+ LASSERT (hdev->ibh_mrs[0] != NULL);
- if (net->ibn_dev->ibd_nmrs == 1)
- return net->ibn_dev->ibd_mrs[0];
+ if (hdev->ibh_nmrs == 1)
+ return hdev->ibh_mrs[0];
- index = addr >> net->ibn_dev->ibd_mr_shift;
+ index = addr >> hdev->ibh_mr_shift;
- if (index < net->ibn_dev->ibd_nmrs &&
- index == ((addr + size - 1) >> net->ibn_dev->ibd_mr_shift))
- return net->ibn_dev->ibd_mrs[index];
+ if (index < hdev->ibh_nmrs &&
+ index == ((addr + size - 1) >> hdev->ibh_mr_shift))
+ return hdev->ibh_mrs[index];
return NULL;
}
struct ib_mr *
-kiblnd_find_rd_dma_mr(kib_net_t *net, kib_rdma_desc_t *rd)
+kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
{
struct ib_mr *prev_mr;
struct ib_mr *mr;
int i;
- LASSERT (net->ibn_dev->ibd_mrs[0] != NULL);
+ LASSERT (hdev->ibh_mrs[0] != NULL);
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
*kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
return NULL;
- if (net->ibn_dev->ibd_nmrs == 1)
- return net->ibn_dev->ibd_mrs[0];
+ if (hdev->ibh_nmrs == 1)
+ return hdev->ibh_mrs[0];
for (i = 0, mr = prev_mr = NULL;
i < rd->rd_nfrags; i++) {
- mr = kiblnd_find_dma_mr(net,
+ mr = kiblnd_find_dma_mr(hdev,
rd->rd_frags[i].rf_addr,
rd->rd_frags[i].rf_nob);
if (prev_mr == NULL)
if (pool->fpo_fmr_pool != NULL)
ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ if (pool->fpo_hdev != NULL)
+ kiblnd_hdev_decref(pool->fpo_hdev);
+
LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
}
kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
{
/* FMR pool for RDMA */
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
kib_fmr_pool_t *fpo;
struct ib_fmr_pool_param param = {
.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
.cache = !!*kiblnd_tunables.kib_fmr_cache};
int rc;
- LASSERT (fps->fps_net->ibn_dev != NULL &&
- fps->fps_net->ibn_dev->ibd_pd != NULL);
-
LIBCFS_ALLOC(fpo, sizeof(kib_fmr_pool_t));
if (fpo == NULL)
return -ENOMEM;
memset(fpo, 0, sizeof(kib_fmr_pool_t));
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fps->fps_net->ibn_dev->ibd_pd, ¶m);
+ fpo->fpo_hdev = kiblnd_current_hdev(dev);
+ fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, ¶m);
if (IS_ERR(fpo->fpo_fmr_pool)) {
CERROR("Failed to create FMR pool: %ld\n",
PTR_ERR(fpo->fpo_fmr_pool));
rc = PTR_ERR(fpo->fpo_fmr_pool);
+ kiblnd_hdev_decref(fpo->fpo_hdev);
LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
return rc;
}
}
static void
+kiblnd_fail_fmr_pool_set(kib_fmr_poolset_t *fps, cfs_list_t *zombies)
+{
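+ /* mark every FMR pool in this set as failed: idle pools move to
+ * @zombies for destruction, busy ones are parked on
+ * fps_failed_pool_list until their last mapping is released */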
+ cfs_spin_lock(&fps->fps_lock);
+
+ while (!cfs_list_empty(&fps->fps_pool_list)) {
+ kib_fmr_pool_t *fpo = cfs_list_entry(fps->fps_pool_list.next,
+ kib_fmr_pool_t, fpo_list);
+ fpo->fpo_failed = 1;
+ cfs_list_del(&fpo->fpo_list);
+ if (fpo->fpo_map_count == 0)
+ cfs_list_add(&fpo->fpo_list, zombies);
+ else
+ cfs_list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+ }
+
+ cfs_spin_unlock(&fps->fps_lock);
+}
+
+static void
kiblnd_fini_fmr_pool_set(kib_fmr_poolset_t *fps)
{
+ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
}
fps->fps_net = net;
cfs_spin_lock_init(&fps->fps_lock);
CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
+ CFS_INIT_LIST_HEAD(&fps->fps_failed_pool_list);
rc = kiblnd_create_fmr_pool(fps, &fpo);
if (rc == 0)
cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
return rc;
}
+static int
+kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
+{
+ if (fpo->fpo_map_count != 0) /* still in use */
+ return 0;
+ if (fpo->fpo_failed)
+ return 1;
+ return cfs_time_aftereq(now, fpo->fpo_deadline);
+}
+
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
CFS_LIST_HEAD (zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ cfs_time_t now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
if (fps->fps_pool_list.next == &fpo->fpo_list)
continue;
- if (fpo->fpo_map_count == 0 && /* no more reference */
- cfs_time_aftereq(cfs_time_current(), fpo->fpo_deadline)) {
+ if (kiblnd_fmr_pool_is_idle(fpo, now)) {
cfs_list_move(&fpo->fpo_list, &zombies);
fps->fps_version ++;
}
}
void
-kiblnd_destroy_pool_list(kib_poolset_t *ps, cfs_list_t *head)
+kiblnd_destroy_pool_list(cfs_list_t *head)
{
kib_pool_t *pool;
while (!cfs_list_empty(head)) {
pool = cfs_list_entry(head->next, kib_pool_t, po_list);
cfs_list_del(&pool->po_list);
- ps->ps_pool_destroy(pool);
+
+ LASSERT (pool->po_owner != NULL);
+ pool->po_owner->ps_pool_destroy(pool);
+ }
+}
+
+static void
+kiblnd_fail_pool_set(kib_poolset_t *ps, cfs_list_t *zombies)
+{
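+ /* as with the FMR variant above: fail every pool in the set, moving
+ * idle pools to @zombies and busy ones to ps_failed_pool_list */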
+ cfs_spin_lock(&ps->ps_lock);
+ while (!cfs_list_empty(&ps->ps_pool_list)) {
+ kib_pool_t *po = cfs_list_entry(ps->ps_pool_list.next,
+ kib_pool_t, po_list);
+ po->po_failed = 1;
+ cfs_list_del(&po->po_list);
+ if (po->po_allocated == 0)
+ cfs_list_add(&po->po_list, zombies);
+ else
+ cfs_list_add(&po->po_list, &ps->ps_failed_pool_list);
}
+ cfs_spin_unlock(&ps->ps_lock);
}
static void
kiblnd_fini_pool_set(kib_poolset_t *ps)
{
- kiblnd_destroy_pool_list(ps, &ps->ps_pool_list);
+ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
+ kiblnd_destroy_pool_list(&ps->ps_pool_list);
}
static int
strncpy(ps->ps_name, name, IBLND_POOL_NAME_LEN);
cfs_spin_lock_init(&ps->ps_lock);
CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
+ CFS_INIT_LIST_HEAD(&ps->ps_failed_pool_list);
rc = ps->ps_pool_create(ps, size, &pool);
if (rc == 0)
return rc;
}
+static int
+kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
+{
+ if (pool->po_allocated != 0) /* still in use */
+ return 0;
+ if (pool->po_failed)
+ return 1;
+ return cfs_time_aftereq(now, pool->po_deadline);
+}
+
void
kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
{
if (ps->ps_pool_list.next == &pool->po_list)
continue;
- if (pool->po_allocated == 0 &&
- cfs_time_aftereq(now, pool->po_deadline))
+ if (kiblnd_pool_is_idle(pool, now))
cfs_list_move(&pool->po_list, &zombies);
}
cfs_spin_unlock(&ps->ps_lock);
if (!cfs_list_empty(&zombies))
- kiblnd_destroy_pool_list(ps, &zombies);
+ kiblnd_destroy_pool_list(&zombies);
}
cfs_list_t *
}
int
-kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_rdma_desc_t *rd,
- __u64 *iova, kib_phys_mr_t **pp_pmr)
+kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
{
kib_phys_mr_t *pmr;
cfs_list_t *node;
}
pmr = container_of(node, kib_phys_mr_t, pmr_list);
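+ /* the pool this pmr came from was created on a previous HCA (a
+ * failover has happened since); release it and fail with -EAGAIN */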
+ if (pmr->pmr_pool->ppo_hdev != hdev) {
+ kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
+ return -EAGAIN;
+ }
+
for (i = 0; i < rd->rd_nfrags; i ++) {
pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
}
- pmr->pmr_mr = ib_reg_phys_mr(pps->pps_poolset.ps_net->ibn_dev->ibd_pd,
+ pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
pmr->pmr_ipb, rd->rd_nfrags,
IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE,
}
kiblnd_fini_pool(pool);
+ if (ppo->ppo_hdev != NULL)
+ kiblnd_hdev_decref(ppo->ppo_hdev);
+
LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
}
return -ENOMEM;
}
+ ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
*pp_po = pool;
return 0;
}
LASSERT (pool->po_allocated == 0);
if (tpo->tpo_tx_pages != NULL) {
- if (tpo->tpo_tx_pages->ibp_device != NULL)
- kiblnd_unmap_tx_pool(tpo);
+ kiblnd_unmap_tx_pool(tpo);
kiblnd_free_pages(tpo->tpo_tx_pages);
}
kib_fmr_poolset_t *fps = &net->ibn_fmr_ps;
kib_pmr_poolset_t *pps = &net->ibn_pmr_ps;
kib_tx_poolset_t *tps = &net->ibn_tx_ps;
+ unsigned long flags;
int rc;
if (*kiblnd_tunables.kib_fmr_pool_size <
return -EINVAL;
}
+ cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (*kiblnd_tunables.kib_map_on_demand > 0 ||
- net->ibn_dev->ibd_nmrs > 1) { /* premapping can fail if ibd_nmr > 1,
- * so we always create FMR/PMR pool and
- * map-on-demand if premapping failed */
+ net->ibn_dev->ibd_hdev->ibh_nmrs > 1) {
+ /* premapping can fail if ibd_nmr > 1, so we always create the
+ * FMR/PMR pools and fall back to map-on-demand when premapping
+ * fails */
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
rc = kiblnd_init_fmr_pool_set(fps, net);
if (rc == 0) {
net->ibn_with_fmr = 1;
}
if (rc != 0)
return rc;
+ } else {
+ cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
rc = kiblnd_init_pool_set(&tps->tps_poolset, net, "TX", IBLND_TX_MSGS(),
return rc;
}
-void
-kiblnd_dev_cleanup(kib_dev_t *ibdev)
-{
- int i;
-
- if (ibdev->ibd_mrs == NULL)
- return;
-
- for (i = 0; i < ibdev->ibd_nmrs; i++) {
- if (ibdev->ibd_mrs[i] == NULL)
- break;
-
- ib_dereg_mr(ibdev->ibd_mrs[i]);
- }
-
- LIBCFS_FREE(ibdev->ibd_mrs, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs);
- ibdev->ibd_mrs = NULL;
-}
-
static int
-kiblnd_dev_get_attr(kib_dev_t *ibdev)
+kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
{
struct ib_device_attr *attr;
int rc;
/* It's safe to assume a HCA can handle a page size
* matching that of the native system */
- ibdev->ibd_page_shift = PAGE_SHIFT;
- ibdev->ibd_page_size = 1 << PAGE_SHIFT;
- ibdev->ibd_page_mask = ~((__u64)ibdev->ibd_page_size - 1);
+ hdev->ibh_page_shift = PAGE_SHIFT;
+ hdev->ibh_page_size = 1 << PAGE_SHIFT;
+ hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
LIBCFS_ALLOC(attr, sizeof(*attr));
if (attr == NULL) {
return -ENOMEM;
}
- rc = ib_query_device(ibdev->ibd_cmid->device, attr);
+ rc = ib_query_device(hdev->ibh_ibdev, attr);
if (rc == 0)
- ibdev->ibd_mr_size = attr->max_mr_size;
+ hdev->ibh_mr_size = attr->max_mr_size;
LIBCFS_FREE(attr, sizeof(*attr));
/* XXX We can't trust this value returned by Chelsio driver, it's wrong
* and we have reported the bug, remove these in the future when Chelsio
* bug got fixed. */
- if (rdma_node_get_transport(ibdev->ibd_cmid->device->node_type) ==
+ if (rdma_node_get_transport(hdev->ibh_ibdev->node_type) ==
RDMA_TRANSPORT_IWARP)
- ibdev->ibd_mr_size = (1ULL << 32) - 1;
+ hdev->ibh_mr_size = (1ULL << 32) - 1;
#endif
- if (ibdev->ibd_mr_size == ~0ULL) {
- ibdev->ibd_mr_shift = 64;
+ if (hdev->ibh_mr_size == ~0ULL) {
+ hdev->ibh_mr_shift = 64;
return 0;
}
- for (ibdev->ibd_mr_shift = 0;
- ibdev->ibd_mr_shift < 64; ibdev->ibd_mr_shift ++) {
- if (ibdev->ibd_mr_size == (1ULL << ibdev->ibd_mr_shift) ||
- ibdev->ibd_mr_size == (1ULL << ibdev->ibd_mr_shift) - 1)
+ for (hdev->ibh_mr_shift = 0;
+ hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift ++) {
+ if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
+ hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
return 0;
}
- CERROR("Invalid mr size: "LPX64"\n", ibdev->ibd_mr_size);
+ CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
return -EINVAL;
}
+void
+kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+{
+ int i;
+
+ if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
+ return;
+
+ for (i = 0; i < hdev->ibh_nmrs; i++) {
+ if (hdev->ibh_mrs[i] == NULL)
+ break;
+
+ ib_dereg_mr(hdev->ibh_mrs[i]);
+ }
+
+ LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
+ hdev->ibh_mrs = NULL;
+ hdev->ibh_nmrs = 0;
+}
+
+void
+kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+{
+ kiblnd_hdev_cleanup_mrs(hdev);
+
+ if (hdev->ibh_pd != NULL)
+ ib_dealloc_pd(hdev->ibh_pd);
+
+ if (hdev->ibh_cmid != NULL)
+ rdma_destroy_id(hdev->ibh_cmid);
+
+ LIBCFS_FREE(hdev, sizeof(*hdev));
+}
+
int
-kiblnd_dev_setup(kib_dev_t *ibdev)
+kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
struct ib_mr *mr;
int i;
int acflags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE;
- rc = kiblnd_dev_get_attr(ibdev);
+ rc = kiblnd_hdev_get_attr(hdev);
if (rc != 0)
return rc;
- if (ibdev->ibd_mr_shift == 64) {
- LIBCFS_ALLOC(ibdev->ibd_mrs, 1 * sizeof(*ibdev->ibd_mrs));
- if (ibdev->ibd_mrs == NULL) {
+ if (hdev->ibh_mr_shift == 64) {
+ LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
+ if (hdev->ibh_mrs == NULL) {
CERROR("Failed to allocate MRs table\n");
return -ENOMEM;
}
- ibdev->ibd_mrs[0] = NULL;
- ibdev->ibd_nmrs = 1;
+ hdev->ibh_mrs[0] = NULL;
+ hdev->ibh_nmrs = 1;
- mr = ib_get_dma_mr(ibdev->ibd_pd, acflags);
+ mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
if (IS_ERR(mr)) {
CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
- kiblnd_dev_cleanup(ibdev);
+ kiblnd_hdev_cleanup_mrs(hdev);
return PTR_ERR(mr);
}
- ibdev->ibd_mrs[0] = mr;
+ hdev->ibh_mrs[0] = mr;
goto out;
}
- mr_size = (1ULL << ibdev->ibd_mr_shift);
+ mr_size = (1ULL << hdev->ibh_mr_shift);
mm_size = (unsigned long)high_memory - PAGE_OFFSET;
- ibdev->ibd_nmrs = (int)((mm_size + mr_size - 1) >> ibdev->ibd_mr_shift);
+ hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
- if (ibdev->ibd_mr_shift < 32 || ibdev->ibd_nmrs > 1024) {
+ if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
/* it's 4T..., assume we will re-code at that time */
CERROR("Can't support memory size: x"LPX64
" with MR size: x"LPX64"\n", mm_size, mr_size);
}
/* create an array of MRs to cover all memory */
- LIBCFS_ALLOC(ibdev->ibd_mrs, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs);
- if (ibdev->ibd_mrs == NULL) {
+ LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
+ if (hdev->ibh_mrs == NULL) {
CERROR("Failed to allocate MRs' table\n");
return -ENOMEM;
}
- memset(ibdev->ibd_mrs, 0, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs);
+ memset(hdev->ibh_mrs, 0, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- for (i = 0; i < ibdev->ibd_nmrs; i++) {
+ for (i = 0; i < hdev->ibh_nmrs; i++) {
struct ib_phys_buf ipb;
__u64 iova;
- ipb.size = ibdev->ibd_mr_size;
+ ipb.size = hdev->ibh_mr_size;
ipb.addr = i * mr_size;
iova = ipb.addr;
- mr = ib_reg_phys_mr(ibdev->ibd_pd, &ipb, 1, acflags, &iova);
+ mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
if (IS_ERR(mr)) {
CERROR("Failed ib_reg_phys_mr addr "LPX64
" size "LPX64" : %ld\n",
ipb.addr, ipb.size, PTR_ERR(mr));
- kiblnd_dev_cleanup(ibdev);
+ kiblnd_hdev_cleanup_mrs(hdev);
return PTR_ERR(mr);
}
LASSERT (iova == ipb.addr);
- ibdev->ibd_mrs[i] = mr;
+ hdev->ibh_mrs[i] = mr;
}
out:
CDEBUG(D_CONSOLE, "Register global MR array, MR size: "
LPX64", array size: %d\n",
- ibdev->ibd_mr_size, ibdev->ibd_nmrs);
+ hdev->ibh_mr_size, hdev->ibh_nmrs);
+ return 0;
+}
- cfs_list_add_tail(&ibdev->ibd_list,
- &kiblnd_data.kib_devs);
+static int
+kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
+{ /* DUMMY */
return 0;
}
+static int
+kiblnd_dev_need_failover(kib_dev_t *dev)
+{
+ struct rdma_cm_id *cmid;
+ struct sockaddr_in srcaddr;
+ struct sockaddr_in dstaddr;
+ int rc;
+
+ if (dev->ibd_hdev == NULL || /* initializing */
+ dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
+ *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
+ return 1;
+
+ /* XXX: it's ugly, but there is no better way to detect an ib-bonding
+ * HCA failover because:
+ *
+ * a. there is no reliable CM event for HCA failover...
+ * b. there is no OFED API to get the ib_device for the current
+ *    net_device...
+ *
+ * We have only two choices at this point:
+ *
+ * a. rdma_bind_addr(), which would conflict with the listener cmid
+ * b. rdma_resolve_addr() to a zero addr */
+ cmid = rdma_create_id(kiblnd_dummy_callback,
+ dev, RDMA_PS_TCP);
+ if (IS_ERR(cmid)) {
+ rc = PTR_ERR(cmid);
+ CERROR("Failed to create cmid for failover: %d\n", rc);
+ return rc;
+ }
+
+ memset(&srcaddr, 0, sizeof(srcaddr));
+ srcaddr.sin_family = AF_INET;
+ srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+
+ memset(&dstaddr, 0, sizeof(dstaddr));
+ dstaddr.sin_family = AF_INET;
+ rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr, 1);
+ if (rc != 0) {
+ CERROR("Failed to bind %s to device: %d\n",
+ dev->ibd_ifname, rc);
+ rdma_destroy_id(cmid);
+ return rc;
+ }
+
+ LASSERT (cmid->device != NULL);
+ if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
+ /* don't need device failover */
+ rdma_destroy_id(cmid);
+ return 0;
+ }
+
+ /* the cmid is now bound to a different HCA: failover is needed.
+ * Destroy the probe cmid before reporting it. */
+ rdma_destroy_id(cmid);
+ return 1;
+}
+
+int
+kiblnd_dev_failover(kib_dev_t *dev)
+{
+ CFS_LIST_HEAD (zombie_tpo);
+ CFS_LIST_HEAD (zombie_ppo);
+ CFS_LIST_HEAD (zombie_fpo);
+ struct rdma_cm_id *cmid = NULL;
+ kib_hca_dev_t *hdev = NULL;
+ kib_hca_dev_t *old;
+ struct ib_pd *pd;
+ kib_net_t *net;
+ struct sockaddr_in addr;
+ unsigned long flags;
+ int rc = 0;
+
+ LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
+ dev->ibd_can_failover ||
+ dev->ibd_hdev == NULL);
+
+ rc = kiblnd_dev_need_failover(dev);
+ if (rc <= 0)
+ goto out;
+
+ if (dev->ibd_hdev != NULL &&
+ dev->ibd_hdev->ibh_cmid != NULL) {
+ /* XXX it's not ideal to close the old listener here, because we
+ * might then fail to create the new one. But we have to close it
+ * now, otherwise rdma_bind_addr() would return EADDRINUSE... */
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ cmid = dev->ibd_hdev->ibh_cmid;
+ /* make the next call to kiblnd_dev_need_failover() return 1 */
+ dev->ibd_hdev->ibh_cmid = NULL;
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ rdma_destroy_id(cmid);
+ }
+
+ cmid = rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP);
+ if (IS_ERR(cmid)) {
+ rc = PTR_ERR(cmid);
+ CERROR("Failed to create cmid for failover: %d\n", rc);
+ goto out;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+ addr.sin_port = htons(*kiblnd_tunables.kib_service);
+
+ /* Bind to failover device or port */
+ rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
+ if (rc != 0) {
+ CERROR("Failed to bind %s to device: %d\n",
+ dev->ibd_ifname, rc);
+ rdma_destroy_id(cmid);
+ goto out;
+ }
+
+ LIBCFS_ALLOC(hdev, sizeof(*hdev));
+ if (hdev == NULL) {
+ CERROR("Failed to allocate kib_hca_dev\n");
+ rdma_destroy_id(cmid);
+ rc = -ENOMEM;
+ goto out;
+ }
+ memset(hdev, 0, sizeof(*hdev));
+ atomic_set(&hdev->ibh_ref, 1);
+ hdev->ibh_dev = dev;
+ hdev->ibh_cmid = cmid;
+ hdev->ibh_ibdev = cmid->device;
+
+ pd = ib_alloc_pd(cmid->device);
+ if (IS_ERR(pd)) {
+ rc = PTR_ERR(pd);
+ CERROR("Can't allocate PD: %d\n", rc);
+ goto out;
+ }
+
+ hdev->ibh_pd = pd;
+
+ rc = rdma_listen(cmid, 0);
+ if (rc != 0) {
+ CERROR("Can't start new listener: %d\n", rc);
+ goto out;
+ }
+
+ rc = kiblnd_hdev_setup_mrs(hdev);
+ if (rc != 0) {
+ CERROR("Can't setup device: %d\n", rc);
+ goto out;
+ }
+
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ old = dev->ibd_hdev;
+ dev->ibd_hdev = hdev; /* take over the refcount */
+ hdev = old;
+
+ cfs_list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
+ kiblnd_fail_pool_set(&net->ibn_tx_ps.tps_poolset, &zombie_tpo);
+ if (net->ibn_with_pmr)
+ kiblnd_fail_pool_set(&net->ibn_pmr_ps.pps_poolset, &zombie_ppo);
+ if (net->ibn_with_fmr)
+ kiblnd_fail_fmr_pool_set(&net->ibn_fmr_ps, &zombie_fpo);
+ }
+
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ out:
+ if (!cfs_list_empty(&zombie_tpo))
+ kiblnd_destroy_pool_list(&zombie_tpo);
+ if (!cfs_list_empty(&zombie_ppo))
+ kiblnd_destroy_pool_list(&zombie_ppo);
+ if (!cfs_list_empty(&zombie_fpo))
+ kiblnd_destroy_fmr_pool_list(&zombie_fpo);
+ if (hdev != NULL)
+ kiblnd_hdev_decref(hdev);
+
+ if (rc != 0)
+ dev->ibd_failed_failover++;
+ else
+ dev->ibd_failed_failover = 0;
+
+ return rc;
+}
+
void
kiblnd_destroy_dev (kib_dev_t *dev)
{
LASSERT (dev->ibd_nnets == 0);
+ LASSERT (cfs_list_empty(&dev->ibd_nets));
- if (!cfs_list_empty(&dev->ibd_list)) /* on kib_devs? */
- cfs_list_del_init(&dev->ibd_list);
+ cfs_list_del(&dev->ibd_fail_list);
+ cfs_list_del(&dev->ibd_list);
- kiblnd_dev_cleanup(dev);
+ if (dev->ibd_hdev != NULL)
+ kiblnd_hdev_decref(dev->ibd_hdev);
- if (dev->ibd_pd != NULL)
- ib_dealloc_pd(dev->ibd_pd);
+ LIBCFS_FREE(dev, sizeof(*dev));
+}
- if (dev->ibd_cmid != NULL)
- rdma_destroy_id(dev->ibd_cmid);
+kib_dev_t *
+kiblnd_create_dev(char *ifname)
+{
+ struct net_device *netdev;
+ kib_dev_t *dev;
+ __u32 netmask;
+ __u32 ip;
+ int up;
+ int rc;
- LIBCFS_FREE(dev, sizeof(*dev));
+ rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
+ if (rc != 0) {
+ CERROR("Can't query IPoIB interface %s: %d\n",
+ ifname, rc);
+ return NULL;
+ }
+
+ if (!up) {
+ CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
+ return NULL;
+ }
+
+ LIBCFS_ALLOC(dev, sizeof(*dev));
+ if (dev == NULL)
+ return NULL;
+
+ memset(dev, 0, sizeof(*dev));
+#ifdef HAVE_DEV_GET_BY_NAME_2ARG
+ if ((netdev = dev_get_by_name(&init_net, ifname)) == NULL) {
+#else
+ if ((netdev = dev_get_by_name(ifname)) == NULL) {
+#endif
+ CERROR("Can't find netdev of IF: %s\n", ifname);
+ LIBCFS_FREE(dev, sizeof(*dev));
+ return NULL;
+ }
+
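+ /* only a bonding master (IFF_MASTER) can fail over between HCAs */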
+ dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
+ dev_put(netdev);
+
+ CFS_INIT_LIST_HEAD(&dev->ibd_nets);
+ CFS_INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
+ CFS_INIT_LIST_HEAD(&dev->ibd_fail_list);
+ dev->ibd_ifip = ip;
+ strcpy(&dev->ibd_ifname[0], ifname);
+
+ /* initialize the device */
+ rc = kiblnd_dev_failover(dev);
+ if (rc != 0) {
+ CERROR("Can't initialize device: %d\n", rc);
+ LIBCFS_FREE(dev, sizeof(*dev));
+ return NULL;
+ }
+
+ cfs_list_add_tail(&dev->ibd_list,
+ &kiblnd_data.kib_devs);
+ return dev;
}
void
kiblnd_data.kib_shutdown = 1;
cfs_waitq_broadcast(&kiblnd_data.kib_sched_waitq);
cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
+ cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
i = 2;
while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
kiblnd_ni_fini_pools(net);
+ cfs_write_lock_irqsave(g_lock, flags);
LASSERT (net->ibn_dev->ibd_nnets > 0);
net->ibn_dev->ibd_nnets--;
+ cfs_list_del(&net->ibn_list);
+ cfs_write_unlock_irqrestore(g_lock, flags);
/* fall through */
cfs_rwlock_init(&kiblnd_data.kib_global_lock);
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
LIBCFS_ALLOC(kiblnd_data.kib_peers,
cfs_spin_lock_init(&kiblnd_data.kib_sched_lock);
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
cfs_waitq_init(&kiblnd_data.kib_sched_waitq);
+ cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
goto failed;
}
+ if (*kiblnd_tunables.kib_dev_failover != 0)
+ rc = kiblnd_thread_start(kiblnd_failover_thread, NULL);
+
+ if (rc != 0) {
+ CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
+ goto failed;
+ }
+
/* flag everything initialised */
kiblnd_data.kib_init = IBLND_INIT_ALL;
/*****************************************************/
kib_net_t *net;
cfs_list_t *tmp;
struct timeval tv;
+ unsigned long flags;
int rc;
LASSERT (ni->ni_lnd == &the_o2iblnd);
ibdev = NULL;
}
- if (ibdev == NULL) {
- __u32 ip;
- __u32 netmask;
- int up;
- struct rdma_cm_id *id;
- struct ib_pd *pd;
- struct sockaddr_in addr;
-
- rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
- if (rc != 0) {
- CERROR("Can't query IPoIB interface %s: %d\n",
- ifname, rc);
- goto failed;
- }
-
- if (!up) {
- CERROR("Can't query IPoIB interface %s: it's down\n",
- ifname);
- goto failed;
- }
-
- LIBCFS_ALLOC(ibdev, sizeof(*ibdev));
- if (ibdev == NULL)
- goto failed;
-
- memset(ibdev, 0, sizeof(*ibdev));
+ if (ibdev == NULL)
+ ibdev = kiblnd_create_dev(ifname);
- CFS_INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
- ibdev->ibd_ifip = ip;
- strcpy(&ibdev->ibd_ifname[0], ifname);
-
- id = rdma_create_id(kiblnd_cm_callback, ibdev, RDMA_PS_TCP);
- if (IS_ERR(id)) {
- CERROR("Can't create listen ID: %ld\n", PTR_ERR(id));
- goto failed;
- }
-
- ibdev->ibd_cmid = id;
-
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_port = htons(*kiblnd_tunables.kib_service);
- addr.sin_addr.s_addr = htonl(ip);
-
- rc = rdma_bind_addr(id, (struct sockaddr *)&addr);
- if (rc != 0) {
- CERROR("Can't bind to %s: %d\n", ifname, rc);
- goto failed;
- }
-
- /* Binding should have assigned me an IB device */
- LASSERT (id->device != NULL);
- CDEBUG(D_CONSOLE, "Listener bound to %s:%u.%u.%u.%u:%d:%s\n",
- ifname, HIPQUAD(ip), *kiblnd_tunables.kib_service,
- id->device->name);
-
- pd = ib_alloc_pd(id->device);
- if (IS_ERR(pd)) {
- CERROR("Can't allocate PD: %ld\n", PTR_ERR(pd));
- goto failed;
- }
-
- ibdev->ibd_pd = pd;
-
- rc = rdma_listen(id, 256);
- if (rc != 0) {
- CERROR("Can't start listener: %d\n", rc);
- goto failed;
- }
-
- rc = kiblnd_dev_setup(ibdev);
- if (rc != 0) {
- CERROR("Can't setup device: %d\n", rc);
- goto failed;
- }
- }
+ if (ibdev == NULL)
+ goto failed;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
net->ibn_dev = ibdev;
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
rc = kiblnd_net_init_pools(net);
if (rc != 0) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
}
+
+ cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ibdev->ibd_nnets++;
+ cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+ cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
net->ibn_init = IBLND_INIT_ALL;
return 0;
typedef struct
{
+ int *kib_dev_failover; /* HCA failover */
unsigned int *kib_service; /* IB service number */
int *kib_min_reconnect_interval; /* first failed connection retry... */
int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+struct kib_hca_dev;
+
typedef struct
{
cfs_list_t ibd_list; /* chain on kib_devs */
+ cfs_list_t ibd_fail_list; /* chain on kib_failed_devs */
__u32 ibd_ifip; /* IPoIB interface IP */
char ibd_ifname[32]; /* IPoIB interface name */
int ibd_nnets; /* # nets extant */
- struct rdma_cm_id *ibd_cmid; /* IB listener (bound to 1 device) */
- struct ib_pd *ibd_pd; /* PD for the device */
- int ibd_page_shift; /* page shift of current HCA */
- int ibd_page_size; /* page size of current HCA */
- __u64 ibd_page_mask; /* page mask of current HCA */
- int ibd_mr_shift; /* bits shift of max MR size */
- __u64 ibd_mr_size; /* size of MR */
-
- int ibd_nmrs; /* # of global MRs */
- struct ib_mr **ibd_mrs; /* MR for non RDMA I/O */
+ cfs_time_t ibd_next_failover;
+ int ibd_failed_failover; /* # failover failures */
+ unsigned int ibd_failover; /* failover in progress */
+ unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
+ cfs_list_t ibd_nets;
+ struct kib_hca_dev *ibd_hdev;
} kib_dev_t;
+typedef struct kib_hca_dev
+{
+ struct rdma_cm_id *ibh_cmid; /* listener cmid */
+ struct ib_device *ibh_ibdev; /* IB device */
+ int ibh_page_shift; /* page shift of current HCA */
+ int ibh_page_size; /* page size of current HCA */
+ __u64 ibh_page_mask; /* page mask of current HCA */
+ int ibh_mr_shift; /* bits shift of max MR size */
+ __u64 ibh_mr_size; /* size of MR */
+ int ibh_nmrs; /* # of global MRs */
+ struct ib_mr **ibh_mrs; /* global MR */
+ struct ib_pd *ibh_pd; /* PD */
+ kib_dev_t *ibh_dev; /* owner */
+ cfs_atomic_t ibh_ref; /* refcount */
+} kib_hca_dev_t;
+
#define IBLND_POOL_DEADLINE 300 /* # of seconds to keep pool alive */
typedef struct
{
- struct ib_device *ibp_device; /* device for mapping */
int ibp_npages; /* # pages */
struct page *ibp_pages[0]; /* page array */
} kib_pages_t;
struct kib_net *ps_net; /* network it belongs to */
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
cfs_list_t ps_pool_list; /* list of pools */
+ cfs_list_t ps_failed_pool_list; /* failed pool list */
cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
int ps_increasing; /* is allocating new pool */
int ps_pool_size; /* new pool size */
kib_poolset_t *po_owner; /* pool_set of this pool */
cfs_time_t po_deadline; /* deadline of this pool */
int po_allocated; /* # of elements in use */
+ int po_failed; /* pool is created on failed HCA */
int po_size; /* # of pre-allocated elements */
} kib_pool_t;
typedef struct {
kib_pool_t tpo_pool; /* pool */
+ struct kib_hca_dev *tpo_hdev; /* device for this pool */
struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
} kib_tx_pool_t;
} kib_pmr_poolset_t;
typedef struct kib_pmr_pool {
+ struct kib_hca_dev *ppo_hdev; /* device for this pool */
kib_pool_t ppo_pool; /* pool */
} kib_pmr_pool_t;
cfs_spinlock_t fps_lock; /* serialize */
struct kib_net *fps_net; /* IB network */
cfs_list_t fps_pool_list; /* FMR pool list */
+ cfs_list_t fps_failed_pool_list; /* failed FMR pool list */
__u64 fps_version; /* validity stamp */
int fps_increasing; /* is allocating new pool */
cfs_time_t fps_next_retry; /* time stamp for retry if failed to allocate */
typedef struct
{
cfs_list_t fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
cfs_time_t fpo_deadline; /* deadline of this pool */
+ int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
} kib_fmr_pool_t;
typedef struct kib_net
{
+ cfs_list_t ibn_list; /* chain on kib_dev_t::ibd_nets */
__u64 ibn_incarnation; /* my epoch */
int ibn_init; /* initialisation state */
int ibn_shutdown; /* shutting down? */
int kib_init; /* initialisation state */
int kib_shutdown; /* shut down? */
cfs_list_t kib_devs; /* IB devices extant */
+ cfs_list_t kib_failed_devs; /* list head of failed devices */
cfs_atomic_t kib_nthreads; /* # live threads */
cfs_rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
cfs_waitq_t kib_sched_waitq; /* schedulers sleep here */
cfs_list_t kib_sched_conns; /* conns to check for rx completions */
cfs_spinlock_t kib_sched_lock; /* serialise */
+ cfs_waitq_t kib_failover_waitq; /* failover threads sleep here */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
} kib_data_t;
typedef struct kib_conn
{
struct kib_peer *ibc_peer; /* owning peer */
+ kib_hca_dev_t *ibc_hdev; /* HCA bound on */
cfs_list_t ibc_list; /* stash on peer's conn list */
cfs_list_t ibc_sched_list; /* schedule for attention */
__u16 ibc_version; /* version of connection */
extern kib_data_t kiblnd_data;
+extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+
+static inline void
+kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+{
+ LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
+ cfs_atomic_inc(&hdev->ibh_ref);
+}
+
+static inline void
+kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+{
+ LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
+ if (cfs_atomic_dec_and_test(&hdev->ibh_ref))
+ kiblnd_hdev_destroy(hdev);
+}
+
+static inline int
+kiblnd_dev_can_failover(kib_dev_t *dev)
+{
+ if (!cfs_list_empty(&dev->ibd_fail_list)) /* already scheduled */
+ return 0;
+
+ if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+ return 0;
+
+ if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
+ return 1;
+
+ return dev->ibd_can_failover;
+}
+
#define kiblnd_conn_addref(conn) \
do { \
CDEBUG(D_NET, "conn[%p] (%d)++\n", \
#endif
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_net_t *net,
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
kib_rdma_desc_t *rd);
-struct ib_mr *kiblnd_find_dma_mr(kib_net_t *net,
+struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
__u64 addr, __u64 size);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_rdma_desc_t *rd,
- __u64 *iova, kib_phys_mr_t **pp_pmr);
+int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
int kiblnd_startup (lnet_ni_t *ni);
int kiblnd_connd (void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start (int (*fn)(void *arg), void *arg);
+int kiblnd_failover_thread (void *arg);
int kiblnd_alloc_pages (kib_pages_t **pp, int npages);
void kiblnd_free_pages (kib_pages_t *p);
struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);
+int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer (kib_peer_t *peer);
void kiblnd_destroy_dev (kib_dev_t *dev);
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
- mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
+ mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
LASSERT (mr != NULL);
rx->rx_sge.lkey = mr->lkey;
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
- kib_dev_t *ibdev = net->ibn_dev;
- __u64 *pages = tx->tx_pages;
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ __u64 *pages = tx->tx_pages;
int npages;
int size;
int rc;
for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += ibdev->ibd_page_size) {
+ size += hdev->ibh_page_size) {
pages[npages ++] = (rd->rd_frags[i].rf_addr &
- ibdev->ibd_page_mask) + size;
+ hdev->ibh_page_mask) + size;
}
}
* the rkey */
rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
- rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
+ rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
__u64 iova;
int rc;
- iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
+ iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
- rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
+ rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr);
if (rc != 0) {
CERROR("Failed to create MR by phybuf: %d\n", rc);
return rc;
}
if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
}
kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
kib_rdma_desc_t *rd, int nfrags)
{
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
kib_net_t *net = ni->ni_data;
struct ib_mr *mr = NULL;
__u32 nob;
tx->tx_nfrags = nfrags;
rd->rd_nfrags =
- kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
+ kiblnd_dma_map_sg(hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
- net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
- net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
nob += rd->rd_frags[i].rf_nob;
}
/* looking for pre-mapping MR */
- mr = kiblnd_find_rd_dma_mr(net, rd);
+ mr = kiblnd_find_rd_dma_mr(hdev, rd);
if (mr != NULL) {
/* found pre-mapping MR */
rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
/* I'm still holding ibc_lock! */
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
rc = -ECONNABORTED;
- else
+ } else if (tx->tx_pool->tpo_pool.po_failed ||
+ conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+ /* close_conn will launch failover */
+ rc = -ENETDOWN;
+ } else {
rc = ib_post_send(conn->ibc_cmid->qp,
tx->tx_wrq, &bad_wrq);
+ }
+
conn->ibc_last_send = jiffies;
if (rc == 0)
void
kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
- kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
int nob = offsetof (kib_msg_t, ibm_u) + body_nob;
struct ib_mr *mr;
- LASSERT (net != NULL);
LASSERT (tx->tx_nwrq >= 0);
LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT (nob <= IBLND_MSG_SIZE);
kiblnd_init_msg(tx->tx_msg, type, body_nob);
- mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
+ mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
LASSERT (mr != NULL);
sge->lkey = mr->lkey;
* connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context */
- unsigned long flags;
kib_peer_t *peer = conn->ibc_peer;
+ kib_dev_t *dev;
+ unsigned long flags;
LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
+ dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
cfs_list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
+ }
+
cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
CDEBUG(D_NET, "%s established\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
return;
-
+
default:
CERROR("%s: Async QP event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
kiblnd_thread_fini();
return (0);
}
+
+int
+kiblnd_failover_thread(void *arg)
+{
+ cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_dev_t *dev;
+ cfs_waitlink_t wait;
+ unsigned long flags;
+ int rc;
+
+ LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+
+ cfs_daemonize ("kiblnd_failover");
+ cfs_block_allsigs ();
+
+ cfs_waitlink_init(&wait);
+ cfs_write_lock_irqsave(glock, flags);
+
+ while (!kiblnd_data.kib_shutdown) {
+ int do_failover = 0;
+ int long_sleep;
+
+ cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+ ibd_fail_list) {
+ if (cfs_time_before(cfs_time_current(),
+ dev->ibd_next_failover))
+ continue;
+ do_failover = 1;
+ break;
+ }
+
+ if (do_failover) {
+ cfs_list_del_init(&dev->ibd_fail_list);
+ dev->ibd_failover = 1;
+ cfs_write_unlock_irqrestore(glock, flags);
+
+ rc = kiblnd_dev_failover(dev);
+
+ cfs_write_lock_irqsave(glock, flags);
+
+ LASSERT (dev->ibd_failover);
+ dev->ibd_failover = 0;
+ if (rc >= 0) { /* Device is OK or failover succeeded */
+ dev->ibd_next_failover = cfs_time_shift(3);
+ continue;
+ }
+
+ /* failed to failover, retry later */
+ dev->ibd_next_failover =
+ cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+
+ continue;
+ }
+
+ /* long sleep if no more pending failover */
+ long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+ cfs_write_unlock_irqrestore(glock, flags);
+
+ rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+ cfs_time_seconds(1));
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+ cfs_write_lock_irqsave(glock, flags);
+
+ if (!long_sleep || rc != 0)
+ continue;
+
+ /* we just had a long sleep; routinely check all active devices.
+ * This is needed because if a dev has no active connection and we
+ * never SEND from the local node, we could keep listening on the
+ * wrong HCA forever after a bonding failover */
+ cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+ }
+ }
+
+ cfs_write_unlock_irqrestore(glock, flags);
+
+ kiblnd_thread_fini();
+ return 0;
+}