credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
- mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
+ mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
LASSERT (mr != NULL);
rx->rx_sge.lkey = mr->lkey;
conn->ibc_credits += credits;
+ /* This ensures the credit taken by a NOOP can be returned */
+ if (msg->ibm_type == IBLND_MSG_NOOP &&
+ !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
+ conn->ibc_outstanding_credits++;
+
cfs_spin_unlock(&conn->ibc_lock);
kiblnd_check_sends(conn);
}
break;
case IBLND_MSG_NOOP:
- if (IBLND_OOB_CAPABLE(conn->ibc_version))
+ if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
post_credit = IBLND_POSTRX_NO_CREDIT;
- else
+ break;
+ }
+
+ if (credits != 0) /* credit already posted */
+ post_credit = IBLND_POSTRX_NO_CREDIT;
+ else /* a keepalive NOOP */
post_credit = IBLND_POSTRX_PEER_CREDIT;
break;
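For context, a minimal user-space sketch of the v1 credit rule this hunk implements: a NOOP consumes a send credit on the wire, so the receiver must queue one outstanding credit to hand back, or the sender's ring slot is never refilled. The struct and on_receive() below are simplified, hypothetical stand-ins for kib_conn_t and the receive path, not driver code.

/* Toy model of the v1 NOOP credit flow; compile with any C99 compiler. */
#include <assert.h>
#include <stdio.h>

struct conn {
        int credits;             /* send credits we hold */
        int outstanding_credits; /* credits owed back to the peer */
};

enum msg_type { MSG_NOOP, MSG_IMMEDIATE };

/* Receive path: absorb returned credits; for a v1 (non-OOB) NOOP,
 * queue one credit to return so the sender's slot is refilled. */
static void on_receive(struct conn *c, enum msg_type type,
                       int returned_credits, int oob_capable)
{
        c->credits += returned_credits;
        if (type == MSG_NOOP && !oob_capable)
                c->outstanding_credits++;
}

int main(void)
{
        struct conn c = { .credits = 0, .outstanding_credits = 0 };

        on_receive(&c, MSG_NOOP, 1, 0);     /* v1 keepalive NOOP */
        assert(c.outstanding_credits == 1); /* credit will be returned */
        printf("credits=%d owed=%d\n", c.credits, c.outstanding_credits);
        return 0;
}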
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
- kib_dev_t *ibdev = net->ibn_dev;
- __u64 *pages = tx->tx_pages;
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ __u64 *pages = tx->tx_pages;
int npages;
int size;
int rc;
for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += ibdev->ibd_page_size) {
+ size += hdev->ibh_page_size) {
pages[npages ++] = (rd->rd_frags[i].rf_addr &
- ibdev->ibd_page_mask) + size;
+ hdev->ibh_page_mask) + size;
}
}
* the rkey */
rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
- rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
+ rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
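The page-list arithmetic above decomposes each RDMA fragment into the page-aligned addresses it touches. A standalone sketch with invented values; PG_SIZE and PG_MASK stand in for ibh_page_size and ibh_page_mask (which, as in the driver, masks down to the page-aligned part):

/* Fragment-to-page decomposition as in kiblnd_fmr_map_tx(); values invented. */
#include <stdio.h>
#include <stdint.h>

#define PG_SIZE 4096ULL
#define PG_MASK (~(PG_SIZE - 1))  /* high bits: page-aligned part */

struct frag { uint64_t addr; unsigned nob; };

int main(void)
{
        struct frag f = { 0x10000234ULL, 6000 };
        uint64_t pages[16];
        int npages = 0;

        /* Emit the page-aligned address of every page the fragment
         * touches, mirroring the loop in the hunk above. */
        for (uint64_t size = 0; size < f.nob; size += PG_SIZE)
                pages[npages++] = (f.addr & PG_MASK) + size;

        for (int i = 0; i < npages; i++)
                printf("page[%d] = 0x%llx\n", i,
                       (unsigned long long)pages[i]);
        return 0;
}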
static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
__u64 iova;
int rc;
- iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
+ iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
- rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
+ rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr);
if (rc != 0) {
CERROR("Failed to create MR by phybuf: %d\n", rc);
return rc;
}
if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
}
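The unmap above deliberately goes through tx->tx_pool->tpo_hdev rather than the connection's current HCA handle: DMA mappings must be torn down on the device that created them, even if the connection has since failed over to another HCA. A toy sketch of that invariant, with device handles reduced to plain ints:

/* Map/unmap pairing invariant under failover; handles are ints here. */
#include <assert.h>
#include <stdio.h>

struct tx   { int mapped_on; };   /* like tpo_hdev, frozen at map time */
struct conn { int current_hca; }; /* like ibc_hdev, changes on failover */

static void unmap_tx(struct tx *tx)
{
        printf("unmapping on HCA %d\n", tx->mapped_on);
}

int main(void)
{
        struct tx   tx   = { .mapped_on = 0 };
        struct conn conn = { .current_hca = 0 };

        conn.current_hca = 1;              /* bonding failover switched HCAs */
        assert(tx.mapped_on != conn.current_hca);
        unmap_tx(&tx);                     /* must still use HCA 0 */
        return 0;
}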
kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
kib_rdma_desc_t *rd, int nfrags)
{
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
kib_net_t *net = ni->ni_data;
struct ib_mr *mr = NULL;
__u32 nob;
tx->tx_nfrags = nfrags;
rd->rd_nfrags =
- kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
+ kiblnd_dma_map_sg(hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
- net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
- net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
nob += rd->rd_frags[i].rf_nob;
}
/* looking for pre-mapping MR */
- mr = kiblnd_find_rd_dma_mr(net, rd);
+ mr = kiblnd_find_rd_dma_mr(hdev, rd);
if (mr != NULL) {
/* found pre-mapping MR */
rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
}
if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
- conn->ibc_credits == 1 && /* last credit reserved for */
- conn->ibc_outstanding_credits == 0) { /* giving back credits */
+ conn->ibc_credits == 1 && /* last credit reserved */
+ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
CDEBUG(D_NET, "%s: not using last credit\n",
libcfs_nid2str(peer->ibp_nid));
return -EAGAIN;
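A hedged sketch of the v1 send gate this hunk tightens: the last credit is reserved so a credit-returning NOOP can always go out, and only a NOOP may consume it. Names below are simplified stand-ins for the driver's types, not real code:

/* Toy model of the "reserve the last credit" rule. */
#include <stdio.h>

enum msg_type { MSG_NOOP, MSG_PUT_REQ };

struct conn { int credits; };

/* Return 0 if the send may proceed, -1 (EAGAIN in the driver) if not:
 * on v1 connections a non-NOOP may not take the final credit, since
 * that credit is reserved for returning credits to the peer. */
static int may_send(const struct conn *c, enum msg_type type, int oob_capable)
{
        if (c->credits == 0)
                return -1;
        if (!oob_capable && c->credits == 1 && type != MSG_NOOP)
                return -1;
        return 0;
}

int main(void)
{
        struct conn c = { .credits = 1 };

        printf("PUT with last credit:  %s\n",
               may_send(&c, MSG_PUT_REQ, 0) ? "blocked" : "sent");
        printf("NOOP with last credit: %s\n",
               may_send(&c, MSG_NOOP, 0) ? "blocked" : "sent");
        return 0;
}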
cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
/* I'm still holding ibc_lock! */
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
rc = -ECONNABORTED;
- else
+ } else if (tx->tx_pool->tpo_pool.po_failed ||
+ conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+ /* close_conn will launch failover */
+ rc = -ENETDOWN;
+ } else {
rc = ib_post_send(conn->ibc_cmid->qp,
tx->tx_wrq, &bad_wrq);
+ }
+
conn->ibc_last_send = jiffies;
if (rc == 0)
credit = 0;
tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
kib_tx_t, tx_list);
+ } else if (!cfs_list_empty(&conn->ibc_tx_noops)) {
+ LASSERT (!IBLND_OOB_CAPABLE(ver));
+ credit = 1;
+ tx = cfs_list_entry(conn->ibc_tx_noops.next,
+ kib_tx_t, tx_list);
} else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
credit = 1;
tx = cfs_list_entry(conn->ibc_tx_queue.next,
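The dequeue order this hunk establishes is: credit-free sends first, then queued NOOPs (v1 connections only), then normal traffic. A toy model with the three lists reduced to counters:

/* Sketch of the send-queue priority; queue names mirror the driver's lists. */
#include <stdio.h>

struct conn {
        int nocred; /* ibc_tx_queue_nocred */
        int noops;  /* ibc_tx_noops (v1 only) */
        int normal; /* ibc_tx_queue */
};

static const char *pick_next(struct conn *c, int *credit)
{
        if (c->nocred) { c->nocred--; *credit = 0; return "nocred"; }
        if (c->noops)  { c->noops--;  *credit = 1; return "noop"; }
        if (c->normal) { c->normal--; *credit = 1; return "normal"; }
        *credit = 0;
        return NULL;
}

int main(void)
{
        struct conn c = { .nocred = 1, .noops = 1, .normal = 1 };
        const char *q;
        int credit;

        while ((q = pick_next(&c, &credit)) != NULL)
                printf("sent from %s queue (credit=%d)\n", q, credit);
        return 0;
}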
void
kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
- kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
int nob = offsetof (kib_msg_t, ibm_u) + body_nob;
struct ib_mr *mr;
- LASSERT (net != NULL);
LASSERT (tx->tx_nwrq >= 0);
LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT (nob <= IBLND_MSG_SIZE);
kiblnd_init_msg(tx->tx_msg, type, body_nob);
- mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
+ mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
LASSERT (mr != NULL);
sge->lkey = mr->lkey;
if (IBLND_OOB_CAPABLE(conn->ibc_version))
q = &conn->ibc_tx_queue_nocred;
else
- q = &conn->ibc_tx_queue;
+ q = &conn->ibc_tx_noops;
break;
case IBLND_MSG_IMMEDIATE:
* connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context */
- unsigned long flags;
kib_peer_t *peer = conn->ibc_peer;
+ kib_dev_t *dev;
+ unsigned long flags;
LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
return; /* already being handled */
if (error == 0 &&
+ cfs_list_empty(&conn->ibc_tx_noops) &&
cfs_list_empty(&conn->ibc_tx_queue) &&
cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
CDEBUG(D_NET, "closing conn to %s\n",
libcfs_nid2str(peer->ibp_nid));
} else {
- CNETERR("Closing conn to %s: error %d%s%s%s%s\n",
+ CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
libcfs_nid2str(peer->ibp_nid), error,
cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ cfs_list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
+ dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
cfs_list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
+ }
+
cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
/* Complete all tx descs not waiting for sends to complete.
* NB we should be safe from RDMA now that the QP has changed state */
+ kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
kiblnd_conn_timed_out (kib_conn_t *conn)
{
return kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
+ kiblnd_check_txs(conn, &conn->ibc_tx_noops) ||
kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
kiblnd_check_txs(conn, &conn->ibc_active_txs);
CDEBUG(D_NET, "%s established\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
return;
-
+
default:
CERROR("%s: Async QP event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
kiblnd_thread_fini();
return (0);
}
+
+int
+kiblnd_failover_thread(void *arg)
+{
+ cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_dev_t *dev;
+ cfs_waitlink_t wait;
+ unsigned long flags;
+ int rc;
+
+ LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+
+ cfs_daemonize ("kiblnd_failover");
+ cfs_block_allsigs ();
+
+ cfs_waitlink_init(&wait);
+ cfs_write_lock_irqsave(glock, flags);
+
+ while (!kiblnd_data.kib_shutdown) {
+ int do_failover = 0;
+ int long_sleep;
+
+ cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+ ibd_fail_list) {
+ if (cfs_time_before(cfs_time_current(),
+ dev->ibd_next_failover))
+ continue;
+ do_failover = 1;
+ break;
+ }
+
+ if (do_failover) {
+ cfs_list_del_init(&dev->ibd_fail_list);
+ dev->ibd_failover = 1;
+ cfs_write_unlock_irqrestore(glock, flags);
+
+ rc = kiblnd_dev_failover(dev);
+
+ cfs_write_lock_irqsave(glock, flags);
+
+ LASSERT (dev->ibd_failover);
+ dev->ibd_failover = 0;
+ if (rc >= 0) { /* Device is OK or failover succeeded */
+ dev->ibd_next_failover = cfs_time_shift(3);
+ continue;
+ }
+
+ /* failed to failover, retry later */
+ dev->ibd_next_failover =
+ cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+
+ continue;
+ }
+
+ /* long sleep if there are no more pending failovers */
+ long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+ cfs_write_unlock_irqrestore(glock, flags);
+
+ rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+ cfs_time_seconds(1));
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+ cfs_write_lock_irqsave(glock, flags);
+
+ if (!long_sleep || rc != 0)
+ continue;
+
+ /* After a long sleep, routinely check all active devices: with no
+ * active connection on a device and no local SENDs, we could
+ * otherwise listen on the wrong HCA forever after a bonding
+ * failover */
+ cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+ }
+ }
+
+ cfs_write_unlock_irqrestore(glock, flags);
+
+ kiblnd_thread_fini();
+ return 0;
+}
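The retry scheduling in the thread above, pulled out as a standalone sketch: a successful (or unneeded) failover re-checks the device after a short fixed delay, while a failed one backs off with the failure count, capped at 10 seconds. next_delay_secs() is an invented helper, not part of the driver:

/* Toy model of the failover thread's retry delays. */
#include <stdio.h>

static int next_delay_secs(int rc, int failed_failover)
{
        if (rc >= 0)                  /* device OK or failover succeeded */
                return 3;
        return failed_failover < 10 ? /* failed: back off, capped at 10s */
               failed_failover : 10;
}

int main(void)
{
        printf("success:      retry in %ds\n", next_delay_secs(0, 5));
        printf("3rd failure:  retry in %ds\n", next_delay_secs(-1, 3));
        printf("12th failure: retry in %ds\n", next_delay_secs(-1, 12));
        return 0;
}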