b=20153 add IB bonding failover support to o2iblnd
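The change below moves the o2iblnd data path off the per-net kib_dev_t (net->ibn_dev) onto a per-HCA handle, kib_hca_dev_t, reached through conn->ibc_hdev or tx->tx_pool->tpo_hdev, queues a device for failover when a connection is closed with an error, and adds a dedicated kiblnd_failover_thread. As a hedged illustration only (not part of this patch), such a thread would be spawned at module startup much like the existing scheduler threads, gated on the kib_dev_failover tunable that the thread itself asserts; kiblnd_thread_start() and its exact placement in kiblnd_base_startup() are assumptions here:

        /* hypothetical sketch, not in this patch: spawn the failover thread
         * at startup only when the dev_failover tunable is enabled */
        if (*kiblnd_tunables.kib_dev_failover != 0) {
                rc = kiblnd_thread_start(kiblnd_failover_thread, NULL);
                if (rc != 0) {
                        CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
                        goto failed;
                }
        }
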
[fs/lustre-release.git] lnet/klnds/o2iblnd/o2iblnd_cb.c
index 167005f..577db3e 100644
@@ -154,7 +154,7 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
                  credit == IBLND_POSTRX_PEER_CREDIT ||
                  credit == IBLND_POSTRX_RSRVD_CREDIT);
 
-        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
+        mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
         LASSERT (mr != NULL);
 
         rx->rx_sge.lkey   = mr->lkey;
@@ -538,8 +538,8 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
 static int
 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-        kib_dev_t          *ibdev  = net->ibn_dev;
-        __u64              *pages  = tx->tx_pages;
+        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
+        __u64              *pages = tx->tx_pages;
         int                 npages;
         int                 size;
         int                 rc;
@@ -547,9 +547,9 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 
         for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
                 for (size = 0; size <  rd->rd_frags[i].rf_nob;
-                               size += ibdev->ibd_page_size) {
+                               size += hdev->ibh_page_size) {
                         pages[npages ++] = (rd->rd_frags[i].rf_addr &
-                                            ibdev->ibd_page_mask) + size;
+                                            hdev->ibh_page_mask) + size;
                 }
         }
 
@@ -563,7 +563,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
          * the rkey */
         rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
                                          tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
-        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
+        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
         rd->rd_frags[0].rf_nob   = nob;
         rd->rd_nfrags = 1;
 
@@ -573,12 +573,13 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 static int
 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
+        kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
         __u64   iova;
         int     rc;
 
-        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
+        iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
 
-        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
+        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr);
         if (rc != 0) {
                 CERROR("Failed to create MR by phybuf: %d\n", rc);
                 return rc;
@@ -611,7 +612,7 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
         }
 
         if (tx->tx_nfrags != 0) {
-                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                     tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                 tx->tx_nfrags = 0;
         }
@@ -621,6 +622,7 @@ int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
               kib_rdma_desc_t *rd, int nfrags)
 {
+        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
         kib_net_t          *net   = ni->ni_data;
         struct ib_mr       *mr    = NULL;
         __u32               nob;
@@ -632,19 +634,19 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
         tx->tx_nfrags = nfrags;
 
         rd->rd_nfrags =
-                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_map_sg(hdev->ibh_ibdev,
                                   tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
 
         for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                 rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                 rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                 nob += rd->rd_frags[i].rf_nob;
         }
 
         /* looking for pre-mapping MR */
-        mr = kiblnd_find_rd_dma_mr(net, rd);
+        mr = kiblnd_find_rd_dma_mr(hdev, rd);
         if (mr != NULL) {
                 /* found pre-mapping MR */
                 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
@@ -835,11 +837,17 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
 
         /* I'm still holding ibc_lock! */
-        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                 rc = -ECONNABORTED;
-        else
+        } else if (tx->tx_pool->tpo_pool.po_failed ||
+                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+                /* close_conn will launch failover */
+                rc = -ENETDOWN;
+        } else {
                 rc = ib_post_send(conn->ibc_cmid->qp,
                                   tx->tx_wrq, &bad_wrq);
+        }
+
         conn->ibc_last_send = jiffies;
 
         if (rc == 0)
@@ -1005,20 +1013,19 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 void
 kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
-        kib_net_t         *net = ni->ni_data;
+        kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
         struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
         struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
         int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
         struct ib_mr      *mr;
 
-        LASSERT (net != NULL);
         LASSERT (tx->tx_nwrq >= 0);
         LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
         LASSERT (nob <= IBLND_MSG_SIZE);
 
         kiblnd_init_msg(tx->tx_msg, type, body_nob);
 
-        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
+        mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
         LASSERT (mr != NULL);
 
         sge->lkey   = mr->lkey;
@@ -1768,8 +1775,9 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
          * connection to be finished off by the connd.  Otherwise the connd is
          * already dealing with it (either to set it up or tear it down).
          * Caller holds kib_global_lock exclusively in irq context */
-        unsigned long     flags;
         kib_peer_t       *peer = conn->ibc_peer;
+        kib_dev_t        *dev;
+        unsigned long     flags;
 
         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -1795,6 +1803,7 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                        cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
         }
 
+        dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
         cfs_list_del(&conn->ibc_list);
         /* connd (see below) takes over ibc_list's ref */
 
@@ -1808,6 +1817,13 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
+        if (error != 0 &&
+            kiblnd_dev_can_failover(dev)) {
+                cfs_list_add_tail(&dev->ibd_fail_list,
+                              &kiblnd_data.kib_failed_devs);
+                cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
+        }
+
         cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
         cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
@@ -3107,7 +3123,7 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
                 CDEBUG(D_NET, "%s established\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                 return;
-                
+
         default:
                 CERROR("%s: Async QP event type %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3311,3 +3327,94 @@ kiblnd_scheduler(void *arg)
         kiblnd_thread_fini();
         return (0);
 }
+
+int
+kiblnd_failover_thread(void *arg)
+{
+        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
+        kib_dev_t         *dev;
+        cfs_waitlink_t     wait;
+        unsigned long      flags;
+        int                rc;
+
+        LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+
+        cfs_daemonize ("kiblnd_failover");
+        cfs_block_allsigs ();
+
+        cfs_waitlink_init(&wait);
+        cfs_write_lock_irqsave(glock, flags);
+
+        while (!kiblnd_data.kib_shutdown) {
+                int     do_failover = 0;
+                int     long_sleep;
+
+                cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+                                    ibd_fail_list) {
+                        if (cfs_time_before(cfs_time_current(),
+                                            dev->ibd_next_failover))
+                                continue;
+                        do_failover = 1;
+                        break;
+                }
+
+                if (do_failover) {
+                        cfs_list_del_init(&dev->ibd_fail_list);
+                        dev->ibd_failover = 1;
+                        cfs_write_unlock_irqrestore(glock, flags);
+
+                        rc = kiblnd_dev_failover(dev);
+
+                        cfs_write_lock_irqsave(glock, flags);
+
+                        LASSERT (dev->ibd_failover);
+                        dev->ibd_failover = 0;
+                        if (rc >= 0) { /* Device is OK or failover succeeded */
+                                dev->ibd_next_failover = cfs_time_shift(3);
+                                continue;
+                        }
+
+                        /* failed to failover, retry later */
+                        dev->ibd_next_failover =
+                                cfs_time_shift(min(dev->ibd_failed_failover, 10));
+                        if (kiblnd_dev_can_failover(dev)) {
+                                cfs_list_add_tail(&dev->ibd_fail_list,
+                                              &kiblnd_data.kib_failed_devs);
+                        }
+
+                        continue;
+                }
+
+                /* long sleep if there are no more pending failovers */
+                long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+                cfs_write_unlock_irqrestore(glock, flags);
+
+                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+                                                   cfs_time_seconds(1));
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+                cfs_write_lock_irqsave(glock, flags);
+
+                if (!long_sleep || rc != 0)
+                        continue;
+
+                /* after a long sleep, routinely check all active devices:
+                 * if there is no active connection on the dev and no SEND
+                 * from local, we may listen on the wrong HCA forever while
+                 * a bonding failover happens */
+                cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+                        if (kiblnd_dev_can_failover(dev)) {
+                                cfs_list_add_tail(&dev->ibd_fail_list,
+                                              &kiblnd_data.kib_failed_devs);
+                        }
+                }
+        }
+
+        cfs_write_unlock_irqrestore(glock, flags);
+
+        kiblnd_thread_fini();
+        return 0;
+}
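
Usage note (an assumption, since the tunables change lives in a companion file not shown here): kiblnd_tunables.kib_dev_failover is expected to be backed by a ko2iblnd module option, presumably named dev_failover, so enabling HCA failover on a node would look roughly like:

        # /etc/modprobe.d/ko2iblnd.conf -- hypothetical example
        options ko2iblnd dev_failover=1

With the tunable left at zero the failover thread is never started (the LASSERT at the top of kiblnd_failover_thread assumes it is non-zero), and kiblnd_dev_can_failover() presumably keeps devices off kib_failed_devs.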