LU-78 o2iblnd: kiblnd_check_conns can deadlock
[fs/lustre-release.git] lnet/klnds/o2iblnd/o2iblnd_cb.c
index 167005f..b95a04f 100644
@@ -154,7 +154,7 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
                  credit == IBLND_POSTRX_PEER_CREDIT ||
                  credit == IBLND_POSTRX_RSRVD_CREDIT);
 
-        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
+        mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
         LASSERT (mr != NULL);
 
         rx->rx_sge.lkey   = mr->lkey;
@@ -328,6 +328,11 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
                 conn->ibc_credits += credits;
 
+                /* This ensures the credit taken by NOOP can be returned */
+                if (msg->ibm_type == IBLND_MSG_NOOP &&
+                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
+                        conn->ibc_outstanding_credits++;
+
                 cfs_spin_unlock(&conn->ibc_lock);
                 kiblnd_check_sends(conn);
         }
@@ -341,9 +346,14 @@ kiblnd_handle_rx (kib_rx_t *rx)
                 break;
 
         case IBLND_MSG_NOOP:
-                if (IBLND_OOB_CAPABLE(conn->ibc_version))
+                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                         post_credit = IBLND_POSTRX_NO_CREDIT;
-                else
+                        break;
+                }
+
+                if (credits != 0) /* credit already posted */
+                        post_credit = IBLND_POSTRX_NO_CREDIT;
+                else              /* a keepalive NOOP */
                         post_credit = IBLND_POSTRX_PEER_CREDIT;
                 break;
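
These two hunks close a credit-return hole on version-1 (non-OOB-capable)
connections: a NOOP costs the sender a send credit like any other message,
so the receiver now counts it in ibc_outstanding_credits and pays it back,
while the post-rx credit for a NOOP depends on whether it arrived carrying
credits (already accounted) or as a bare keepalive. A toy model of the
accounting, with hypothetical names standing in for the kib_conn_t fields
(the real ones are guarded by ibc_lock):

#include <assert.h>
#include <stdio.h>

#define QUEUE_DEPTH 8

struct side {
        int credits;             /* sends this side may still post */
        int outstanding_credits; /* credits owed back to the peer */
};

/* post a message; returns the credits piggybacked on it */
static int tx_post(struct side *s)
{
        int piggyback = s->outstanding_credits;

        assert(s->credits > 0);
        s->credits--;               /* every message costs a credit */
        s->outstanding_credits = 0; /* ...and returns what we owe */
        return piggyback;
}

static void rx_handle(struct side *s, int returned, int v1_noop)
{
        s->credits += returned;
        /* the fix above: a v1 NOOP cost the sender a credit, so the
         * receiver must owe one back or the sender's credits drain */
        if (v1_noop)
                s->outstanding_credits++;
}

int main(void)
{
        struct side a = { QUEUE_DEPTH, 0 };
        struct side b = { QUEUE_DEPTH, 0 };

        rx_handle(&b, tx_post(&a), 1); /* A sends a keepalive NOOP */
        printf("B now owes %d credit back\n", b.outstanding_credits);
        return 0;
}
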
 
@@ -538,8 +548,8 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
 static int
 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-        kib_dev_t          *ibdev  = net->ibn_dev;
-        __u64              *pages  = tx->tx_pages;
+        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
+        __u64              *pages = tx->tx_pages;
         int                 npages;
         int                 size;
         int                 rc;
@@ -547,9 +557,9 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 
         for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
                 for (size = 0; size <  rd->rd_frags[i].rf_nob;
-                               size += ibdev->ibd_page_size) {
+                               size += hdev->ibh_page_size) {
                         pages[npages ++] = (rd->rd_frags[i].rf_addr &
-                                            ibdev->ibd_page_mask) + size;
+                                            hdev->ibh_page_mask) + size;
                 }
         }
 
@@ -563,7 +573,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
          * the rkey */
         rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
                                          tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
-        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
+        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
         rd->rd_frags[0].rf_nob   = nob;
         rd->rd_nfrags = 1;
 
@@ -573,12 +583,13 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 static int
 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
+        kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
         __u64   iova;
         int     rc;
 
-        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
+        iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
 
-        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
+        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr);
         if (rc != 0) {
                 CERROR("Failed to create MR by phybuf: %d\n", rc);
                 return rc;
@@ -611,7 +622,7 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
         }
 
         if (tx->tx_nfrags != 0) {
-                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                     tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                 tx->tx_nfrags = 0;
         }
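
The hdev substitutions running through these hunks (and the kiblnd_map_tx
changes just below) are failover groundwork: DMA mapping, MR lookup, and
unmapping now go through the HCA handle pinned by the tx pool
(tx->tx_pool->tpo_hdev) rather than the per-net device, so a tx mapped
before an HCA failover is still unmapped against the device that mapped
it. In outline (hypothetical reduced types):

struct ib_device;                    /* opaque verbs device */

struct hca_dev {
        struct ib_device *ibh_ibdev; /* device that owns the mappings */
};

struct tx_pool {
        struct hca_dev   *tpo_hdev;  /* HCA this pool is mapped on */
};

struct tx {
        struct tx_pool   *tx_pool;   /* map and unmap both resolve the
                                      * HCA through here, never through
                                      * the re-pointable net device */
};
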
@@ -621,6 +632,7 @@ int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
               kib_rdma_desc_t *rd, int nfrags)
 {
+        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
         kib_net_t          *net   = ni->ni_data;
         struct ib_mr       *mr    = NULL;
         __u32               nob;
@@ -632,19 +644,19 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
         tx->tx_nfrags = nfrags;
 
         rd->rd_nfrags =
-                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_map_sg(hdev->ibh_ibdev,
                                   tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
 
         for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                 rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                 rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                 nob += rd->rd_frags[i].rf_nob;
         }
 
         /* looking for pre-mapping MR */
-        mr = kiblnd_find_rd_dma_mr(net, rd);
+        mr = kiblnd_find_rd_dma_mr(hdev, rd);
         if (mr != NULL) {
                 /* found pre-mapping MR */
                 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
@@ -789,8 +801,8 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         }
 
         if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
-            conn->ibc_credits == 1 &&   /* last credit reserved for */
-            conn->ibc_outstanding_credits == 0) { /* giving back credits */
+            conn->ibc_credits == 1 &&   /* last credit reserved */
+            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                 CDEBUG(D_NET, "%s: not using last credit\n",
                        libcfs_nid2str(peer->ibp_nid));
                 return -EAGAIN;
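
The reservation of the last send credit is reworked too: it is now
withheld from every message type except NOOP, rather than only while no
credits were waiting to be returned, so a connection that is down to one
credit can always still send the NOOP that returns credits to its peer.
Ignoring the separate credits == 0 check, the rule reduces to a predicate
like this hypothetical one:

/* last-credit rule on a connection without OOB messages */
static int may_consume_credit(int credits, int oob_capable, int is_noop)
{
        if (credits > 1 || oob_capable)
                return 1;       /* not down to the reserve yet */
        return is_noop;         /* last credit: NOOP only */
}
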
@@ -801,7 +813,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         tx->tx_queued = 0;
 
         if (msg->ibm_type == IBLND_MSG_NOOP &&
-            (!kiblnd_send_noop(conn) ||     /* redundant NOOP */
+            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
              (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
               conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                 /* OK to drop when posted enough NOOPs, since
@@ -835,11 +847,17 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
 
         /* I'm still holding ibc_lock! */
-        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                 rc = -ECONNABORTED;
-        else
+        } else if (tx->tx_pool->tpo_pool.po_failed ||
+                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+                /* close_conn will launch failover */
+                rc = -ENETDOWN;
+        } else {
                 rc = ib_post_send(conn->ibc_cmid->qp,
                                   tx->tx_wrq, &bad_wrq);
+        }
+
         conn->ibc_last_send = jiffies;
 
         if (rc == 0)
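
The new branch guards ib_post_send() after a failover: a tx whose buffers
are mapped through a failed pool, or through a different HCA than the
connection currently uses, fails with -ENETDOWN, and closing the
connection (see kiblnd_close_conn_locked below) queues the device for
failover. A reduced sketch of the decision, with hypothetical trimmed
types:

#include <errno.h>

struct hca_dev { int id; };
struct tx_pool { int po_failed; struct hca_dev *tpo_hdev; };
struct conn    { int established; struct hca_dev *ibc_hdev; };

static int pre_post_check(struct conn *conn, struct tx_pool *pool)
{
        if (!conn->established)
                return -ECONNABORTED; /* connection already dying */
        if (pool->po_failed || pool->tpo_hdev != conn->ibc_hdev)
                return -ENETDOWN;     /* stale mapping: force failover */
        return 0;                     /* safe to post the work request */
}
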
@@ -910,7 +928,7 @@ kiblnd_check_sends (kib_conn_t *conn)
                 conn->ibc_reserved_credits--;
         }
 
-        if (kiblnd_send_noop(conn)) {
+        if (kiblnd_need_noop(conn)) {
                 cfs_spin_unlock(&conn->ibc_lock);
 
                 tx = kiblnd_get_idle_tx(ni);
@@ -931,6 +949,11 @@ kiblnd_check_sends (kib_conn_t *conn)
                         credit = 0;
                         tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
                                             kib_tx_t, tx_list);
+                } else if (!cfs_list_empty(&conn->ibc_tx_noops)) {
+                        LASSERT (!IBLND_OOB_CAPABLE(ver));
+                        credit = 1;
+                        tx = cfs_list_entry(conn->ibc_tx_noops.next,
+                                        kib_tx_t, tx_list);
                 } else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
                         credit = 1;
                         tx = cfs_list_entry(conn->ibc_tx_queue.next,
@@ -1005,20 +1028,19 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 void
 kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
-        kib_net_t         *net = ni->ni_data;
+        kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
         struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
         struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
         int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
         struct ib_mr      *mr;
 
-        LASSERT (net != NULL);
         LASSERT (tx->tx_nwrq >= 0);
         LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
         LASSERT (nob <= IBLND_MSG_SIZE);
 
         kiblnd_init_msg(tx->tx_msg, type, body_nob);
 
-        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
+        mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
         LASSERT (mr != NULL);
 
         sge->lkey   = mr->lkey;
@@ -1164,7 +1186,7 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
                 if (IBLND_OOB_CAPABLE(conn->ibc_version))
                         q = &conn->ibc_tx_queue_nocred;
                 else
-                        q = &conn->ibc_tx_queue;
+                        q = &conn->ibc_tx_noops;
                 break;
 
         case IBLND_MSG_IMMEDIATE:
@@ -1185,6 +1207,48 @@ kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
         kiblnd_check_sends(conn);
 }
 
+static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+                               struct sockaddr_in *srcaddr,
+                               struct sockaddr_in *dstaddr,
+                               int timeout_ms)
+{
+        unsigned short port;
+        int rc;
+
+#ifdef HAVE_OFED_RDMA_SET_REUSEADDR
+        /* allow the port to be reused */
+        rc = rdma_set_reuseaddr(cmid, 1);
+        if (rc != 0) {
+                CERROR("Unable to set reuse on cmid: %d\n", rc);
+                return rc;
+        }
+#endif
+
+        /* look for a free privileged port */
+        for (port = PROT_SOCK-1; port > 0; port--) {
+                srcaddr->sin_port = htons(port);
+                rc = rdma_resolve_addr(cmid,
+                                       (struct sockaddr *)srcaddr,
+                                       (struct sockaddr *)dstaddr,
+                                       timeout_ms);
+                if (rc == 0) {
+                        CDEBUG(D_NET, "bound to port %hu\n", port);
+                        return 0;
+                } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
+                        CDEBUG(D_NET, "bind to port %hu failed: %d\n",
+                               port, rc);
+                } else {
+                        return rc;
+                }
+        }
+
+        CERROR("Failed to bind to a free privileged port\n");
+#ifndef HAVE_OFED_RDMA_SET_REUSEADDR
+        CERROR("You may need IB verbs that support rdma_set_reuseaddr()\n");
+#endif
+        return rc;
+}
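
kiblnd_resolve_addr() scans the privileged ports downward from
PROT_SOCK-1 until rdma_resolve_addr() accepts a source address, treating
only -EADDRINUSE and -EADDRNOTAVAIL as reasons to try the next port;
rdma_set_reuseaddr(), where the OFED stack provides it, stops ports
lingering in TIME_WAIT from blocking the scan. The same loop in
plain-socket form (a hypothetical userspace analogy; binding below
PROT_SOCK needs CAP_NET_BIND_SERVICE):

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#define PROT_SOCK 1024  /* first non-privileged port */

static int bind_priv_port(int fd)
{
        struct sockaddr_in addr;
        unsigned short port;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);

        for (port = PROT_SOCK - 1; port > 0; port--) {
                addr.sin_port = htons(port);
                if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0)
                        return 0;      /* bound */
                if (errno != EADDRINUSE && errno != EADDRNOTAVAIL)
                        return -errno; /* non-transient failure */
        }
        return -EADDRNOTAVAIL;         /* every privileged port taken */
}
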
+
 void
 kiblnd_connect_peer (kib_peer_t *peer)
 {
@@ -1198,7 +1262,9 @@ kiblnd_connect_peer (kib_peer_t *peer)
         LASSERT (net != NULL);
         LASSERT (peer->ibp_connecting > 0);
 
-        cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
+        cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
+                                     IB_QPT_RC);
+
         if (IS_ERR(cmid)) {
                 CERROR("Can't create CMID for %s: %ld\n",
                        libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
@@ -1218,22 +1284,30 @@ kiblnd_connect_peer (kib_peer_t *peer)
 
         kiblnd_peer_addref(peer);               /* cmid's ref */
 
-        rc = rdma_resolve_addr(cmid,
-                               (struct sockaddr *)&srcaddr,
-                               (struct sockaddr *)&dstaddr,
-                               *kiblnd_tunables.kib_timeout * 1000);
-        if (rc == 0) {
-                LASSERT (cmid->device != NULL);
-                CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
-                       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
-                       HIPQUAD(dev->ibd_ifip), cmid->device->name);
-                return;
+        if (*kiblnd_tunables.kib_use_priv_port) {
+                rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+                                         *kiblnd_tunables.kib_timeout * 1000);
+        } else {
+                rc = rdma_resolve_addr(cmid,
+                                       (struct sockaddr *)&srcaddr,
+                                       (struct sockaddr *)&dstaddr,
+                                       *kiblnd_tunables.kib_timeout * 1000);
+        }
+        if (rc != 0) {
+                /* Can't initiate address resolution:  */
+                CERROR("Can't resolve addr for %s: %d\n",
+                       libcfs_nid2str(peer->ibp_nid), rc);
+                goto failed2;
         }
 
-        /* Can't initiate address resolution:  */
-        CERROR("Can't resolve addr for %s: %d\n",
-               libcfs_nid2str(peer->ibp_nid), rc);
+        LASSERT (cmid->device != NULL);
+        CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
+               libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
+               HIPQUAD(dev->ibd_ifip), cmid->device->name);
 
+        return;
+
+ failed2:
         kiblnd_peer_decref(peer);               /* cmid's ref */
         rdma_destroy_id(cmid);
  failed:
@@ -1710,7 +1784,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 int
 kiblnd_thread_start (int (*fn)(void *arg), void *arg)
 {
-        long    pid = cfs_kernel_thread (fn, arg, 0);
+        long    pid = cfs_create_thread (fn, arg, 0);
 
         if (pid < 0)
                 return ((int)pid);
@@ -1768,8 +1842,9 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
          * connection to be finished off by the connd.  Otherwise the connd is
          * already dealing with it (either to set it up or tear it down).
          * Caller holds kib_global_lock exclusively in irq context */
-        unsigned long     flags;
         kib_peer_t       *peer = conn->ibc_peer;
+        kib_dev_t        *dev;
+        unsigned long     flags;
 
         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -1780,6 +1855,7 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 return; /* already being handled  */
 
         if (error == 0 &&
+            cfs_list_empty(&conn->ibc_tx_noops) &&
             cfs_list_empty(&conn->ibc_tx_queue) &&
             cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
             cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
@@ -1787,14 +1863,16 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 CDEBUG(D_NET, "closing conn to %s\n", 
                        libcfs_nid2str(peer->ibp_nid));
         } else {
-                CNETERR("Closing conn to %s: error %d%s%s%s%s\n",
+                CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
                        libcfs_nid2str(peer->ibp_nid), error,
                        cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+                       cfs_list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
                        cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
                        cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
                        cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
         }
 
+        dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
         cfs_list_del(&conn->ibc_list);
         /* connd (see below) takes over ibc_list's ref */
 
@@ -1808,6 +1886,13 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
+        if (error != 0 &&
+            kiblnd_dev_can_failover(dev)) {
+                cfs_list_add_tail(&dev->ibd_fail_list,
+                              &kiblnd_data.kib_failed_devs);
+                cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
+        }
+
         cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
         cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
@@ -1905,6 +1990,7 @@ kiblnd_finalise_conn (kib_conn_t *conn)
         /* Complete all tx descs not waiting for sends to complete.
          * NB we should be safe from RDMA now that the QP has changed state */
 
+        kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
@@ -2092,7 +2178,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         int                    version = IBLND_MSG_VERSION;
         unsigned long          flags;
         int                    rc;
-
+        struct sockaddr_in    *peer_addr;
         LASSERT (!cfs_in_interrupt());
 
         /* cmid inherits 'context' from the corresponding listener id */
@@ -2104,6 +2190,15 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         rej.ibr_why                  = IBLND_REJECT_FATAL;
         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
+        peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+        if (*kiblnd_tunables.kib_require_priv_port &&
+            ntohs(peer_addr->sin_port) >= PROT_SOCK) {
+                __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+                CERROR("Peer's port (%u.%u.%u.%u:%hu) is not privileged\n",
+                       HIPQUAD(ip), ntohs(peer_addr->sin_port));
+                goto failed;
+        }
+
         if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
                 CERROR("Short connection request\n");
                 goto failed;
@@ -2874,14 +2969,11 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
         }
 }
 
-int
-kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
+static int
+kiblnd_check_txs_locked(kib_conn_t *conn, cfs_list_t *txs)
 {
         kib_tx_t          *tx;
         cfs_list_t        *ttmp;
-        int                timed_out = 0;
-
-        cfs_spin_lock(&conn->ibc_lock);
 
         cfs_list_for_each (ttmp, txs) {
                 tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
@@ -2894,40 +2986,40 @@ kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
                 }
 
                 if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
-                        timed_out = 1;
                         CERROR("Timed out tx: %s, %lu seconds\n",
                                kiblnd_queue2str(conn, txs),
                                cfs_duration_sec(jiffies - tx->tx_deadline));
-                        break;
+                        return 1;
                 }
         }
 
-        cfs_spin_unlock(&conn->ibc_lock);
-        return timed_out;
+        return 0;
 }
 
-int
-kiblnd_conn_timed_out (kib_conn_t *conn)
+static int
+kiblnd_conn_timed_out_locked(kib_conn_t *conn)
 {
-        return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
-                kiblnd_check_txs(conn, &conn->ibc_active_txs);
+        return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
 }
 
 void
 kiblnd_check_conns (int idx)
 {
-        cfs_list_t        *peers = &kiblnd_data.kib_peers[idx];
-        cfs_list_t        *ptmp;
-        kib_peer_t        *peer;
-        kib_conn_t        *conn;
-        cfs_list_t        *ctmp;
-        unsigned long      flags;
+        CFS_LIST_HEAD (closes);
+        CFS_LIST_HEAD (checksends);
+        cfs_list_t    *peers = &kiblnd_data.kib_peers[idx];
+        cfs_list_t    *ptmp;
+        kib_peer_t    *peer;
+        kib_conn_t    *conn;
+        cfs_list_t    *ctmp;
+        unsigned long  flags;
 
- again:
         /* NB. We expect to have a look at all the peers and not find any
-         * rdmas to time out, so we just use a shared lock while we
+         * RDMAs to time out, so we just use a shared lock while we
          * take a look... */
         cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -2935,41 +3027,66 @@ kiblnd_check_conns (int idx)
                 peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
 
                 cfs_list_for_each (ctmp, &peer->ibp_conns) {
-                        conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
+                        int timedout;
+                        int sendnoop;
+
+                        conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
 
                         LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
 
-                        /* In case we have enough credits to return via a
-                         * NOOP, but there were no non-blocking tx descs
-                         * free to do it last time... */
-                        kiblnd_check_sends(conn);
+                        cfs_spin_lock(&conn->ibc_lock);
 
-                        if (!kiblnd_conn_timed_out(conn))
+                        sendnoop = kiblnd_need_noop(conn);
+                        timedout = kiblnd_conn_timed_out_locked(conn);
+                        if (!sendnoop && !timedout) {
+                                cfs_spin_unlock(&conn->ibc_lock);
                                 continue;
+                        }
 
-                        /* Handle timeout by closing the whole connection.  We
-                         * can only be sure RDMA activity has ceased once the
-                         * QP has been modified. */
-
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
-
-                        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                   flags);
-
-                        CERROR("Timed out RDMA with %s (%lu)\n",
-                               libcfs_nid2str(peer->ibp_nid),
-                               cfs_duration_sec(cfs_time_current() -
-                                                peer->ibp_last_alive));
-
-                        kiblnd_close_conn(conn, -ETIMEDOUT);
-                        kiblnd_conn_decref(conn); /* ...until here */
+                        if (timedout) {
+                                CERROR("Timed out RDMA with %s (%lu): "
+                                       "c: %u, oc: %u, rc: %u\n",
+                                       libcfs_nid2str(peer->ibp_nid),
+                                       cfs_duration_sec(cfs_time_current() -
+                                                        peer->ibp_last_alive),
+                                       conn->ibc_credits,
+                                       conn->ibc_outstanding_credits,
+                                       conn->ibc_reserved_credits);
+                                cfs_list_add(&conn->ibc_connd_list, &closes);
+                        } else {
+                                cfs_list_add(&conn->ibc_connd_list,
+                                             &checksends);
+                        }
+                        /* +ref for 'closes' or 'checksends' */
+                        kiblnd_conn_addref(conn);
 
-                        /* start again now I've dropped the lock */
-                        goto again;
+                        cfs_spin_unlock(&conn->ibc_lock);
                 }
         }
 
         cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+        /* Handle timeout by closing the whole
+         * connection. We can only be sure RDMA activity
+         * has ceased once the QP has been modified. */
+        while (!cfs_list_empty(&closes)) {
+                conn = cfs_list_entry(closes.next,
+                                      kib_conn_t, ibc_connd_list);
+                cfs_list_del(&conn->ibc_connd_list);
+                kiblnd_close_conn(conn, -ETIMEDOUT);
+                kiblnd_conn_decref(conn);
+        }
+
+        /* In case we have enough credits to return via a
+         * NOOP, but there were no non-blocking tx descs
+         * free to do it last time... */
+        while (!cfs_list_empty(&checksends)) {
+                conn = cfs_list_entry(checksends.next,
+                                      kib_conn_t, ibc_connd_list);
+                cfs_list_del(&conn->ibc_connd_list);
+                kiblnd_check_sends(conn);
+                kiblnd_conn_decref(conn);
+        }
 }
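
This restructuring is the LU-78 fix itself. The old loop invoked
kiblnd_check_sends() and kiblnd_close_conn() while still scanning under
kib_global_lock (restarting the whole scan after each close), and those
callouts can need the same lock again, which is the reported deadlock.
The scan now only collects referenced connections onto the local closes
and checksends lists, and every callout runs after the read lock has
been dropped. A minimal model of the collect-then-act pattern, with a
pthread rwlock standing in for the kernel lock (hypothetical types,
refcounting elided):

#include <pthread.h>

struct conn {
        struct conn *next;      /* global list linkage */
        struct conn *scan_next; /* plays the role of ibc_connd_list */
        int          timed_out;
};

static pthread_rwlock_t global_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct conn *all_conns;

static void close_conn(struct conn *c)
{
        (void)c; /* would modify the QP, wake the connd, ... */
}

static void check_conns(void)
{
        struct conn *closes = NULL;
        struct conn *c;

        /* phase 1: scan under the shared lock; gather pointers only */
        pthread_rwlock_rdlock(&global_lock);
        for (c = all_conns; c != NULL; c = c->next) {
                if (!c->timed_out)
                        continue;
                c->scan_next = closes;
                closes = c;
        }
        pthread_rwlock_unlock(&global_lock);

        /* phase 2: act with no lock held, so close_conn is free to
         * take the same lock (even for writing) without deadlock */
        while (closes != NULL) {
                c = closes;
                closes = c->scan_next;
                close_conn(c);
        }
}
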
 
 void
@@ -3107,7 +3224,7 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
                 CDEBUG(D_NET, "%s established\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                 return;
-                
+
         default:
                 CERROR("%s: Async QP event type %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3311,3 +3428,94 @@ kiblnd_scheduler(void *arg)
         kiblnd_thread_fini();
         return (0);
 }
+
+int
+kiblnd_failover_thread(void *arg)
+{
+        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
+        kib_dev_t         *dev;
+        cfs_waitlink_t     wait;
+        unsigned long      flags;
+        int                rc;
+
+        LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+
+        cfs_daemonize ("kiblnd_failover");
+        cfs_block_allsigs ();
+
+        cfs_waitlink_init(&wait);
+        cfs_write_lock_irqsave(glock, flags);
+
+        while (!kiblnd_data.kib_shutdown) {
+                int     do_failover = 0;
+                int     long_sleep;
+
+                cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+                                    ibd_fail_list) {
+                        if (cfs_time_before(cfs_time_current(),
+                                            dev->ibd_next_failover))
+                                continue;
+                        do_failover = 1;
+                        break;
+                }
+
+                if (do_failover) {
+                        cfs_list_del_init(&dev->ibd_fail_list);
+                        dev->ibd_failover = 1;
+                        cfs_write_unlock_irqrestore(glock, flags);
+
+                        rc = kiblnd_dev_failover(dev);
+
+                        cfs_write_lock_irqsave(glock, flags);
+
+                        LASSERT (dev->ibd_failover);
+                        dev->ibd_failover = 0;
+                        if (rc >= 0) { /* Device is OK or failover succeeded */
+                                dev->ibd_next_failover = cfs_time_shift(3);
+                                continue;
+                        }
+
+                        /* failed to failover, retry later */
+                        dev->ibd_next_failover =
+                                cfs_time_shift(min(dev->ibd_failed_failover, 10));
+                        if (kiblnd_dev_can_failover(dev)) {
+                                cfs_list_add_tail(&dev->ibd_fail_list,
+                                              &kiblnd_data.kib_failed_devs);
+                        }
+
+                        continue;
+                }
+
+                /* long sleep if no more pending failovers */
+                long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+                cfs_write_unlock_irqrestore(glock, flags);
+
+                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+                                                   cfs_time_seconds(1));
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+                cfs_write_lock_irqsave(glock, flags);
+
+                if (!long_sleep || rc != 0)
+                        continue;
+
+                /* have a long sleep, routinely check all active devices,
+                 * we need to check like this because if there is no active
+                 * connection on the dev and no SEND from the local node, we
+                 * may listen on the wrong HCA forever after a bonding failover */
+                cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+                        if (kiblnd_dev_can_failover(dev)) {
+                                cfs_list_add_tail(&dev->ibd_fail_list,
+                                              &kiblnd_data.kib_failed_devs);
+                        }
+                }
+        }
+
+        cfs_write_unlock_irqrestore(glock, flags);
+
+        kiblnd_thread_fini();
+        return 0;
+}
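
kiblnd_failover_thread() paces its retries: a device that is healthy or
failed over successfully is re-checked after 3 seconds, a failed attempt
backs off by its failure count capped at 10 seconds, and the thread
itself sleeps 10 seconds when kib_failed_devs is empty versus 1 second
otherwise, rescanning every device after an undisturbed long sleep to
catch silent bonding failovers. The pacing rule alone, as a hypothetical
helper returning seconds until the next attempt:

static long next_failover_delay(int rc, int failed_attempts)
{
        if (rc >= 0)    /* device OK or failover succeeded */
                return 3;
        /* failed: back off with the failure count, capped at 10s */
        return failed_attempts < 10 ? failed_attempts : 10;
}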