Whamcloud - gitweb
LU-178 prevent replays after recovery on server
[fs/lustre-release.git] / lnet / klnds / o2iblnd / o2iblnd_cb.c
index f869420..431aa90 100644 (file)
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -154,7 +154,7 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
                  credit == IBLND_POSTRX_PEER_CREDIT ||
                  credit == IBLND_POSTRX_RSRVD_CREDIT);
 
-        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
+        mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
         LASSERT (mr != NULL);
 
         rx->rx_sge.lkey   = mr->lkey;
@@ -328,6 +328,11 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
                 conn->ibc_credits += credits;
 
+                /* This ensures the credit taken by NOOP can be returned */
+                if (msg->ibm_type == IBLND_MSG_NOOP &&
+                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
+                        conn->ibc_outstanding_credits++;
+
                 cfs_spin_unlock(&conn->ibc_lock);
                 kiblnd_check_sends(conn);
         }
@@ -341,9 +346,14 @@ kiblnd_handle_rx (kib_rx_t *rx)
                 break;
 
         case IBLND_MSG_NOOP:
-                if (IBLND_OOB_CAPABLE(conn->ibc_version))
+                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                         post_credit = IBLND_POSTRX_NO_CREDIT;
-                else
+                        break;
+                }
+
+                if (credits != 0) /* credit already posted */
+                        post_credit = IBLND_POSTRX_NO_CREDIT;
+                else              /* a keepalive NOOP */
                         post_credit = IBLND_POSTRX_PEER_CREDIT;
                 break;
 
@@ -458,8 +468,8 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
                 goto ignore;
 
         if (status != IB_WC_SUCCESS) {
-                CDEBUG(D_NETERROR, "Rx from %s failed: %d\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+                CNETERR("Rx from %s failed: %d\n",
+                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                 goto failed;
         }
 
@@ -538,8 +548,8 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
 static int
 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-        kib_dev_t          *ibdev  = net->ibn_dev;
-        __u64              *pages  = tx->tx_pages;
+        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
+        __u64              *pages = tx->tx_pages;
         int                 npages;
         int                 size;
         int                 rc;
@@ -547,9 +557,9 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 
         for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
                 for (size = 0; size <  rd->rd_frags[i].rf_nob;
-                               size += ibdev->ibd_page_size) {
+                               size += hdev->ibh_page_size) {
                         pages[npages ++] = (rd->rd_frags[i].rf_addr &
-                                            ibdev->ibd_page_mask) + size;
+                                            hdev->ibh_page_mask) + size;
                 }
         }
 
@@ -563,7 +573,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
          * the rkey */
         rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
                                          tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
-        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
+        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
         rd->rd_frags[0].rf_nob   = nob;
         rd->rd_nfrags = 1;
 
@@ -573,12 +583,13 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 static int
 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
+        kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
         __u64   iova;
         int     rc;
 
-        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
+        iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
 
-        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
+        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr);
         if (rc != 0) {
                 CERROR("Failed to create MR by phybuf: %d\n", rc);
                 return rc;
@@ -611,7 +622,7 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
         }
 
         if (tx->tx_nfrags != 0) {
-                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                     tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                 tx->tx_nfrags = 0;
         }
@@ -621,6 +632,7 @@ int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
               kib_rdma_desc_t *rd, int nfrags)
 {
+        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
         kib_net_t          *net   = ni->ni_data;
         struct ib_mr       *mr    = NULL;
         __u32               nob;
@@ -632,19 +644,19 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
         tx->tx_nfrags = nfrags;
 
         rd->rd_nfrags =
-                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_map_sg(hdev->ibh_ibdev,
                                   tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
 
         for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                 rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                 rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                 nob += rd->rd_frags[i].rf_nob;
         }
 
         /* looking for pre-mapping MR */
-        mr = kiblnd_find_rd_dma_mr(net, rd);
+        mr = kiblnd_find_rd_dma_mr(hdev, rd);
         if (mr != NULL) {
                 /* found pre-mapping MR */
                 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
@@ -789,8 +801,8 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         }
 
         if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
-            conn->ibc_credits == 1 &&   /* last credit reserved for */
-            conn->ibc_outstanding_credits == 0) { /* giving back credits */
+            conn->ibc_credits == 1 &&   /* last credit reserved */
+            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                 CDEBUG(D_NET, "%s: not using last credit\n",
                        libcfs_nid2str(peer->ibp_nid));
                 return -EAGAIN;
@@ -835,11 +847,17 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
 
         /* I'm still holding ibc_lock! */
-        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                 rc = -ECONNABORTED;
-        else
+        } else if (tx->tx_pool->tpo_pool.po_failed ||
+                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+                /* close_conn will launch failover */
+                rc = -ENETDOWN;
+        } else {
                 rc = ib_post_send(conn->ibc_cmid->qp,
                                   tx->tx_wrq, &bad_wrq);
+        }
+
         conn->ibc_last_send = jiffies;
 
         if (rc == 0)
@@ -874,6 +892,9 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
 
         if (done)
                 kiblnd_tx_done(peer->ibp_ni, tx);
+
+        cfs_spin_lock(&conn->ibc_lock);
+
         return -EIO;
 }
 
@@ -919,6 +940,8 @@ kiblnd_check_sends (kib_conn_t *conn)
                         kiblnd_queue_tx_locked(tx, conn);
         }
 
+        kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
+
         for (;;) {
                 int credit;
 
@@ -926,6 +949,11 @@ kiblnd_check_sends (kib_conn_t *conn)
                         credit = 0;
                         tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
                                             kib_tx_t, tx_list);
+                } else if (!cfs_list_empty(&conn->ibc_tx_noops)) {
+                        LASSERT (!IBLND_OOB_CAPABLE(ver));
+                        credit = 1;
+                        tx = cfs_list_entry(conn->ibc_tx_noops.next,
+                                        kib_tx_t, tx_list);
                 } else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
                         credit = 1;
                         tx = cfs_list_entry(conn->ibc_tx_queue.next,
@@ -938,6 +966,8 @@ kiblnd_check_sends (kib_conn_t *conn)
         }
 
         cfs_spin_unlock(&conn->ibc_lock);
+
+        kiblnd_conn_decref(conn); /* ...until here */
 }
 
 void
@@ -951,11 +981,11 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 
         if (failed) {
                 if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
-                        CDEBUG(D_NETERROR, "Tx -> %s cookie "LPX64
-                               " sending %d waiting %d: failed %d\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                               tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
-                               status);
+                        CNETERR("Tx -> %s cookie "LPX64
+                                " sending %d waiting %d: failed %d\n",
+                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
+                                status);
 
                 kiblnd_close_conn(conn, -EIO);
         } else {
@@ -998,20 +1028,19 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 void
 kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
-        kib_net_t         *net = ni->ni_data;
+        kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
         struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
         struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
         int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
         struct ib_mr      *mr;
 
-        LASSERT (net != NULL);
         LASSERT (tx->tx_nwrq >= 0);
         LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
         LASSERT (nob <= IBLND_MSG_SIZE);
 
         kiblnd_init_msg(tx->tx_msg, type, body_nob);
 
-        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
+        mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
         LASSERT (mr != NULL);
 
         sge->lkey   = mr->lkey;
@@ -1157,7 +1186,7 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
                 if (IBLND_OOB_CAPABLE(conn->ibc_version))
                         q = &conn->ibc_tx_queue_nocred;
                 else
-                        q = &conn->ibc_tx_queue;
+                        q = &conn->ibc_tx_noops;
                 break;
 
         case IBLND_MSG_IMMEDIATE:
@@ -1761,8 +1790,9 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
          * connection to be finished off by the connd.  Otherwise the connd is
          * already dealing with it (either to set it up or tear it down).
          * Caller holds kib_global_lock exclusively in irq context */
-        unsigned long     flags;
         kib_peer_t       *peer = conn->ibc_peer;
+        kib_dev_t        *dev;
+        unsigned long     flags;
 
         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -1773,6 +1803,7 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 return; /* already being handled  */
 
         if (error == 0 &&
+            cfs_list_empty(&conn->ibc_tx_noops) &&
             cfs_list_empty(&conn->ibc_tx_queue) &&
             cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
             cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
@@ -1780,14 +1811,16 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 CDEBUG(D_NET, "closing conn to %s\n", 
                        libcfs_nid2str(peer->ibp_nid));
         } else {
-                CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
+                CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
                        libcfs_nid2str(peer->ibp_nid), error,
                        cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+                       cfs_list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
                        cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
                        cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
                        cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
         }
 
+        dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
         cfs_list_del(&conn->ibc_list);
         /* connd (see below) takes over ibc_list's ref */
 
@@ -1801,6 +1834,13 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
+        if (error != 0 &&
+            kiblnd_dev_can_failover(dev)) {
+                cfs_list_add_tail(&dev->ibd_fail_list,
+                              &kiblnd_data.kib_failed_devs);
+                cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
+        }
+
         cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
         cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
@@ -1898,6 +1938,7 @@ kiblnd_finalise_conn (kib_conn_t *conn)
         /* Complete all tx descs not waiting for sends to complete.
          * NB we should be safe from RDMA now that the QP has changed state */
 
+        kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
@@ -1954,7 +1995,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
         if (cfs_list_empty (&zombies))
                 return;
 
-        CDEBUG (D_NETERROR, "Deleting messages for %s: connection failed\n",
+        CNETERR("Deleting messages for %s: connection failed\n",
                 libcfs_nid2str(peer->ibp_nid));
 
         kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
@@ -2347,8 +2388,12 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
         cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         /* retry connection if it's still needed and no other connection
-         * attempts (active or passive) are in progress */
-        if (!cfs_list_empty(&peer->ibp_tx_queue) &&
+         * attempts (active or passive) are in progress
+         * NB: reconnect is still needed even when ibp_tx_queue is
+         * empty if ibp_version != version because reconnect may be
+         * initiated by kiblnd_query() */
+        if ((!cfs_list_empty(&peer->ibp_tx_queue) ||
+             peer->ibp_version != version) &&
             peer->ibp_connecting == 1 &&
             peer->ibp_accepting == 0) {
                 retry = 1;
@@ -2381,13 +2426,13 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
                 break;
         }
 
-        CDEBUG(D_NETERROR, "%s: retrying (%s), %x, %x, "
-                           "queue_dep: %d, max_frag: %d, msg_size: %d\n",
-               libcfs_nid2str(peer->ibp_nid),
-               reason, IBLND_MSG_VERSION, version,
-               cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
-               cp != NULL ? cp->ibcp_max_frags   : IBLND_RDMA_FRAGS(version),
-               cp != NULL ? cp->ibcp_max_msg_size: IBLND_MSG_SIZE);
+        CNETERR("%s: retrying (%s), %x, %x, "
+                "queue_dep: %d, max_frag: %d, msg_size: %d\n",
+                libcfs_nid2str(peer->ibp_nid),
+                reason, IBLND_MSG_VERSION, version,
+                cp != NULL? cp->ibcp_queue_depth :IBLND_MSG_QUEUE_SIZE(version),
+                cp != NULL? cp->ibcp_max_frags   : IBLND_RDMA_FRAGS(version),
+                cp != NULL? cp->ibcp_max_msg_size: IBLND_MSG_SIZE);
 
         kiblnd_connect_peer(peer);
 }
@@ -2407,9 +2452,9 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                 break;
 
         case IB_CM_REJ_INVALID_SERVICE_ID:
-                CDEBUG(D_NETERROR, "%s rejected: no listener at %d\n",
-                       libcfs_nid2str(peer->ibp_nid),
-                       *kiblnd_tunables.kib_service);
+                CNETERR("%s rejected: no listener at %d\n",
+                        libcfs_nid2str(peer->ibp_nid),
+                        *kiblnd_tunables.kib_service);
                 break;
 
         case IB_CM_REJ_CONSUMER_DEFINED:
@@ -2519,8 +2564,8 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                 }
                 /* fall through */
         default:
-                CDEBUG(D_NETERROR, "%s rejected: reason %d, size %d\n",
-                       libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
+                CNETERR("%s rejected: reason %d, size %d\n",
+                        libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
                 break;
         }
 
@@ -2713,7 +2758,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 
        case RDMA_CM_EVENT_ADDR_ERROR:
                 peer = (kib_peer_t *)cmid->context;
-                CDEBUG(D_NETERROR, "%s: ADDR ERROR %d\n",
+                CNETERR("%s: ADDR ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                 kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                 kiblnd_peer_decref(peer);
@@ -2726,8 +2771,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                        libcfs_nid2str(peer->ibp_nid), event->status);
 
                 if (event->status != 0) {
-                        CDEBUG(D_NETERROR, "Can't resolve address for %s: %d\n",
-                               libcfs_nid2str(peer->ibp_nid), event->status);
+                        CNETERR("Can't resolve address for %s: %d\n",
+                                libcfs_nid2str(peer->ibp_nid), event->status);
                         rc = event->status;
                 } else {
                         rc = rdma_resolve_route(
@@ -2744,8 +2789,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 
        case RDMA_CM_EVENT_ROUTE_ERROR:
                 peer = (kib_peer_t *)cmid->context;
-                CDEBUG(D_NETERROR, "%s: ROUTE ERROR %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
+                CNETERR("%s: ROUTE ERROR %d\n",
+                        libcfs_nid2str(peer->ibp_nid), event->status);
                 kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                 kiblnd_peer_decref(peer);
                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
@@ -2758,7 +2803,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 if (event->status == 0)
                         return kiblnd_active_connect(cmid);
 
-                CDEBUG(D_NETERROR, "Can't resolve route for %s: %d\n",
+                CNETERR("Can't resolve route for %s: %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                 kiblnd_peer_connect_failed(peer, 1, event->status);
                 kiblnd_peer_decref(peer);
@@ -2768,7 +2813,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 conn = (kib_conn_t *)cmid->context;
                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
-                CDEBUG(D_NETERROR, "%s: UNREACHABLE %d\n",
+                CNETERR("%s: UNREACHABLE %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                 kiblnd_connreq_done(conn, -ENETDOWN);
                 kiblnd_conn_decref(conn);
@@ -2778,8 +2823,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 conn = (kib_conn_t *)cmid->context;
                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
-                CDEBUG(D_NETERROR, "%s: CONNECT ERROR %d\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+                CNETERR("%s: CONNECT ERROR %d\n",
+                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                 kiblnd_connreq_done(conn, -ENOTCONN);
                 kiblnd_conn_decref(conn);
                 return 0;
@@ -2899,6 +2944,7 @@ int
 kiblnd_conn_timed_out (kib_conn_t *conn)
 {
         return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
+                kiblnd_check_txs(conn, &conn->ibc_tx_noops) ||
                 kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
                 kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
                 kiblnd_check_txs(conn, &conn->ibc_active_txs);
@@ -3096,7 +3142,7 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
                 CDEBUG(D_NET, "%s established\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                 return;
-                
+
         default:
                 CERROR("%s: Async QP event type %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3118,8 +3164,8 @@ kiblnd_complete (struct ib_wc *wc)
                  * failing RDMA because 'tx' might be back on the idle list or
                  * even reused already if we didn't manage to post all our work
                  * items */
-                CDEBUG(D_NETERROR, "RDMA (tx: %p) failed: %d\n",
-                       kiblnd_wreqid2ptr(wc->wr_id), wc->status);
+                CNETERR("RDMA (tx: %p) failed: %d\n",
+                        kiblnd_wreqid2ptr(wc->wr_id), wc->status);
                 return;
 
         case IBLND_WID_TX:
@@ -3300,3 +3346,94 @@ kiblnd_scheduler(void *arg)
         kiblnd_thread_fini();
         return (0);
 }
+
+int
+kiblnd_failover_thread(void *arg)
+{
+        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
+        kib_dev_t         *dev;
+        cfs_waitlink_t     wait;
+        unsigned long      flags;
+        int                rc;
+
+        LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+
+        cfs_daemonize ("kiblnd_failover");
+        cfs_block_allsigs ();
+
+        cfs_waitlink_init(&wait);
+        cfs_write_lock_irqsave(glock, flags);
+
+        while (!kiblnd_data.kib_shutdown) {
+                int     do_failover = 0;
+                int     long_sleep;
+
+                cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+                                    ibd_fail_list) {
+                        if (cfs_time_before(cfs_time_current(),
+                                            dev->ibd_next_failover))
+                                continue;
+                        do_failover = 1;
+                        break;
+                }
+
+                if (do_failover) {
+                        cfs_list_del_init(&dev->ibd_fail_list);
+                        dev->ibd_failover = 1;
+                        cfs_write_unlock_irqrestore(glock, flags);
+
+                        rc = kiblnd_dev_failover(dev);
+
+                        cfs_write_lock_irqsave(glock, flags);
+
+                        LASSERT (dev->ibd_failover);
+                        dev->ibd_failover = 0;
+                        if (rc >= 0) { /* Device is OK or failover succeeded */
+                                dev->ibd_next_failover = cfs_time_shift(3);
+                                continue;
+                        }
+
+                        /* failed to failover, retry later */
+                        dev->ibd_next_failover =
+                                cfs_time_shift(min(dev->ibd_failed_failover, 10));
+                        if (kiblnd_dev_can_failover(dev)) {
+                                cfs_list_add_tail(&dev->ibd_fail_list,
+                                              &kiblnd_data.kib_failed_devs);
+                        }
+
+                        continue;
+                }
+
+                /* long sleep if no more pending failover */
+                long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+
+                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+                cfs_write_unlock_irqrestore(glock, flags);
+
+                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+                                                   cfs_time_seconds(1));
+                cfs_set_current_state(CFS_TASK_RUNNING);
+                cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+                cfs_write_lock_irqsave(glock, flags);
+
+                if (!long_sleep || rc != 0)
+                        continue;
+
+                /* we had a long sleep; routinely check all active devices,
+                 * because if there is no active connection on a dev and no
+                 * SEND from local, we may keep listening on the wrong HCA
+                 * forever after a bonding failover */
+                cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+                        if (kiblnd_dev_can_failover(dev)) {
+                                cfs_list_add_tail(&dev->ibd_fail_list,
+                                              &kiblnd_data.kib_failed_devs);
+                        }
+                }
+        }
+
+        cfs_write_unlock_irqrestore(glock, flags);
+
+        kiblnd_thread_fini();
+        return 0;
+}