LU-6142 lnet: convert kiblnd/ksocknal_thread_start to vararg
[fs/lustre-release.git] / lnet / klnds / o2iblnd / o2iblnd_cb.c
index 658d0ad..38c91a7 100644
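Most hunks in this file carry the same cleanups alongside the thread-start change named in the subject: open-coded list_empty()/list_entry(head->next, ...) pairs become list_first_entry_or_null() or list_first_entry(), bare list_for_each() loops become list_for_each_entry(), and int flags become bool. As an illustration only (not code taken from the patch), the list idiom reads:

	/* Sketch of the idiom adopted throughout this file.  The helper is
	 * the stock one from <linux/list.h>: it returns the first entry, or
	 * NULL once the list is empty, so the separate list_empty() test and
	 * list_entry(head->next, ...) lookup collapse into the loop head.
	 * kiblnd_drain_example is a made-up name used only for this sketch. */
	static void kiblnd_drain_example(struct list_head *txlist)
	{
		struct kib_tx *tx;

		while ((tx = list_first_entry_or_null(txlist, struct kib_tx,
						      tx_list)) != NULL) {
			list_del(&tx->tx_list);
			/* ... complete the tx here ... */
		}
	}
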
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  *
  * lnet/klnds/o2iblnd/o2iblnd_cb.c
  *
@@ -100,9 +99,9 @@ kiblnd_txlist_done(struct list_head *txlist, int status,
 {
        struct kib_tx *tx;
 
-       while (!list_empty(txlist)) {
-               tx = list_entry(txlist->next, struct kib_tx, tx_list);
-
+       while ((tx = list_first_entry_or_null(txlist,
+                                             struct kib_tx,
+                                             tx_list)) != NULL) {
                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
@@ -243,11 +242,9 @@ out:
 static struct kib_tx *
 kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
 {
-       struct list_head *tmp;
-
-       list_for_each(tmp, &conn->ibc_active_txs) {
-               struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
+       struct kib_tx *tx;
 
+       list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
 
@@ -814,6 +811,7 @@ __must_hold(&conn->ibc_lock)
        struct kib_msg *msg = tx->tx_msg;
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
        struct lnet_ni *ni = peer_ni->ibp_ni;
+       struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
        int ver = conn->ibc_version;
        int rc;
        int done;
@@ -898,11 +896,10 @@ __must_hold(&conn->ibc_lock)
                 /* close_conn will launch failover */
                 rc = -ENETDOWN;
         } else {
-               struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
                struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
                struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;
 
-               if (frd != NULL) {
+               if (frd != NULL && !frd->frd_posted) {
                        if (!frd->frd_valid) {
                                wr = &frd->frd_inv_wr.wr;
                                wr->next = &frd->frd_fastreg_wr.wr;
@@ -931,8 +928,11 @@ __must_hold(&conn->ibc_lock)
 
        conn->ibc_last_send = ktime_get();
 
-        if (rc == 0)
-                return 0;
+       if (rc == 0) {
+               if (frd != NULL)
+                       frd->frd_posted = true;
+               return 0;
+       }
 
         /* NB credits are transferred in the actual
          * message, which can only be the last work item */
@@ -990,9 +990,8 @@ kiblnd_check_sends_locked(struct kib_conn *conn)
         LASSERT (conn->ibc_reserved_credits >= 0);
 
         while (conn->ibc_reserved_credits > 0 &&
-              !list_empty(&conn->ibc_tx_queue_rsrvd)) {
-               tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
-                               struct kib_tx, tx_list);
+              (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
+                                             struct kib_tx, tx_list)) != NULL) {
                list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
                 conn->ibc_reserved_credits--;
         }
@@ -1014,17 +1013,17 @@ kiblnd_check_sends_locked(struct kib_conn *conn)
 
                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                         credit = 0;
-                       tx = list_entry(conn->ibc_tx_queue_nocred.next,
-                                       struct kib_tx, tx_list);
+                       tx = list_first_entry(&conn->ibc_tx_queue_nocred,
+                                             struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                         LASSERT (!IBLND_OOB_CAPABLE(ver));
                         credit = 1;
-                       tx = list_entry(conn->ibc_tx_noops.next,
-                                       struct kib_tx, tx_list);
+                       tx = list_first_entry(&conn->ibc_tx_noops,
+                                             struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                         credit = 1;
-                       tx = list_entry(conn->ibc_tx_queue.next,
-                                       struct kib_tx, tx_list);
+                       tx = list_first_entry(&conn->ibc_tx_queue,
+                                             struct kib_tx, tx_list);
                 } else
                         break;
 
@@ -1657,7 +1656,7 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 
                 /* is the REPLY message too small for RDMA? */
                nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
-                if (nob <= IBLND_MSG_SIZE)
+                if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
                         break;                  /* send IMMEDIATE */
 
                tx = kiblnd_get_idle_tx(ni, target.nid);
@@ -1704,7 +1703,7 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
         case LNET_MSG_PUT:
                 /* Is the payload small enough not to need RDMA? */
                nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
-                if (nob <= IBLND_MSG_SIZE)
+                if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
                         break;                  /* send IMMEDIATE */
 
                tx = kiblnd_get_idle_tx(ni, target.nid);
@@ -1932,18 +1931,6 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
         return rc;
 }
 
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
-       struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
-       if (IS_ERR(task))
-               return PTR_ERR(task);
-
-       atomic_inc(&kiblnd_data.kib_nthreads);
-       return 0;
-}
-
 static void
 kiblnd_thread_fini (void)
 {
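(The helper removed above took a fixed "char *name"; per the subject line its replacement accepts a printf-style format and lands outside this file, presumably in o2iblnd.h. A hypothetical sketch of such a wrapper, shown for illustration only and not part of this patch:

	/* Hypothetical vararg wrapper; kthread_run() has no va_list form,
	 * so the name is formatted into a local buffer first. */
	static inline int
	kiblnd_thread_start(int (*fn)(void *arg), void *arg, const char *fmt, ...)
	{
		struct task_struct *task;
		char name[TASK_COMM_LEN];
		va_list args;

		va_start(args, fmt);
		vsnprintf(name, sizeof(name), fmt, args);
		va_end(args);

		task = kthread_run(fn, arg, "%s", name);
		if (IS_ERR(task))
			return PTR_ERR(task);

		atomic_inc(&kiblnd_data.kib_nthreads);
		return 0;
	}

With a wrapper like this, callers could pass a format such as "kiblnd_sd_%02d_%02d" plus its arguments directly instead of formatting the thread name themselves.)
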
@@ -2076,9 +2063,9 @@ kiblnd_handle_early_rxs(struct kib_conn *conn)
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-       while (!list_empty(&conn->ibc_early_rxs)) {
-               rx = list_entry(conn->ibc_early_rxs.next,
-                               struct kib_rx, rx_list);
+       while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
+                                             struct kib_rx,
+                                             rx_list)) != NULL) {
                list_del(&rx->rx_list);
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
@@ -2153,10 +2140,10 @@ kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
        kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
 }
 
-static int
+static bool
 kiblnd_tx_may_discard(struct kib_conn *conn)
 {
-       int rc = 0;
+       bool rc = false;
        struct kib_tx *nxt;
        struct kib_tx *tx;
 
@@ -2169,7 +2156,7 @@ kiblnd_tx_may_discard(struct kib_conn *conn)
                        if (tx->tx_sending == 0) {
                                kiblnd_conn_decref(tx->tx_conn);
                                tx->tx_conn = NULL;
-                               rc = 1;
+                               rc = true;
                        }
                }
        }
@@ -2208,10 +2195,11 @@ kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
                           int error)
 {
        LIST_HEAD(zombies);
-       unsigned long   flags;
+       unsigned long flags;
+       enum lnet_msg_hstatus hstatus;
 
-       LASSERT (error != 0);
-       LASSERT (!in_interrupt());
+       LASSERT(error != 0);
+       LASSERT(!in_interrupt());
 
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -2254,12 +2242,20 @@ kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
        CNETERR("Deleting messages for %s: connection failed\n",
                libcfs_nid2str(peer_ni->ibp_nid));
 
-       if (error == -EHOSTUNREACH || error == -ETIMEDOUT)
-               kiblnd_txlist_done(&zombies, error,
-                                  LNET_MSG_STATUS_NETWORK_TIMEOUT);
-       else
-               kiblnd_txlist_done(&zombies, error,
-                                  LNET_MSG_STATUS_LOCAL_DROPPED);
+       switch (error) {
+       case -EHOSTUNREACH:
+       case -ETIMEDOUT:
+               hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT;
+               break;
+       case -ECONNREFUSED:
+               hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
+               break;
+       default:
+               hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
+               break;
+       }
+
+       kiblnd_txlist_done(&zombies, error, hstatus);
 }
 
 static void
@@ -2296,9 +2292,6 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
        /* connection established */
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-       /* reset retry count */
-       peer_ni->ibp_retries = 0;
-
        conn->ibc_last_send = ktime_get();
        kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
        kiblnd_peer_alive(peer_ni);
@@ -2356,8 +2349,8 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
         * scheduled.  We won't be using round robin on this first batch.
         */
        spin_lock(&conn->ibc_lock);
-       while (!list_empty(&txs)) {
-               tx = list_entry(txs.next, struct kib_tx, tx_list);
+       while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
+                                             tx_list)) != NULL) {
                list_del(&tx->tx_list);
 
                kiblnd_queue_tx_locked(tx, conn);
@@ -2744,11 +2737,6 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
                goto out;
        }
 
-       if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) {
-               reason = "retry count exceeded due to no listener";
-               goto out;
-       }
-
        switch (why) {
        default:
                reason = "Unknown";
@@ -2806,10 +2794,6 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
         case IBLND_REJECT_CONN_UNCOMPAT:
                 reason = "version negotiation";
                 break;
-
-       case IBLND_REJECT_INVALID_SRV_ID:
-               reason = "invalid service id";
-               break;
         }
 
        conn->ibc_reconnect = 1;
@@ -2836,6 +2820,7 @@ static void
 kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
 {
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
+       int status = -ECONNREFUSED;
 
        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2847,117 +2832,118 @@ kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
                break;
 
        case IB_CM_REJ_INVALID_SERVICE_ID:
-               peer_ni->ibp_retries++;
-               kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
-                                      IBLND_REJECT_INVALID_SRV_ID, NULL);
+               status = -EHOSTUNREACH;
                CNETERR("%s rejected: no listener at %d\n",
                        libcfs_nid2str(peer_ni->ibp_nid),
                        *kiblnd_tunables.kib_service);
                break;
 
-        case IB_CM_REJ_CONSUMER_DEFINED:
+       case IB_CM_REJ_CONSUMER_DEFINED:
                if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
                        struct kib_rej *rej = priv;
                        struct kib_connparams *cp = NULL;
-                        int               flip        = 0;
-                        __u64             incarnation = -1;
-
-                        /* NB. default incarnation is -1 because:
-                         * a) V1 will ignore dst incarnation in connreq.
-                         * b) V2 will provide incarnation while rejecting me,
-                         *    -1 will be overwrote.
-                         *
-                         * if I try to connect to a V1 peer_ni with V2 protocol,
-                         * it rejected me then upgrade to V2, I have no idea
-                         * about the upgrading and try to reconnect with V1,
-                         * in this case upgraded V2 can find out I'm trying to
-                         * talk to the old guy and reject me(incarnation is -1). 
-                         */
-
-                        if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
-                            rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
-                                __swab32s(&rej->ibr_magic);
-                                __swab16s(&rej->ibr_version);
-                                flip = 1;
-                        }
+                       bool flip = false;
+                       __u64 incarnation = -1;
+
+                       /* NB. default incarnation is -1 because:
+                        * a) V1 will ignore dst incarnation in connreq.
+                        * b) V2 will provide incarnation while rejecting me,
+                        *    -1 will be overwrote.
+                        *
+                        * if I try to connect to a V1 peer_ni with V2 protocol,
+                        * it rejected me then upgrade to V2, I have no idea
+                        * about the upgrading and try to reconnect with V1,
+                        * in this case upgraded V2 can find out I'm trying to
+                        * talk to the old guy and reject me(incarnation is -1).
+                        */
+
+                       if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
+                           rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
+                               __swab32s(&rej->ibr_magic);
+                               __swab16s(&rej->ibr_version);
+                               flip = true;
+                       }
 
                        if (priv_nob >= sizeof(struct kib_rej) &&
-                            rej->ibr_version > IBLND_MSG_VERSION_1) {
-                                /* priv_nob is always 148 in current version
-                                 * of OFED, so we still need to check version.
-                                 * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
-                                cp = &rej->ibr_cp;
-
-                                if (flip) {
-                                        __swab64s(&rej->ibr_incarnation);
-                                        __swab16s(&cp->ibcp_queue_depth);
-                                        __swab16s(&cp->ibcp_max_frags);
-                                        __swab32s(&cp->ibcp_max_msg_size);
-                                }
-
-                                incarnation = rej->ibr_incarnation;
-                        }
-
-                        if (rej->ibr_magic != IBLND_MSG_MAGIC &&
-                            rej->ibr_magic != LNET_PROTO_MAGIC) {
-                                CERROR("%s rejected: consumer defined fatal error\n",
-                                       libcfs_nid2str(peer_ni->ibp_nid));
-                                break;
-                        }
-
-                        if (rej->ibr_version != IBLND_MSG_VERSION &&
-                            rej->ibr_version != IBLND_MSG_VERSION_1) {
-                                CERROR("%s rejected: o2iblnd version %x error\n",
-                                       libcfs_nid2str(peer_ni->ibp_nid),
-                                       rej->ibr_version);
-                                break;
-                        }
-
-                        if (rej->ibr_why     == IBLND_REJECT_FATAL &&
-                            rej->ibr_version == IBLND_MSG_VERSION_1) {
-                                CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
-                                       libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
-
-                                if (conn->ibc_version != IBLND_MSG_VERSION_1)
-                                        rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
-                        }
-
-                        switch (rej->ibr_why) {
-                        case IBLND_REJECT_CONN_RACE:
-                        case IBLND_REJECT_CONN_STALE:
-                        case IBLND_REJECT_CONN_UNCOMPAT:
+                           rej->ibr_version > IBLND_MSG_VERSION_1) {
+                               /* priv_nob is always 148 in current version
+                                * of OFED, so we still need to check version.
+                                * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
+                                */
+                               cp = &rej->ibr_cp;
+
+                               if (flip) {
+                                       __swab64s(&rej->ibr_incarnation);
+                                       __swab16s(&cp->ibcp_queue_depth);
+                                       __swab16s(&cp->ibcp_max_frags);
+                                       __swab32s(&cp->ibcp_max_msg_size);
+                               }
+
+                               incarnation = rej->ibr_incarnation;
+                       }
+
+                       if (rej->ibr_magic != IBLND_MSG_MAGIC &&
+                           rej->ibr_magic != LNET_PROTO_MAGIC) {
+                               CERROR("%s rejected: consumer defined fatal error\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid));
+                               break;
+                       }
+
+                       if (rej->ibr_version != IBLND_MSG_VERSION &&
+                           rej->ibr_version != IBLND_MSG_VERSION_1) {
+                               CERROR("%s rejected: o2iblnd version %x error\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid),
+                                      rej->ibr_version);
+                               break;
+                       }
+
+                       if (rej->ibr_why     == IBLND_REJECT_FATAL &&
+                           rej->ibr_version == IBLND_MSG_VERSION_1) {
+                               CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid),
+                                      rej->ibr_version);
+
+                               if (conn->ibc_version != IBLND_MSG_VERSION_1)
+                                       rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+                       }
+
+                       switch (rej->ibr_why) {
+                       case IBLND_REJECT_CONN_RACE:
+                       case IBLND_REJECT_CONN_STALE:
+                       case IBLND_REJECT_CONN_UNCOMPAT:
                        case IBLND_REJECT_MSG_QUEUE_SIZE:
                        case IBLND_REJECT_RDMA_FRAGS:
                                kiblnd_check_reconnect(conn, rej->ibr_version,
-                                               incarnation, rej->ibr_why, cp);
-                                break;
-
-                        case IBLND_REJECT_NO_RESOURCES:
-                                CERROR("%s rejected: o2iblnd no resources\n",
-                                       libcfs_nid2str(peer_ni->ibp_nid));
-                                break;
-
-                        case IBLND_REJECT_FATAL:
-                                CERROR("%s rejected: o2iblnd fatal error\n",
-                                       libcfs_nid2str(peer_ni->ibp_nid));
-                                break;
-
-                        default:
-                                CERROR("%s rejected: o2iblnd reason %d\n",
-                                       libcfs_nid2str(peer_ni->ibp_nid),
-                                       rej->ibr_why);
-                                break;
-                        }
-                        break;
-                }
-                /* fall through */
-        default:
-                CNETERR("%s rejected: reason %d, size %d\n",
-                        libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
-                break;
-        }
+                                                      incarnation,
+                                                      rej->ibr_why, cp);
+                               break;
+
+                       case IBLND_REJECT_NO_RESOURCES:
+                               CERROR("%s rejected: o2iblnd no resources\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid));
+                               break;
+
+                       case IBLND_REJECT_FATAL:
+                               CERROR("%s rejected: o2iblnd fatal error\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid));
+                               break;
+
+                       default:
+                               CERROR("%s rejected: o2iblnd reason %d\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid),
+                                      rej->ibr_why);
+                               break;
+                       }
+                       break;
+               }
+               /* fall through */
+       default:
+               CNETERR("%s rejected: reason %d, size %d\n",
+                       libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
+               break;
+       }
 
-        kiblnd_connreq_done(conn, -ECONNREFUSED);
+       kiblnd_connreq_done(conn, status);
 }
 
 static void
@@ -3115,8 +3101,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 
         LASSERT(cmid->context == (void *)conn);
         LASSERT(conn->ibc_cmid == cmid);
-
-        rc = rdma_connect(cmid, &cp);
+       rc = rdma_connect_locked(cmid, &cp);
         if (rc != 0) {
                 CERROR("Can't connect to %s: %d\n",
                        libcfs_nid2str(peer_ni->ibp_nid), rc);
@@ -3312,11 +3297,8 @@ static int
 kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
 {
        struct kib_tx *tx;
-       struct list_head *ttmp;
-
-       list_for_each(ttmp, txs) {
-               tx = list_entry(ttmp, struct kib_tx, tx_list);
 
+       list_for_each_entry(tx, txs, tx_list) {
                if (txs != &conn->ibc_active_txs) {
                        LASSERT(tx->tx_queued);
                } else {
@@ -3358,7 +3340,6 @@ kiblnd_check_conns (int idx)
        struct kib_peer_ni *peer_ni;
        struct kib_conn *conn;
        struct kib_tx *tx, *tx_tmp;
-       struct list_head *ctmp;
        unsigned long flags;
 
        /* NB. We expect to have a look at all the peers and not find any
@@ -3379,12 +3360,10 @@ kiblnd_check_conns (int idx)
                        }
                }
 
-               list_for_each(ctmp, &peer_ni->ibp_conns) {
+               list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
                        int timedout;
                        int sendnoop;
 
-                       conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
                        LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
 
                        spin_lock(&conn->ibc_lock);
@@ -3425,9 +3404,9 @@ kiblnd_check_conns (int idx)
         * connection. We can only be sure RDMA activity
         * has ceased once the QP has been modified.
         */
-       while (!list_empty(&closes)) {
-               conn = list_entry(closes.next,
-                                 struct kib_conn, ibc_connd_list);
+       while ((conn = list_first_entry_or_null(&closes,
+                                               struct kib_conn,
+                                               ibc_connd_list)) != NULL) {
                list_del(&conn->ibc_connd_list);
                kiblnd_close_conn(conn, -ETIMEDOUT);
                kiblnd_conn_decref(conn);
@@ -3437,9 +3416,9 @@ kiblnd_check_conns (int idx)
         * NOOP, but there were no non-blocking tx descs
         * free to do it last time...
         */
-       while (!list_empty(&checksends)) {
-               conn = list_entry(checksends.next,
-                                 struct kib_conn, ibc_connd_list);
+       while ((conn = list_first_entry_or_null(&checksends,
+                                               struct kib_conn,
+                                               ibc_connd_list)) != NULL) {
                list_del(&conn->ibc_connd_list);
 
                spin_lock(&conn->ibc_lock);
@@ -3483,7 +3462,7 @@ kiblnd_connd (void *arg)
        struct kib_conn *conn;
        int timeout;
        int i;
-       int dropped_lock;
+       bool dropped_lock;
        int peer_index = 0;
        unsigned long deadline = jiffies;
 
@@ -3495,13 +3474,13 @@ kiblnd_connd (void *arg)
        while (!kiblnd_data.kib_shutdown) {
                int reconn = 0;
 
-               dropped_lock = 0;
+               dropped_lock = false;
 
-               if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+               conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
+                                               struct kib_conn, ibc_list);
+               if (conn) {
                        struct kib_peer_ni *peer_ni = NULL;
 
-                       conn = list_entry(kiblnd_data.kib_connd_zombies.next,
-                                         struct kib_conn, ibc_list);
                        list_del(&conn->ibc_list);
                        if (conn->ibc_reconnect) {
                                peer_ni = conn->ibc_peer;
@@ -3509,7 +3488,7 @@ kiblnd_connd (void *arg)
                        }
 
                        spin_unlock_irqrestore(lock, flags);
-                       dropped_lock = 1;
+                       dropped_lock = true;
 
                        kiblnd_destroy_conn(conn);
 
@@ -3528,14 +3507,15 @@ kiblnd_connd (void *arg)
                                              &kiblnd_data.kib_reconn_wait);
                }
 
-               if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+               conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
+                                               struct kib_conn, ibc_list);
+               if (conn) {
                        int wait;
-                       conn = list_entry(kiblnd_data.kib_connd_conns.next,
-                                         struct kib_conn, ibc_list);
+
                        list_del(&conn->ibc_list);
 
                        spin_unlock_irqrestore(lock, flags);
-                       dropped_lock = 1;
+                       dropped_lock = true;
 
                        kiblnd_disconnect_conn(conn);
                        wait = conn->ibc_waits;
@@ -3557,15 +3537,15 @@ kiblnd_connd (void *arg)
                                                 &kiblnd_data.kib_reconn_list);
                        }
 
-                       if (list_empty(&kiblnd_data.kib_reconn_list))
+                       conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
+                                                       struct kib_conn, ibc_list);
+                       if (!conn)
                                break;
 
-                       conn = list_entry(kiblnd_data.kib_reconn_list.next,
-                                         struct kib_conn, ibc_list);
                        list_del(&conn->ibc_list);
 
                        spin_unlock_irqrestore(lock, flags);
-                       dropped_lock = 1;
+                       dropped_lock = true;
 
                        reconn += kiblnd_reconnect_peer(conn->ibc_peer);
                        kiblnd_peer_decref(conn->ibc_peer);
@@ -3574,9 +3554,10 @@ kiblnd_connd (void *arg)
                        spin_lock_irqsave(lock, flags);
                }
 
-               if (!list_empty(&kiblnd_data.kib_connd_waits)) {
-                       conn = list_entry(kiblnd_data.kib_connd_waits.next,
-                                         struct kib_conn, ibc_list);
+               conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
+                                               struct kib_conn,
+                                               ibc_sched_list);
+               if (conn) {
                        list_del(&conn->ibc_list);
                        spin_unlock_irqrestore(lock, flags);
 
@@ -3585,7 +3566,7 @@ kiblnd_connd (void *arg)
                                kiblnd_conn_decref(conn);
 
                        spin_lock_irqsave(lock, flags);
-                       if (dropped_lock == 0)
+                       if (!dropped_lock)
                                list_add_tail(&conn->ibc_list,
                                              &kiblnd_data.kib_connd_waits);
                }
@@ -3599,7 +3580,7 @@ kiblnd_connd (void *arg)
                        unsigned int lnd_timeout;
 
                        spin_unlock_irqrestore(lock, flags);
-                       dropped_lock = 1;
+                       dropped_lock = true;
 
                        /* Time to check for RDMA timeouts on a few more
                         * peers: I do checks every 'p' seconds on a
@@ -3760,14 +3741,14 @@ kiblnd_cq_event(struct ib_event *event, void *arg)
 int
 kiblnd_scheduler(void *arg)
 {
-       long                    id = (long)arg;
-       struct kib_sched_info   *sched;
+       long id = (long)arg;
+       struct kib_sched_info *sched;
        struct kib_conn *conn;
-       wait_queue_entry_t      wait;
-       unsigned long           flags;
-       struct ib_wc            wc;
-       int                     did_something;
-       int                     rc;
+       wait_queue_entry_t wait;
+       unsigned long flags;
+       struct ib_wc wc;
+       bool did_something;
+       int rc;
 
        init_wait(&wait);
 
@@ -3775,10 +3756,7 @@ kiblnd_scheduler(void *arg)
 
        rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
        if (rc != 0) {
-               CWARN("Unable to bind on CPU partition %d, please verify "
-                     "whether all CPUs are healthy and reload modules if "
-                     "necessary, otherwise your system might under risk of "
-                     "low performance\n", sched->ibs_cpt);
+               CWARN("Unable to bind on CPU partition %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt);
        }
 
        spin_lock_irqsave(&sched->ibs_lock, flags);
@@ -3792,11 +3770,12 @@ kiblnd_scheduler(void *arg)
                        spin_lock_irqsave(&sched->ibs_lock, flags);
                }
 
-               did_something = 0;
+               did_something = false;
 
-               if (!list_empty(&sched->ibs_conns)) {
-                       conn = list_entry(sched->ibs_conns.next,
-                                         struct kib_conn, ibc_sched_list);
+               conn = list_first_entry_or_null(&sched->ibs_conns,
+                                               struct kib_conn,
+                                               ibc_sched_list);
+               if (conn) {
                        /* take over kib_sched_conns' ref on conn... */
                        LASSERT(conn->ibc_scheduled);
                        list_del(&conn->ibc_sched_list);
@@ -3806,18 +3785,17 @@ kiblnd_scheduler(void *arg)
 
                        wc.wr_id = IBLND_WID_INVAL;
 
-                        rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
-                        if (rc == 0) {
-                                rc = ib_req_notify_cq(conn->ibc_cq,
-                                                      IB_CQ_NEXT_COMP);
-                                if (rc < 0) {
-                                        CWARN("%s: ib_req_notify_cq failed: %d, "
-                                              "closing connection\n",
-                                              libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
-                                        kiblnd_close_conn(conn, -EIO);
-                                        kiblnd_conn_decref(conn);
+                       rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+                       if (rc == 0) {
+                               rc = ib_req_notify_cq(conn->ibc_cq,
+                                                     IB_CQ_NEXT_COMP);
+                               if (rc < 0) {
+                                       CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
+                                             libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+                                       kiblnd_close_conn(conn, -EIO);
+                                       kiblnd_conn_decref(conn);
                                        spin_lock_irqsave(&sched->ibs_lock,
-                                                             flags);
+                                                         flags);
                                        continue;
                                }
 
@@ -3838,8 +3816,7 @@ kiblnd_scheduler(void *arg)
                        }
 
                        if (rc < 0) {
-                               CWARN("%s: ib_poll_cq failed: %d, "
-                                     "closing connection\n",
+                               CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
                                      libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                      rc);
                                kiblnd_close_conn(conn, -EIO);
@@ -3857,7 +3834,7 @@ kiblnd_scheduler(void *arg)
                                /* +1 ref for sched_conns */
                                kiblnd_conn_addref(conn);
                                list_add_tail(&conn->ibc_sched_list,
-                                                 &sched->ibs_conns);
+                                             &sched->ibs_conns);
                                if (waitqueue_active(&sched->ibs_waitq))
                                        wake_up(&sched->ibs_waitq);
                        } else {
@@ -3869,14 +3846,14 @@ kiblnd_scheduler(void *arg)
                                kiblnd_complete(&wc);
 
                                spin_lock_irqsave(&sched->ibs_lock, flags);
-                        }
+                       }
 
-                        kiblnd_conn_decref(conn); /* ...drop my ref from above */
-                        did_something = 1;
-                }
+                       kiblnd_conn_decref(conn); /* ..drop my ref from above */
+                       did_something = true;
+               }
 
-                if (did_something)
-                        continue;
+               if (did_something)
+                       continue;
 
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
@@ -3898,58 +3875,58 @@ kiblnd_scheduler(void *arg)
 int
 kiblnd_failover_thread(void *arg)
 {
-       rwlock_t        *glock = &kiblnd_data.kib_global_lock;
+       rwlock_t *glock = &kiblnd_data.kib_global_lock;
        struct kib_dev *dev;
        struct net *ns = arg;
        wait_queue_entry_t wait;
-       unsigned long    flags;
-       int              rc;
+       unsigned long flags;
+       int rc;
 
        LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
 
        init_wait(&wait);
        write_lock_irqsave(glock, flags);
 
-        while (!kiblnd_data.kib_shutdown) {
-                int     do_failover = 0;
-                int     long_sleep;
+       while (!kiblnd_data.kib_shutdown) {
+               bool do_failover = false;
+               int long_sleep;
 
                list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
-                                    ibd_fail_list) {
+                                   ibd_fail_list) {
                        if (ktime_get_seconds() < dev->ibd_next_failover)
-                                continue;
-                        do_failover = 1;
-                        break;
-                }
+                               continue;
+                       do_failover = true;
+                       break;
+               }
 
-                if (do_failover) {
+               if (do_failover) {
                        list_del_init(&dev->ibd_fail_list);
-                        dev->ibd_failover = 1;
+                       dev->ibd_failover = 1;
                        write_unlock_irqrestore(glock, flags);
 
                        rc = kiblnd_dev_failover(dev, ns);
 
                        write_lock_irqsave(glock, flags);
 
-                        LASSERT (dev->ibd_failover);
-                        dev->ibd_failover = 0;
-                        if (rc >= 0) { /* Device is OK or failover succeed */
+                       LASSERT(dev->ibd_failover);
+                       dev->ibd_failover = 0;
+                       if (rc >= 0) { /* Device is OK or failover succeed */
                                dev->ibd_next_failover = ktime_get_seconds() + 3;
-                                continue;
-                        }
+                               continue;
+                       }
 
-                        /* failed to failover, retry later */
+                       /* failed to failover, retry later */
                        dev->ibd_next_failover = ktime_get_seconds() +
-                                                min(dev->ibd_failed_failover, 10);
-                        if (kiblnd_dev_can_failover(dev)) {
+                               min(dev->ibd_failed_failover, 10);
+                       if (kiblnd_dev_can_failover(dev)) {
                                list_add_tail(&dev->ibd_fail_list,
-                                              &kiblnd_data.kib_failed_devs);
-                        }
+                                             &kiblnd_data.kib_failed_devs);
+                       }
 
-                        continue;
-                }
+                       continue;
+               }
 
-                /* long sleep if no more pending failover */
+               /* long sleep if no more pending failover */
                long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
 
                set_current_state(TASK_INTERRUPTIBLE);
@@ -3957,28 +3934,29 @@ kiblnd_failover_thread(void *arg)
                write_unlock_irqrestore(glock, flags);
 
                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
-                                                  cfs_time_seconds(1));
+                                     cfs_time_seconds(1));
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_lock_irqsave(glock, flags);
 
-                if (!long_sleep || rc != 0)
-                        continue;
+               if (!long_sleep || rc != 0)
+                       continue;
 
-                /* have a long sleep, routine check all active devices,
-                 * we need checking like this because if there is not active
-                 * connection on the dev and no SEND from local, we may listen
-                 * on wrong HCA for ever while there is a bonding failover */
+               /* have a long sleep, routine check all active devices,
+                * we need checking like this because if there is not active
+                * connection on the dev and no SEND from local, we may listen
+                * on wrong HCA for ever while there is a bonding failover
+                */
                list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
-                        if (kiblnd_dev_can_failover(dev)) {
+                       if (kiblnd_dev_can_failover(dev)) {
                                list_add_tail(&dev->ibd_fail_list,
-                                              &kiblnd_data.kib_failed_devs);
-                        }
-                }
-        }
+                                             &kiblnd_data.kib_failed_devs);
+                       }
+               }
+       }
 
        write_unlock_irqrestore(glock, flags);
 
-        kiblnd_thread_fini();
-        return 0;
+       kiblnd_thread_fini();
+       return 0;
 }