LU-13972 o2iblnd: Don't retry indefinitely
[fs/lustre-release.git] lnet/klnds/o2iblnd/o2iblnd_cb.c
index b116f2b..eacc525 100644
@@ -610,7 +610,8 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
        if (rc != 0) {
-               CERROR("Can't map %u pages: %d\n", nob, rc);
+               CERROR("Can't map %u bytes (%u/%u): %d\n", nob,
+                      tx->tx_nfrags, rd->rd_nfrags, rc);
                return rc;
        }
 
@@ -1020,24 +1021,28 @@ kiblnd_check_sends_locked(struct kib_conn *conn)
 static void
 kiblnd_tx_complete(struct kib_tx *tx, int status)
 {
-        int           failed = (status != IB_WC_SUCCESS);
+       int           failed = (status != IB_WC_SUCCESS);
        struct kib_conn   *conn = tx->tx_conn;
-        int           idle;
+       int           idle;
 
-        LASSERT (tx->tx_sending > 0);
+       if (tx->tx_sending <= 0) {
+               CERROR("Received an event on a freed tx: %p status %d\n",
+                      tx, tx->tx_status);
+               return;
+       }
 
-        if (failed) {
-                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
+       if (failed) {
+               if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie %#llx"
-                                " sending %d waiting %d: failed %d\n",
-                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
-                                status);
+                               " sending %d waiting %d: failed %d\n",
+                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                               tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
+                               status);
 
-                kiblnd_close_conn(conn, -EIO);
-        } else {
-                kiblnd_peer_alive(conn->ibc_peer);
-        }
+               kiblnd_close_conn(conn, -EIO);
+       } else {
+               kiblnd_peer_alive(conn->ibc_peer);
+       }
 
        spin_lock(&conn->ibc_lock);
 
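The hunk above downgrades the hard LASSERT() on tx_sending to an error message plus an early return, so a late or duplicate work completion on an already-idle tx is logged instead of taking the whole node down. A minimal sketch of that defensive pattern follows; the demo_* names are hypothetical and only mirror the fields the check touches, not the real struct kib_tx.

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct kib_tx; only the fields the check needs. */
struct demo_tx {
	int tx_sending;		/* number of outstanding sends */
	int tx_status;
};

/* Returns true when the completion is plausible and may be processed. */
static bool demo_tx_event_sane(struct demo_tx *tx)
{
	if (tx->tx_sending <= 0) {
		/* stray completion: report it and drop it instead of asserting */
		pr_err("completion on idle tx %p, status %d\n",
		       tx, tx->tx_status);
		return false;
	}
	return true;
}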
@@ -1288,14 +1293,17 @@ kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
        spin_unlock(&conn->ibc_lock);
 }
 
-static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
-                               struct sockaddr_in *srcaddr,
-                               struct sockaddr_in *dstaddr,
-                               int timeout_ms)
+static int
+kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid,
+                       struct sockaddr_in *srcaddr,
+                       struct sockaddr_in *dstaddr,
+                       int timeout_ms)
 {
         unsigned short port;
         int rc;
 
+       LASSERT(capable(CAP_NET_BIND_SERVICE));
+
         /* allow the port to be reused */
         rc = rdma_set_reuseaddr(cmid, 1);
         if (rc != 0) {
@@ -1325,6 +1333,33 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
         return rc;
 }
 
+static int
+kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+                   struct sockaddr_in *srcaddr,
+                   struct sockaddr_in *dstaddr,
+                   int timeout_ms)
+{
+       const struct cred *old_creds = NULL;
+       struct cred *new_creds;
+       int rc;
+
+       if (!capable(CAP_NET_BIND_SERVICE)) {
+               new_creds = prepare_creds();
+               if (!new_creds)
+                       return -ENOMEM;
+
+               cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
+               old_creds = override_creds(new_creds);
+       }
+
+       rc = kiblnd_resolve_addr_cap(cmid, srcaddr, dstaddr, timeout_ms);
+
+       if (old_creds)
+               revert_creds(old_creds);
+
+       return rc;
+}
+
 static void
 kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
 {
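The kiblnd_resolve_addr() wrapper added above temporarily raises CAP_NET_BIND_SERVICE so that kiblnd_resolve_addr_cap() can bind a privileged (below 1024) source port; the o2iblnd service number defaults to 987, which sits in that range. Below is a generic sketch of the override_creds() pattern the wrapper uses, with a hypothetical demo_run_with_bind_cap() helper and op() callback standing in for the RDMA address resolution; the trailing put_cred() belongs to the usual prepare_creds() lifecycle and is not taken from the hunk.

#include <linux/cred.h>
#include <linux/capability.h>
#include <linux/errno.h>

/* Run op(arg) with CAP_NET_BIND_SERVICE raised, then restore the caller's
 * credentials.  Generic illustration only; the hunk above open-codes the
 * same steps around kiblnd_resolve_addr_cap(). */
static int demo_run_with_bind_cap(int (*op)(void *), void *arg)
{
	const struct cred *old_creds = NULL;
	struct cred *new_creds = NULL;
	int rc;

	if (!capable(CAP_NET_BIND_SERVICE)) {
		new_creds = prepare_creds();	/* private copy of our creds */
		if (!new_creds)
			return -ENOMEM;
		cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
		old_creds = override_creds(new_creds);	/* install the copy */
	}

	rc = op(arg);		/* runs with the extra capability if needed */

	if (old_creds) {
		revert_creds(old_creds);	/* back to the original creds */
		put_cred(new_creds);		/* drop our prepare_creds() ref */
	}
	return rc;
}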
@@ -2042,15 +2077,12 @@ void
 kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
 {
        LIST_HEAD(zombies);
-       struct list_head        *tmp;
-       struct list_head        *nxt;
+       struct kib_tx *nxt;
        struct kib_tx *tx;
 
        spin_lock(&conn->ibc_lock);
 
-       list_for_each_safe(tmp, nxt, txs) {
-               tx = list_entry(tmp, struct kib_tx, tx_list);
-
+       list_for_each_entry_safe(tx, nxt, txs, tx_list) {
                if (txs == &conn->ibc_active_txs) {
                        LASSERT(!tx->tx_queued);
                        LASSERT(tx->tx_waiting ||
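The kiblnd_abort_txs() hunk is a mechanical conversion from list_for_each_safe() plus list_entry() to list_for_each_entry_safe(), which keeps a typed cursor and drops the two struct list_head temporaries. A generic sketch of the same idiom, using a hypothetical demo_item type rather than struct kib_tx:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head node;
	int		 value;
};

/* The "_safe" variants cache the next pointer up front, so the current
 * entry may be unlinked and freed inside the loop body. */
static void demo_drain(struct list_head *head)
{
	struct demo_item *it, *next;

	list_for_each_entry_safe(it, next, head, node) {
		list_del(&it->node);
		kfree(it);
	}
}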
@@ -2202,22 +2234,25 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
                  peer_ni->ibp_accepting > 0));
 
-        LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
-        conn->ibc_connvars = NULL;
+       LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+       conn->ibc_connvars = NULL;
 
-        if (status != 0) {
-                /* failed to establish connection */
-                kiblnd_peer_connect_failed(peer_ni, active, status);
-                kiblnd_finalise_conn(conn);
-                return;
-        }
+       if (status != 0) {
+               /* failed to establish connection */
+               kiblnd_peer_connect_failed(peer_ni, active, status);
+               kiblnd_finalise_conn(conn);
+               return;
+       }
 
-        /* connection established */
+       /* connection established */
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
+       /* reset retry count */
+       peer_ni->ibp_retries = 0;
+
        conn->ibc_last_send = ktime_get();
-        kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
-        kiblnd_peer_alive(peer_ni);
+       kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
+       kiblnd_peer_alive(peer_ni);
 
        /* Add conn to peer_ni's list and nuke any dangling conns from a different
         * peer_ni instance... */
@@ -2291,7 +2326,11 @@ kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
 {
         int          rc;
 
+#ifdef HAVE_RDMA_REJECT_4ARGS
+       rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
+#else
         rc = rdma_reject(cmid, rej, sizeof(*rej));
+#endif
 
         if (rc != 0)
                 CWARN("Error %d sending reject\n", rc);
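Newer kernels (around v5.8) added a fourth rdma_reject() argument carrying the CM reject reason; HAVE_RDMA_REJECT_4ARGS presumably comes from the Lustre configure checks that detect which signature is present. An alternative shape for the same compatibility fix is to hide the #ifdef behind one inline wrapper instead of repeating it at each call site; the demo_rdma_reject() name below is hypothetical:

#include <linux/types.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>

static inline int demo_rdma_reject(struct rdma_cm_id *cmid,
				   const void *priv, u8 priv_len)
{
#ifdef HAVE_RDMA_REJECT_4ARGS
	/* newer kernels: the reject reason is passed explicitly */
	return rdma_reject(cmid, priv, priv_len, IB_CM_REJ_CONSUMER_DEFINED);
#else
	/* older kernels: the reason is implicitly consumer-defined */
	return rdma_reject(cmid, priv, priv_len);
#endif
}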
@@ -2659,10 +2698,15 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
                goto out;
        }
 
-        switch (why) {
-        default:
-                reason = "Unknown";
-                break;
+       if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) {
+               reason = "retry count exceeded due to no listener";
+               goto out;
+       }
+
+       switch (why) {
+       default:
+               reason = "Unknown";
+               break;
 
        case IBLND_REJECT_RDMA_FRAGS: {
                struct lnet_ioctl_config_o2iblnd_tunables *tunables;
@@ -2756,13 +2800,14 @@ kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
                                       IBLND_REJECT_CONN_STALE, NULL);
                break;
 
-        case IB_CM_REJ_INVALID_SERVICE_ID:
+       case IB_CM_REJ_INVALID_SERVICE_ID:
+               peer_ni->ibp_retries++;
                kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
                                       IBLND_REJECT_INVALID_SRV_ID, NULL);
-                CNETERR("%s rejected: no listener at %d\n",
-                        libcfs_nid2str(peer_ni->ibp_nid),
-                        *kiblnd_tunables.kib_service);
-                break;
+               CNETERR("%s rejected: no listener at %d\n",
+                       libcfs_nid2str(peer_ni->ibp_nid),
+                       *kiblnd_tunables.kib_service);
+               break;
 
         case IB_CM_REJ_CONSUMER_DEFINED:
                if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
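Together with the ibp_retries reset in the kiblnd_connreq_done() hunk above, these changes implement the bounded retry that gives the patch its title: each IB_CM_REJ_INVALID_SERVICE_ID ("no listener") rejection bumps peer_ni->ibp_retries, kiblnd_check_reconnect() gives up once the counter passes the kib_retry_count tunable, and a successful connection resets it. A compact sketch of that policy with hypothetical demo_* types; the real state lives in struct kib_peer_ni and the module tunables:

#include <linux/types.h>

struct demo_peer {
	unsigned int retries;		/* mirrors ibp_retries */
};

/* Called when the peer rejected us because no listener answered. */
static bool demo_may_retry(struct demo_peer *p, unsigned int retry_limit)
{
	if (++p->retries > retry_limit)
		return false;	/* stop: nothing is listening, don't loop forever */
	return true;
}

/* Called once a connection is fully established. */
static void demo_on_connected(struct demo_peer *p)
{
	p->retries = 0;		/* success refills the retry budget */
}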
@@ -3650,7 +3695,6 @@ kiblnd_scheduler(void *arg)
        unsigned long           flags;
        struct ib_wc            wc;
        int                     did_something;
-       int                     busy_loops = 0;
        int                     rc;
 
        init_waitqueue_entry(&wait, current);
@@ -3668,11 +3712,10 @@ kiblnd_scheduler(void *arg)
        spin_lock_irqsave(&sched->ibs_lock, flags);
 
        while (!kiblnd_data.kib_shutdown) {
-               if (busy_loops++ >= IBLND_RESCHED) {
+               if (need_resched()) {
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                        cond_resched();
-                       busy_loops = 0;
 
                        spin_lock_irqsave(&sched->ibs_lock, flags);
                }
@@ -3768,7 +3811,6 @@ kiblnd_scheduler(void *arg)
                spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                schedule();
-               busy_loops = 0;
 
                remove_wait_queue(&sched->ibs_waitq, &wait);
                set_current_state(TASK_RUNNING);
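The final hunks drop the hand-rolled busy_loops counter, which forced a reschedule every IBLND_RESCHED iterations, in favour of asking the scheduler directly via need_resched(), so kiblnd_scheduler() yields exactly when the kernel wants the CPU back and the counter bookkeeping disappears. A generic sketch of the drop-lock/cond_resched() idiom, with a hypothetical demo_service_loop() rather than the full scheduler thread:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Poll a queue under a spinlock, yielding the CPU whenever the scheduler
 * asks for it; the lock is always dropped before cond_resched(). */
static void demo_service_loop(spinlock_t *lock, const bool *shutdown)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	while (!*shutdown) {
		if (need_resched()) {
			spin_unlock_irqrestore(lock, flags);
			cond_resched();
			spin_lock_irqsave(lock, flags);
		}

		/* ... dequeue and handle one completion here ... */
	}

	spin_unlock_irqrestore(lock, flags);
}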