LU-14627 lnet: Ensure ref taken when queueing for discovery
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index 20ae558..709fe24 100644
--- a/lnet/lnet/peer.c
+++ b/lnet/lnet/peer.c
@@ -263,6 +263,7 @@ lnet_peer_alloc(lnet_nid_t nid)
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = nid;
        lp->lp_disc_src_nid = LNET_NID_ANY;
+       lp->lp_disc_dst_nid = LNET_NID_ANY;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
@@ -1347,7 +1348,13 @@ LNetPrimaryNID(lnet_nid_t nid)
        }
        lp = lpni->lpni_peer_net->lpn_peer;
 
-       while (!lnet_peer_is_uptodate(lp)) {
+       /* If discovery is disabled locally then we needn't bother running
+        * discovery here because discovery will not modify whatever
+        * primary NID is currently set for this peer. If the specified peer is
+        * down then this discovery can introduce long delays into the mount
+        * process, so skip it if it isn't necessary.
+        */
+       while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
                spin_lock(&lp->lp_lock);
                /* force a full discovery cycle */
                lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
@@ -1368,7 +1375,11 @@ LNetPrimaryNID(lnet_nid_t nid)
                }
                lp = lpni->lpni_peer_net->lpn_peer;
 
-               /* Only try once if discovery is disabled */
+               /* If we find that the peer has discovery disabled then we will
+                * not modify whatever primary NID is currently set for this
+                * peer. Thus, we can break out of this loop even if the peer
+                * is not fully up to date.
+                */
                if (lnet_is_discovery_disabled(lp))
                        break;
        }
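
The two new comments above carry the whole rationale: if discovery is switched off locally it cannot change the peer's primary NID, so the loop is skipped, and if the peer itself reports discovery disabled there is no point finishing the loop either. A minimal, self-contained sketch of that decision logic, using invented stand-in types and helpers rather than the real lnet structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the peer state consulted in LNetPrimaryNID(). */
struct peer_model {
	bool up_to_date;        /* models lnet_peer_is_uptodate(lp) */
	bool peer_no_discovery; /* models lnet_is_discovery_disabled(lp) */
	int  discovery_rounds;  /* rounds left before up_to_date flips */
};

/* Local knob, modeling the lnet_peer_discovery_disabled tunable. */
static bool local_discovery_disabled;

static void run_discovery_round(struct peer_model *p)
{
	if (--p->discovery_rounds <= 0)
		p->up_to_date = true;
}

/* Models the reworked loop: never start if discovery is off locally,
 * and stop early once the peer advertises discovery disabled. */
static void resolve_primary_nid(struct peer_model *p)
{
	while (!local_discovery_disabled && !p->up_to_date) {
		run_discovery_round(p);
		if (p->peer_no_discovery)
			break;
	}
}

int main(void)
{
	struct peer_model p = { false, false, 2 };

	local_discovery_disabled = true;
	resolve_primary_nid(&p);
	printf("locally disabled: up_to_date=%d\n", p.up_to_date); /* 0 */

	local_discovery_disabled = false;
	resolve_primary_nid(&p);
	printf("enabled: up_to_date=%d\n", p.up_to_date);          /* 1 */
	return 0;
}
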
@@ -2521,6 +2532,7 @@ lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
        spin_lock(&lp->lp_lock);
 
        lp->lp_disc_src_nid = ev->target.nid;
+       lp->lp_disc_dst_nid = ev->source.nid;
 
        /*
         * If some kind of error happened the contents of message
@@ -2553,20 +2565,41 @@ lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
                goto out;
        }
 
-
        /*
         * The peer may have discovery disabled at its end. Set
         * NO_DISCOVERY as appropriate.
         */
-       if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
-           !lnet_peer_discovery_disabled) {
-               CDEBUG(D_NET, "Peer %s has discovery enabled\n",
-                      libcfs_nid2str(lp->lp_primary_nid));
-               lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
-       } else {
+       if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
+           lnet_peer_discovery_disabled) {
                CDEBUG(D_NET, "Peer %s has discovery disabled\n",
                       libcfs_nid2str(lp->lp_primary_nid));
+
+               /* Detect whether this peer has toggled discovery from on to
+                * off and whether we can delete and re-create the peer. Peers
+                * that were manually configured cannot be deleted by discovery.
+                * We need to delete this peer and re-create it if the peer was
+                * not configured manually, is currently considered DD capable,
+                * and either:
+                * 1. We've already discovered the peer (the peer has toggled
+                *    the discovery feature from on to off), or
+                * 2. The peer is considered MR, but it was not user configured
+                *    (this was a "temporary" peer created via the kernel APIs
+                *     that we're discovering for the first time)
+                */
+               if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
+                                     LNET_PEER_NO_DISCOVERY)) &&
+                   (lp->lp_state & (LNET_PEER_DISCOVERED |
+                                    LNET_PEER_MULTI_RAIL))) {
+                       CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
+                              libcfs_nid2str(lp->lp_primary_nid),
+                              lp->lp_state);
+                       lp->lp_state |= LNET_PEER_MARK_DELETION;
+               }
                lp->lp_state |= LNET_PEER_NO_DISCOVERY;
+       } else {
+               CDEBUG(D_NET, "Peer %s has discovery enabled\n",
+                      libcfs_nid2str(lp->lp_primary_nid));
+               lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
        }
 
        /*
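
The deletion check introduced above is a pure test on the peer's state bits. A hedged, self-contained restatement of just that predicate, with illustrative flag values (the real LNET_PEER_* bits are defined elsewhere in lnet and differ from these):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit values only, not the real LNET_PEER_* definitions. */
#define PEER_CONFIGURED    (1 << 0)
#define PEER_DISCOVERED    (1 << 1)
#define PEER_MULTI_RAIL    (1 << 2)
#define PEER_NO_DISCOVERY  (1 << 3)

/* Mirrors the condition in lnet_discovery_event_reply(): mark for deletion
 * only if the peer was not manually configured, is not already known to be
 * discovery-disabled, and was either discovered before or is multi-rail. */
static bool should_mark_for_deletion(unsigned int state)
{
	return !(state & (PEER_CONFIGURED | PEER_NO_DISCOVERY)) &&
	        (state & (PEER_DISCOVERED | PEER_MULTI_RAIL));
}

int main(void)
{
	printf("%d\n", should_mark_for_deletion(PEER_DISCOVERED));     /* 1 */
	printf("%d\n", should_mark_for_deletion(PEER_MULTI_RAIL));     /* 1 */
	printf("%d\n", should_mark_for_deletion(PEER_CONFIGURED |
						PEER_DISCOVERED));     /* 0 */
	printf("%d\n", should_mark_for_deletion(PEER_NO_DISCOVERY));   /* 0 */
	return 0;
}
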
@@ -2774,7 +2807,8 @@ static void lnet_discovery_event_handler(struct lnet_event *event)
 
        /* put peer back at end of request queue, if discovery not already
         * done */
-       if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
+       if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
+           lnet_peer_queue_for_discovery(lp)) {
                list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
                wake_up(&the_lnet.ln_dc_waitq);
        }
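
This hunk is the change the commit subject names: before touching the list, the handler now calls lnet_peer_queue_for_discovery(), so that a reference is held for the peer while it sits on a discovery list, and only then is the entry moved to the tail of ln_dc_request. A rough user-space sketch of that refcount-while-queued pattern; the types, helper names, and return convention here are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of a refcounted peer that may sit on a work queue. */
struct peer_model {
	int  refcount;
	bool on_request_queue;
};

/* Models the guarantee in the commit subject: placing a peer on the
 * discovery queue always takes a reference, so the queue never holds a
 * pointer to a peer that has already been freed. Returns true if the
 * peer ends up queued. */
static bool queue_for_discovery(struct peer_model *p)
{
	if (!p->on_request_queue) {
		p->refcount++;              /* ref owned by the queue entry */
		p->on_request_queue = true;
	}
	return p->on_request_queue;
}

/* The matching release when the discovery thread is done with the peer. */
static void dequeue_after_discovery(struct peer_model *p)
{
	if (p->on_request_queue) {
		p->on_request_queue = false;
		p->refcount--;
	}
}

int main(void)
{
	struct peer_model p = { .refcount = 1 };

	if (queue_for_discovery(&p))
		printf("queued, refcount=%d\n", p.refcount);   /* 2 */
	dequeue_after_discovery(&p);
	printf("done, refcount=%d\n", p.refcount);             /* 1 */
	return 0;
}
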
@@ -3087,7 +3121,7 @@ __must_hold(&lp->lp_lock)
         * of deleting it.
         */
        if (!list_empty(&lp->lp_dc_list))
-               list_del(&lp->lp_dc_list);
+               list_del_init(&lp->lp_dc_list);
        list_for_each_entry_safe(route, tmp,
                                 &lp->lp_routes,
                                 lr_gwlist)
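
The switch to list_del_init() serves the same lifetime bookkeeping: any later list_empty() test on lp_dc_list stays meaningful only if the entry re-links to itself when removed, whereas plain list_del() leaves the entry's pointers stale. A small self-contained sketch of the difference, using a minimal doubly linked list instead of the kernel's <linux/list.h>:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's struct list_head. */
struct list_node {
	struct list_node *next, *prev;
};

static void list_init(struct list_node *n)
{
	n->next = n->prev = n;
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Like the kernel's list_del(): unlinks n but leaves its pointers stale. */
static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Like list_del_init(): unlinks n and re-links it to itself, so a later
 * list_empty(n) check is meaningful. */
static void list_del_init(struct list_node *n)
{
	list_del(n);
	list_init(n);
}

static bool list_empty(const struct list_node *n)
{
	return n->next == n;
}

int main(void)
{
	struct list_node head, entry;

	list_init(&head);
	list_init(&entry);
	list_add_tail(&entry, &head);

	list_del_init(&entry);
	/* With plain list_del() this test would still see the old links. */
	printf("off the list: %d\n", list_empty(&entry)); /* 1 */
	return 0;
}
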
@@ -3227,8 +3261,10 @@ __must_hold(&lp->lp_lock)
                         * received by lp, we need to set the discovery source
                         * NID for new_lp to the NID stored in lp.
                         */
-                       if (lp->lp_disc_src_nid != LNET_NID_ANY)
+                       if (lp->lp_disc_src_nid != LNET_NID_ANY) {
                                new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
+                               new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
+                       }
                        spin_unlock(&new_lp->lp_lock);
                        spin_unlock(&lp->lp_lock);
 
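
The context above explains the handoff: the ping reply was received by lp, so when new_lp takes over the in-flight discovery, both recorded ends of that reply path must move with it, which is why lp_disc_dst_nid is now carried over alongside lp_disc_src_nid. A tiny sketch of the same guarded copy, with invented stand-in types:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t nid_model_t;
#define NID_ANY ((nid_model_t)~0ULL)

/* Hypothetical slice of the peer state involved in the handoff. */
struct peer_model {
	nid_model_t disc_src_nid; /* local NID the ping reply arrived on */
	nid_model_t disc_dst_nid; /* peer NID that sent the ping reply */
};

/* Mirrors the hunk above: when a replacement peer object takes over an
 * in-flight discovery, both ends of the reply path move with it. */
static void carry_over_discovery(struct peer_model *new_lp,
				 const struct peer_model *lp)
{
	if (lp->disc_src_nid != NID_ANY) {
		new_lp->disc_src_nid = lp->disc_src_nid;
		new_lp->disc_dst_nid = lp->disc_dst_nid;
	}
}

int main(void)
{
	struct peer_model old = { 10, 20 };
	struct peer_model new_lp = { NID_ANY, NID_ANY };

	carry_over_discovery(&new_lp, &old);
	printf("%llu %llu\n", (unsigned long long)new_lp.disc_src_nid,
	       (unsigned long long)new_lp.disc_dst_nid); /* 10 20 */
	return 0;
}
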
@@ -3278,41 +3314,10 @@ __must_hold(&lp->lp_lock)
        return rc ? rc : LNET_REDISCOVER_PEER;
 }
 
-/*
- * Select NID to send a Ping or Push to.
- */
-static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
-{
-       struct lnet_peer_ni *lpni;
-
-       /* Look for a direct-connected NID for this peer. */
-       lpni = NULL;
-       while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
-               if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
-                       continue;
-               break;
-       }
-       if (lpni)
-               return lpni->lpni_nid;
-
-       /* Look for a routed-connected NID for this peer. */
-       lpni = NULL;
-       while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
-               if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
-                       continue;
-               break;
-       }
-       if (lpni)
-               return lpni->lpni_nid;
-
-       return LNET_NID_ANY;
-}
-
 /* Active side of ping. */
 static int lnet_peer_send_ping(struct lnet_peer *lp)
 __must_hold(&lp->lp_lock)
 {
-       lnet_nid_t pnid;
        int nnis;
        int rc;
        int cpt;
@@ -3324,12 +3329,11 @@ __must_hold(&lp->lp_lock)
        cpt = lnet_net_lock_current();
        /* Refcount for MD. */
        lnet_peer_addref_locked(lp);
-       pnid = lnet_peer_select_nid(lp);
        lnet_net_unlock(cpt);
 
        nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
 
-       rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
+       rc = lnet_send_ping(lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
                            the_lnet.ln_dc_handler, false);
 
        /*
@@ -3454,18 +3458,17 @@ __must_hold(&lp->lp_lock)
                CERROR("Can't bind push source MD: %d\n", rc);
                goto fail_error;
        }
+
        cpt = lnet_net_lock_current();
        /* Refcount for MD. */
        lnet_peer_addref_locked(lp);
        id.pid = LNET_PID_LUSTRE;
-       id.nid = lnet_peer_select_nid(lp);
+       if (lp->lp_disc_dst_nid != LNET_NID_ANY)
+               id.nid = lp->lp_disc_dst_nid;
+       else
+               id.nid = lp->lp_primary_nid;
        lnet_net_unlock(cpt);
 
-       if (id.nid == LNET_NID_ANY) {
-               rc = -EHOSTUNREACH;
-               goto fail_unlink;
-       }
-
        rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
                     LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
                     LNET_PROTO_PING_MATCHBITS, 0, 0);
@@ -3477,6 +3480,7 @@ __must_hold(&lp->lp_lock)
         * scratch
         */
        lp->lp_disc_src_nid = LNET_NID_ANY;
+       lp->lp_disc_dst_nid = LNET_NID_ANY;
 
        if (rc)
                goto fail_unlink;
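
Taken together with the reply handler above, the push now follows the path the ping reply actually used: lp_disc_src_nid records the NID the reply was addressed to (ev->target, later passed to LNetPut as the source), lp_disc_dst_nid records the peer NID that answered (ev->source), and the push falls back to the primary NID only when no reply has been recorded. A small sketch of that destination choice, with an invented NID stand-in (the real lnet_nid_t is a 64-bit value and LNET_NID_ANY a reserved constant):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t nid_model_t;
#define NID_ANY ((nid_model_t)~0ULL)

struct peer_model {
	nid_model_t primary_nid;
	nid_model_t disc_dst_nid; /* peer NID the ping REPLY came from */
};

/* Mirrors the new destination choice in the push path: prefer the
 * interface that answered our ping, else fall back to the primary NID. */
static nid_model_t push_dest(const struct peer_model *p)
{
	return p->disc_dst_nid != NID_ANY ? p->disc_dst_nid : p->primary_nid;
}

int main(void)
{
	struct peer_model a = { .primary_nid = 1, .disc_dst_nid = 2 };
	struct peer_model b = { .primary_nid = 1, .disc_dst_nid = NID_ANY };

	printf("%llu %llu\n", (unsigned long long)push_dest(&a),
	       (unsigned long long)push_dest(&b)); /* 2 1 */
	return 0;
}
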
@@ -4015,6 +4019,8 @@ int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
                  atomic_read(&lpni->lpni_hstats.hlt_remote_error);
                lpni_hstats->hlpni_health_value =
                  atomic_read(&lpni->lpni_healthv);
+               lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
+               lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
                if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
                        goto out_free_hstats;
                bulk += sizeof(*lpni_hstats);
@@ -4109,7 +4115,7 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
                        lnet_net_unlock(LNET_LOCK_EX);
                        return;
                }
-               atomic_set(&lpni->lpni_healthv, value);
+               lnet_set_lpni_healthv_locked(lpni, value);
                lnet_peer_ni_add_to_recoveryq_locked(lpni,
                                             &the_lnet.ln_mt_peerNIRecovq, now);
                lnet_peer_ni_decref_locked(lpni);
@@ -4130,7 +4136,8 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
                        list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
                                list_for_each_entry(lpni, &lpn->lpn_peer_nis,
                                                    lpni_peer_nis) {
-                                       atomic_set(&lpni->lpni_healthv, value);
+                                       lnet_set_lpni_healthv_locked(lpni,
+                                                                    value);
                                        lnet_peer_ni_add_to_recoveryq_locked(lpni,
                                             &the_lnet.ln_mt_peerNIRecovq, now);
                                }