init_waitqueue_head(&lp->lp_dc_waitq);
spin_lock_init(&lp->lp_lock);
lp->lp_primary_nid = nid;
+
+ /*
+ * all peers created on a router should have health on
+ * if it's not already on.
+ */
+ if (the_lnet.ln_routing && !lnet_health_sensitivity)
+ lp->lp_health_sensitivity = 1;
+
/*
* Turn off discovery for loopback peer. If you're creating a peer
* for the loopback interface then that was initiated when we
return lp;
}
+/*
+ * Return the peer net following the one with net id prev_lpn_id on
+ * lp->lp_peer_nets, wrapping around to the first net when the end of
+ * the list is reached.  A prev_lpn_id of 0 returns the first net.
+ * Returns NULL when a non-zero prev_lpn_id is not found on the list
+ * (or the peer has no nets).
+ * NOTE(review): the _locked suffix suggests the caller must hold the
+ * lnet_net_lock while iterating lp_peer_nets -- confirm at call sites.
+ */
+struct lnet_peer_net *
+lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
+{
+ struct lnet_peer_net *net;
+
+ if (!prev_lpn_id) {
+ /* no net id provided return the first net */
+ net = list_first_entry_or_null(&lp->lp_peer_nets,
+ struct lnet_peer_net,
+ lpn_peer_nets);
+
+ return net;
+ }
+
+ /* find the net after the one provided */
+ list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
+ if (net->lpn_net_id == prev_lpn_id) {
+ /*
+ * if we reached the end of the list loop to the
+ * beginning.
+ */
+ if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
+ return list_first_entry_or_null(&lp->lp_peer_nets,
+ struct lnet_peer_net,
+ lpn_peer_nets);
+ else
+ return list_next_entry(net, lpn_peer_nets);
+ }
+ }
+
+ return NULL;
+}
+
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
struct lnet_peer_net *peer_net,
int cpt;
lnet_net_lock(LNET_LOCK_EX);
+ if (lnet_peer_discovery_disabled)
+ force = 0;
lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
for (cpt = 0; cpt < lncpt; cpt++) {
ptable = the_lnet.ln_peer_tables[cpt];
return primary_nid;
}
+/*
+ * Return true when discovery must not be performed on this peer:
+ * either the global lnet_peer_discovery_disabled tunable is set, the
+ * peer is not Multi-Rail capable, or the peer itself has discovery
+ * turned off (LNET_PEER_NO_DISCOVERY).
+ * Caller must hold lp->lp_lock, since lp_state is read here; the
+ * unlocked wrapper lnet_is_discovery_disabled() takes that lock.
+ */
+bool
+lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
+{
+ if (lnet_peer_discovery_disabled)
+ return true;
+
+ if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
+ (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Peer Discovery
+ */
+/*
+ * Unlocked wrapper around lnet_is_discovery_disabled_locked(): takes
+ * lp->lp_lock for the duration of the state check and reports whether
+ * discovery is disabled for this peer.
+ */
+bool
+lnet_is_discovery_disabled(struct lnet_peer *lp)
+{
+ bool rc = false;
+
+ spin_lock(&lp->lp_lock);
+ rc = lnet_is_discovery_disabled_locked(lp);
+ spin_unlock(&lp->lp_lock);
+
+ return rc;
+}
+
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
goto out_unlock;
}
lp = lpni->lpni_peer_net->lpn_peer;
+
while (!lnet_peer_is_uptodate(lp)) {
rc = lnet_discover_peer_locked(lpni, cpt, true);
if (rc)
goto out_decref;
lp = lpni->lpni_peer_net->lpn_peer;
+
+ /* Only try once if discovery is disabled */
+ if (lnet_is_discovery_disabled(lp))
+ break;
}
primary_nid = lp->lp_primary_nid;
out_decref:
struct lnet_peer *lp;
struct lnet_peer_net *lpn;
struct lnet_peer_ni *lpni;
- unsigned flags = 0;
+ /*
+ * Assume peer is Multi-Rail capable and let discovery find out
+ * otherwise.
+ */
+ unsigned flags = LNET_PEER_MULTI_RAIL;
int rc = 0;
if (nid == LNET_NID_ANY) {
return lpni;
}
-/*
- * Peer Discovery
- */
-
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
return rc;
}
+/*
+ * Unlocked wrapper around lnet_peer_is_uptodate_locked(): takes
+ * lp->lp_lock for the duration of the check and reports whether the
+ * peer is up to date from the point of view of discovery.
+ */
+bool
+lnet_peer_is_uptodate(struct lnet_peer *lp)
+{
+ bool rc;
+
+ spin_lock(&lp->lp_lock);
+ rc = lnet_peer_is_uptodate_locked(lp);
+ spin_unlock(&lp->lp_lock);
+ return rc;
+}
+
/*
* Is a peer uptodate from the point of view of discovery?
*
* Otherwise look at whether the peer needs rediscovering.
*/
bool
-lnet_peer_is_uptodate(struct lnet_peer *lp)
+lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
+__must_hold(&lp->lp_lock)
{
bool rc;
- spin_lock(&lp->lp_lock);
if (lp->lp_state & (LNET_PEER_DISCOVERING |
LNET_PEER_FORCE_PING |
LNET_PEER_FORCE_PUSH)) {
rc = false;
- } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
- rc = true;
} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
- if (lnet_peer_discovery_disabled)
- rc = true;
- else
- rc = false;
+ rc = false;
} else if (lnet_peer_needs_push(lp)) {
rc = false;
} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
} else {
rc = false;
}
- spin_unlock(&lp->lp_lock);
return rc;
}
DEFINE_WAIT(wait);
struct lnet_peer *lp;
int rc = 0;
+ int count = 0;
again:
lnet_net_unlock(cpt);
* zombie if we race with DLC, so we must check for that.
*/
for (;;) {
+ /* Keep lp alive when the lnet_net_lock is unlocked */
+ lnet_peer_addref_locked(lp);
prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
if (signal_pending(current))
break;
if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
break;
+ /*
+ * Don't repeat discovery if discovery is disabled. This is
+ * done to ensure we can use discovery as a standard ping as
+ * well for backwards compatibility with routers which do not
+ * have discovery or have discovery disabled
+ */
+ if (lnet_is_discovery_disabled(lp) && count > 0)
+ break;
if (lp->lp_dc_error)
break;
if (lnet_peer_is_uptodate(lp))
break;
lnet_peer_queue_for_discovery(lp);
+ count++;
+ CDEBUG(D_NET, "Discovery attempt # %d\n", count);
+
/*
- * if caller requested a non-blocking operation then
- * return immediately. Once discovery is complete then the
- * peer ref will be decremented and any pending messages
- * that were stopped due to discovery will be transmitted.
+ * If caller requested a non-blocking operation then
+ * return immediately. Once discovery is complete any
+ * pending messages that were stopped due to discovery
+ * will be transmitted.
*/
if (!block)
break;
- lnet_peer_addref_locked(lp);
lnet_net_unlock(LNET_LOCK_EX);
schedule();
finish_wait(&lp->lp_dc_waitq, &wait);
lnet_net_unlock(LNET_LOCK_EX);
lnet_net_lock(cpt);
-
+ lnet_peer_decref_locked(lp);
/*
- * If the peer has changed after we've discovered the older peer,
- * then we need to discovery the new peer to make sure the
- * interface information is up to date
+ * The peer may have changed, so re-check and rediscover if that turns
+ * out to have been the case. The reference count on lp ensured that
+ * even if it was unlinked from lpni the memory could not be recycled.
+ * Thus the check below is sufficient to determine whether the peer
+ * changed. If the peer changed, then lp must not be dereferenced.
*/
if (lp != lpni->lpni_peer_net->lpn_peer)
goto again;
rc = lp->lp_dc_error;
else if (!block)
CDEBUG(D_NET, "non-blocking discovery\n");
- else if (!lnet_peer_is_uptodate(lp))
+ else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
goto again;
CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
goto out;
}
+
+ /*
+ * Only enable the multi-rail feature on the peer if both sides of
+ * the connection have discovery on
+ */
+ if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
+ CDEBUG(D_NET, "Peer %s has Multi-Rail feature enabled\n",
+ libcfs_nid2str(lp->lp_primary_nid));
+ lp->lp_state |= LNET_PEER_MULTI_RAIL;
+ } else {
+ CDEBUG(D_NET, "Peer %s has Multi-Rail feature disabled\n",
+ libcfs_nid2str(lp->lp_primary_nid));
+ lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
+ }
+
+ /*
+ * The peer may have discovery disabled at its end. Set
+ * NO_DISCOVERY as appropriate.
+ */
+ if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
+ !lnet_peer_discovery_disabled) {
+ CDEBUG(D_NET, "Peer %s has discovery enabled\n",
+ libcfs_nid2str(lp->lp_primary_nid));
+ lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
+ } else {
+ CDEBUG(D_NET, "Peer %s has discovery disabled\n",
+ libcfs_nid2str(lp->lp_primary_nid));
+ lp->lp_state |= LNET_PEER_NO_DISCOVERY;
+ }
+
/*
* Update the MULTI_RAIL flag based on the reply. If the peer
* was configured with DLC then the setting should match what
CWARN("Reply says %s is Multi-Rail, DLC says not\n",
libcfs_nid2str(lp->lp_primary_nid));
} else {
- lp->lp_state |= LNET_PEER_MULTI_RAIL;
- lnet_peer_clr_non_mr_pref_nids(lp);
+ /*
+ * if discovery is disabled then we don't want to
+ * update the state of the peer. All we'll do is
+ * update the peer_nis which were reported back in
+ * the initial ping
+ */
+
+ if (!lnet_is_discovery_disabled_locked(lp)) {
+ lp->lp_state |= LNET_PEER_MULTI_RAIL;
+ lnet_peer_clr_non_mr_pref_nids(lp);
+ }
}
} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
if (lp->lp_state & LNET_PEER_CONFIGURED) {
lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
/*
- * The peer may have discovery disabled at its end. Set
- * NO_DISCOVERY as appropriate.
- */
- if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
- CDEBUG(D_NET, "Peer %s has discovery disabled\n",
- libcfs_nid2str(lp->lp_primary_nid));
- lp->lp_state |= LNET_PEER_NO_DISCOVERY;
- } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
- CDEBUG(D_NET, "Peer %s has discovery enabled\n",
- libcfs_nid2str(lp->lp_primary_nid));
- lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
- }
-
- /*
* Check for truncation of the Reply. Clear PING_SENT and set
* PING_FAILED to trigger a retry.
*/
}
/* We're happy with the state of the data in the buffer. */
- CDEBUG(D_NET, "peer %s data present %u\n",
- libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno);
+ CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
if (lp->lp_state & LNET_PEER_DATA_PRESENT)
lnet_ping_buffer_decref(lp->lp_data);
else
out:
lp->lp_state &= ~LNET_PEER_PING_SENT;
spin_unlock(&lp->lp_lock);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ /*
+ * If this peer is a gateway, call the routing callback to
+ * handle the ping reply
+ */
+ if (lp->lp_rtr_refcount > 0)
+ lnet_router_discovery_ping_reply(lp);
+ lnet_net_unlock(LNET_LOCK_EX);
}
/*
delnis[ndelnis++] = curnis[i];
}
+ /*
+ * If we get here and the discovery is disabled then we don't want
+ * to add or delete any NIs. We just updated the ones we have some
+ * information on, and call it a day
+ */
+ rc = 0;
+ if (lnet_is_discovery_disabled(lp))
+ goto out;
+
for (i = 0; i < naddnis; i++) {
rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
if (rc) {
LIBCFS_FREE(addnis, nnis * sizeof(*addnis));
LIBCFS_FREE(delnis, nnis * sizeof(*delnis));
lnet_ping_buffer_decref(pbuf);
- CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
+ CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
if (rc) {
spin_lock(&lp->lp_lock);
return 0;
}
+/*
+ * Return true if nid appears among the pi_nnis NI status entries of
+ * the given ping info, false otherwise.
+ */
+static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
+{
+ int i;
+
+ for (i = 0; i < pinfo->pi_nnis; i++) {
+ if (pinfo->pi_ni[i].ns_nid == nid)
+ return true;
+ }
+
+ return false;
+}
+
/*
* Update a peer using the data received.
*/
rc = lnet_peer_set_primary_nid(lp, nid, flags);
if (!rc)
rc = lnet_peer_merge_data(lp, pbuf);
- } else if (lp->lp_primary_nid == nid) {
+ /*
+ * if the primary nid of the peer is present in the ping info returned
+ * from the peer, but it's not the local primary peer we have
+ * cached and discovery is disabled, then we don't want to update
+ * our local peer info, by adding or removing NIDs, we just want
+ * to update the status of the nids that we currently have
+ * recorded in that peer.
+ */
+ } else if (lp->lp_primary_nid == nid ||
+ (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
+ lnet_is_discovery_disabled(lp))) {
rc = lnet_peer_merge_data(lp, pbuf);
} else {
lpni = lnet_find_peer_ni_locked(nid);
} else {
struct lnet_peer *new_lp;
new_lp = lpni->lpni_peer_net->lpn_peer;
+ /*
+ * if lp has discovery/MR enabled that means new_lp
+ * should have discovery/MR enabled as well, since
+ * it's the same peer, which we're about to merge
+ */
+ if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
+ new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
+ if (lp->lp_state & LNET_PEER_MULTI_RAIL)
+ new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
+
rc = lnet_peer_set_primary_data(new_lp, pbuf);
lnet_consolidate_routes_locked(lp, new_lp);
lnet_peer_ni_decref_locked(lpni);
}
}
out:
- CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
+ CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
+ lp->lp_state);
mutex_unlock(&the_lnet.ln_api_mutex);
spin_lock(&lp->lp_lock);
LNetMDUnlink(lp->lp_push_mdh);
LNetInvalidateMDHandle(&lp->lp_push_mdh);
fail_error:
- CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
+ CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
/*
* The errors that get us here are considered hard errors and
* cause Discovery to terminate. So we clear PUSH_SENT, but do
return 0;
}
-/*
- * Mark the peer as to be rediscovered.
- */
-static int lnet_peer_rediscover(struct lnet_peer *lp)
-__must_hold(&lp->lp_lock)
-{
- lp->lp_state |= LNET_PEER_REDISCOVER;
- lp->lp_state &= ~LNET_PEER_DISCOVERING;
-
- CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
-
- return 0;
-}
/*
* Discovering this peer is taking too long. Cancel any Ping or Push
struct lnet_peer *lp;
int rc;
+ wait_for_completion(&the_lnet.ln_started);
+
CDEBUG(D_NET, "started\n");
cfs_block_allsigs();
* forcing a Ping or Push.
*/
spin_lock(&lp->lp_lock);
- CDEBUG(D_NET, "peer %s state %#x\n",
- libcfs_nid2str(lp->lp_primary_nid),
+ CDEBUG(D_NET, "peer %s(%p) state %#x\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp,
lp->lp_state);
if (lp->lp_state & LNET_PEER_DATA_PRESENT)
rc = lnet_peer_data_present(lp);
rc = lnet_peer_send_ping(lp);
else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
rc = lnet_peer_send_push(lp);
- else if (lnet_peer_discovery_disabled)
- rc = lnet_peer_rediscover(lp);
else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
rc = lnet_peer_send_ping(lp);
else if (lnet_peer_needs_push(lp))
rc = lnet_peer_send_push(lp);
else
rc = lnet_peer_discovered(lp);
- CDEBUG(D_NET, "peer %s state %#x rc %d\n",
- libcfs_nid2str(lp->lp_primary_nid),
+ CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp,
lp->lp_state, rc);
spin_unlock(&lp->lp_lock);
LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
- wake_up(&the_lnet.ln_dc_waitq);
+
+ /* In the LNetNIInit() path we may be stopping discovery before it
+ * entered its work loop
+ */
+ if (!completion_done(&the_lnet.ln_started))
+ complete(&the_lnet.ln_started);
+ else
+ wake_up(&the_lnet.ln_dc_waitq);
wait_event(the_lnet.ln_dc_waitq,
the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);