goto out;
}
-
/*
* The peer may have discovery disabled at its end. Set
* NO_DISCOVERY as appropriate.
*/
- if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
- !lnet_peer_discovery_disabled) {
- CDEBUG(D_NET, "Peer %s has discovery enabled\n",
- libcfs_nid2str(lp->lp_primary_nid));
- lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
- } else {
+ if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
+ lnet_peer_discovery_disabled) {
CDEBUG(D_NET, "Peer %s has discovery disabled\n",
libcfs_nid2str(lp->lp_primary_nid));
+
+ /* Detect whether this peer has toggled discovery from on to
+ * off and whether we can delete and re-create the peer. Peers
+ * that were manually configured cannot be deleted by discovery.
+ * We need to delete this peer and re-create it if the peer was
+ * not configured manually, is currently considered DD capable,
+ * and either:
+ * 1. We've already discovered the peer (the peer has toggled
+ * the discovery feature from on to off), or
+ * 2. The peer is considered MR, but it was not user configured
+ * (this was a "temporary" peer created via the kernel APIs
+ * that we're discovering for the first time)
+ */
+ if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
+ LNET_PEER_NO_DISCOVERY)) &&
+ (lp->lp_state & (LNET_PEER_DISCOVERED |
+ LNET_PEER_MULTI_RAIL))) {
+ CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
+ libcfs_nid2str(lp->lp_primary_nid),
+ lp->lp_state);
+ lp->lp_state |= LNET_PEER_MARK_DELETION;
+ }
lp->lp_state |= LNET_PEER_NO_DISCOVERY;
+ } else {
+ CDEBUG(D_NET, "Peer %s has discovery enabled\n",
+ libcfs_nid2str(lp->lp_primary_nid));
+ lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
}
/* put peer back at end of request queue, if discovery not already
 * done */
- if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
+ if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
+ lnet_peer_queue_for_discovery(lp)) {
list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
wake_up(&the_lnet.ln_dc_waitq);
}
/* ... (comment opening lost with stripped hunk header) ... instead
 * of deleting it.
 */
if (!list_empty(&lp->lp_dc_list))
- list_del(&lp->lp_dc_list);
+ list_del_init(&lp->lp_dc_list);
list_for_each_entry_safe(route, tmp,
&lp->lp_routes,
lr_gwlist)
atomic_read(&lpni->lpni_hstats.hlt_remote_error);
lpni_hstats->hlpni_health_value =
atomic_read(&lpni->lpni_healthv);
+ lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
+ lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
goto out_free_hstats;
bulk += sizeof(*lpni_hstats);
lnet_net_unlock(LNET_LOCK_EX);
return;
}
- atomic_set(&lpni->lpni_healthv, value);
+ lnet_set_lpni_healthv_locked(lpni, value);
lnet_peer_ni_add_to_recoveryq_locked(lpni,
&the_lnet.ln_mt_peerNIRecovq, now);
lnet_peer_ni_decref_locked(lpni);
list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
list_for_each_entry(lpni, &lpn->lpn_peer_nis,
lpni_peer_nis) {
- atomic_set(&lpni->lpni_healthv, value);
+ lnet_set_lpni_healthv_locked(lpni,
+ value);
lnet_peer_ni_add_to_recoveryq_locked(lpni,
&the_lnet.ln_mt_peerNIRecovq, now);
}