int cpt)
{
struct lnet_peer *peer;
+ struct lnet_peer_ni *new_lpni;
int rc;
lnet_peer_ni_addref_locked(lpni);
lnet_peer_ni_decref_locked(lpni);
return rc;
}
- /* The peer may have changed. */
- peer = lpni->lpni_peer_net->lpn_peer;
+
+ new_lpni = lnet_find_peer_ni_locked(lpni->lpni_nid);
+ if (!new_lpni) {
+ lnet_peer_ni_decref_locked(lpni);
+ return -ENOENT;
+ }
+
+ peer = new_lpni->lpni_peer_net->lpn_peer;
spin_lock(&peer->lp_lock);
- if (lnet_peer_is_uptodate_locked(peer)) {
+ if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
+ /* The peer NI did not change and the peer is up to date.
+ * Nothing more to do.
+ */
spin_unlock(&peer->lp_lock);
lnet_peer_ni_decref_locked(lpni);
+ lnet_peer_ni_decref_locked(new_lpni);
return 0;
}
- /* queue message and return */
+ spin_unlock(&peer->lp_lock);
+
+ /* Either the peer NI changed during discovery, or the peer isn't up
+ * to date. In both cases we want to queue the message on the
+ * (possibly new) peer's pending queue and queue the peer for discovery.
+ */
msg->msg_sending = 0;
msg->msg_txpeer = NULL;
- list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
- spin_unlock(&peer->lp_lock);
+ lnet_net_unlock(cpt);
+ lnet_peer_queue_message(peer, msg);
+ lnet_net_lock(cpt);
lnet_peer_ni_decref_locked(lpni);
+ lnet_peer_ni_decref_locked(new_lpni);
CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
msg, libcfs_nid2str(peer->lp_primary_nid));
lnet_nid_t nid;
int healthv;
int rc;
+ time64_t now;
/*
* Always use cpt 0 for locking across all interactions with
&local_queue);
lnet_net_unlock(0);
+ now = ktime_get_seconds();
+
list_for_each_entry_safe(lpni, tmp, &local_queue,
lpni_recovery) {
/*
}
lpni->lpni_recovery_ping_mdh = mdh;
- /*
- * While we're unlocked the lpni could've been
- * readded on the recovery queue. In this case we
- * don't need to add it to the local queue, since
- * it's already on there and the thread that added
- * it would've incremented the refcount on the
- * peer, which means we need to decref the refcount
- * that was implicitly grabbed by find_peer_ni_locked.
- * Otherwise, if the lpni is still not on
- * the recovery queue, then we'll add it to the
- * processed list.
- */
- if (list_empty(&lpni->lpni_recovery))
- list_add_tail(&lpni->lpni_recovery, &processed_list);
- else
- lnet_peer_ni_decref_locked(lpni);
- lnet_net_unlock(0);
- spin_lock(&lpni->lpni_lock);
- if (rc)
+ lnet_peer_ni_add_to_recoveryq_locked(lpni,
+ &processed_list,
+ now);
+ if (rc) {
+ spin_lock(&lpni->lpni_lock);
lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
- }
- spin_unlock(&lpni->lpni_lock);
+ spin_unlock(&lpni->lpni_lock);
+ }
+
+ /* Drop the ref taken by lnet_find_peer_ni_locked() */
+ lnet_peer_ni_decref_locked(lpni);
+ lnet_net_unlock(0);
+ } else
+ spin_unlock(&lpni->lpni_lock);
}
list_splice_init(&processed_list, &local_queue);
goto drop;
}
- if (lnet_drop_asym_route && for_me &&
- LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
- struct lnet_net *net;
- struct lnet_remotenet *rnet;
- bool found = true;
-
- /* we are dealing with a routed message,
- * so see if route to reach src_nid goes through from_nid
- */
- lnet_net_lock(cpt);
- net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
- if (!net) {
- lnet_net_unlock(cpt);
- CERROR("net %s not found\n",
- libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
- return -EPROTO;
- }
-
- rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
- if (rnet) {
- struct lnet_peer *gw = NULL;
- struct lnet_peer_ni *lpni = NULL;
- struct lnet_route *route;
-
- list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
- found = false;
- gw = route->lr_gateway;
- if (route->lr_lnet != net->net_id)
- continue;
- /*
- * if the nid is one of the gateway's NIDs
- * then this is a valid gateway
- */
- while ((lpni = lnet_get_next_peer_ni_locked(gw,
- NULL, lpni)) != NULL) {
- if (lpni->lpni_nid == from_nid) {
- found = true;
- break;
- }
- }
- }
- }
- lnet_net_unlock(cpt);
- if (!found) {
- /* we would not use from_nid to route a message to
- * src_nid
- * => asymmetric routing detected but forbidden
- */
- CERROR("%s, src %s: Dropping asymmetrical route %s\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
- goto drop;
- }
- }
-
msg = lnet_msg_alloc();
if (msg == NULL) {
CERROR("%s, src %s: Dropping %s (out of memory)\n",
goto drop;
}
- if (the_lnet.ln_routing)
- lpni->lpni_last_alive = ktime_get_seconds();
+ if (lnet_drop_asym_route && for_me &&
+ LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
+ __u32 src_net_id = LNET_NIDNET(src_nid);
+ struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
+ struct lnet_route *route;
+ bool found = false;
+
+ list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
+ if (route->lr_net == src_net_id) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ lnet_net_unlock(cpt);
+ /* we would not use from_nid to route a message to
+ * src_nid
+ * => asymmetric routing detected but forbidden
+ */
+ CERROR("%s, src %s: Dropping asymmetrical route %s\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
+ lnet_msg_free(msg);
+ goto drop;
+ }
+ }
+
+ lpni->lpni_last_alive = ktime_get_seconds();
msg->msg_rxpeer = lpni;
msg->msg_rxni = ni;