*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/lnet/lib-move.c
*
msg->msg_hdr.payload_length = cpu_to_le32(len);
}
-static void
+void
lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
void *priv = msg->msg_private;
LASSERT(!do_send || msg->msg_tx_delayed);
LASSERT(!msg->msg_receiving);
LASSERT(msg->msg_tx_committed);
+
/* can't get here if we're sending to the loopback interface */
- LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
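+ /* ln_loni is NULL while LNet is starting up or shutting down */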
+ if (the_lnet.ln_loni)
+ LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
/* NB 'lp' is always the next hop */
if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
}
}
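+ /* Give a matching LNet delay rule (fault injection) the chance to
+ * queue this message for delayed sending; treat it like waiting for
+ * a send credit
+ */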
+ if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) &&
+ lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) {
+ msg->msg_tx_delayed = 1;
+ return LNET_CREDIT_WAIT;
+ }
+
/* unset the tx_delay flag as we're going to send it now */
msg->msg_tx_delayed = 0;
struct lnet_msg *msg = sd->sd_msg;
int cpt = sd->sd_cpt;
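+ /* The loopback NI is torn down during shutdown, so don't touch it
+ * unless LNet is still running
+ */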
+ if (the_lnet.ln_state != LNET_STATE_RUNNING)
+ return -ESHUTDOWN;
+
/* No send credit hassles with LOLND */
lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
__u32 routing = send_case & REMOTE_DST;
struct lnet_rsp_tracker *rspt;
- /*
- * Increment sequence number of the selected peer so that we
- * pick the next one in Round Robin.
+ /* Increment sequence number of the selected peer, peer net,
+ * local ni and local net so that we pick the next ones
+ * in Round Robin.
*/
best_lpni->lpni_seq++;
+ best_lpni->lpni_peer_net->lpn_seq++;
+ best_ni->ni_seq++;
+ best_ni->ni_net->net_seq++;
+
+ CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info: [%d:%d:%d:%u]\n",
+ libcfs_nid2str(best_ni->ni_nid),
+ best_ni->ni_seq, best_ni->ni_net->net_seq,
+ atomic_read(&best_ni->ni_tx_credits),
+ best_ni->ni_sel_priority,
+ libcfs_nid2str(best_lpni->lpni_nid),
+ best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq,
+ best_lpni->lpni_txcredits,
+ best_lpni->lpni_sel_priority);
/*
* grab a reference on the peer_ni so it sticks around even if
lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
struct lnet_peer *peer,
struct lnet_peer_net *peer_net,
- int cpt,
- bool incr_seq)
+ int cpt)
{
struct lnet_net *local_net;
struct lnet_ni *best_ni;
best_ni = lnet_get_best_ni(local_net, cur_best_ni,
peer, peer_net, cpt);
- if (incr_seq && best_ni)
- best_ni->ni_seq++;
-
return best_ni;
}
int cpt)
{
struct lnet_peer *peer;
+ struct lnet_peer_ni *new_lpni;
int rc;
lnet_peer_ni_addref_locked(lpni);
lnet_peer_ni_decref_locked(lpni);
return rc;
}
- /* The peer may have changed. */
- peer = lpni->lpni_peer_net->lpn_peer;
+
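+ /* Discovery may have changed or freed the peer NI, so look it up
+ * again by NID rather than trusting the pointer we hold
+ */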
+ new_lpni = lnet_find_peer_ni_locked(lpni->lpni_nid);
+ if (!new_lpni) {
+ lnet_peer_ni_decref_locked(lpni);
+ return -ENOENT;
+ }
+
+ peer = new_lpni->lpni_peer_net->lpn_peer;
spin_lock(&peer->lp_lock);
- if (lnet_peer_is_uptodate_locked(peer)) {
+ if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) {
+ /* The peer NI did not change and the peer is up to date.
+ * Nothing more to do.
+ */
spin_unlock(&peer->lp_lock);
lnet_peer_ni_decref_locked(lpni);
+ lnet_peer_ni_decref_locked(new_lpni);
return 0;
}
- /* queue message and return */
+ spin_unlock(&peer->lp_lock);
+
+ /* Either the peer NI changed during discovery, or the peer isn't up
+ * to date. In both cases we want to queue the message on the
+ * (possibly new) peer's pending queue and queue the peer for discovery.
+ */
msg->msg_sending = 0;
msg->msg_txpeer = NULL;
- list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
- spin_unlock(&peer->lp_lock);
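+ /* Drop the net lock while queueing the message;
+ * lnet_peer_queue_message() does its own locking
+ */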
+ lnet_net_unlock(cpt);
+ lnet_peer_queue_message(peer, msg);
+ lnet_net_lock(cpt);
lnet_peer_ni_decref_locked(lpni);
+ lnet_peer_ni_decref_locked(new_lpni);
CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
msg, libcfs_nid2str(peer->lp_primary_nid));
struct lnet_peer **gw_peer)
{
int rc;
- __u32 local_lnet;
struct lnet_peer *gw;
struct lnet_peer *lp;
struct lnet_peer_net *lpn;
if (gwni) {
gw = gwni->lpni_peer_net->lpn_peer;
lnet_peer_ni_decref_locked(gwni);
- if (gw->lp_rtr_refcount) {
- local_lnet = LNET_NIDNET(sd->sd_rtr_nid);
+ if (gw->lp_rtr_refcount)
route_found = true;
- }
} else {
CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n",
libcfs_nid2str(sd->sd_rtr_nid));
gw = best_route->lr_gateway;
LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
- local_lnet = best_route->lr_lnet;
}
/*
* This means we might delay the message until discovery has
* completed
*/
- sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
if (rc)
return rc;
- if (!sd->sd_best_ni)
- sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
- lnet_peer_get_net_locked(gw,
- local_lnet),
- sd->sd_md_cpt,
- true);
-
if (!sd->sd_best_ni) {
- CERROR("Internal Error. Expected local ni on %s but non found :%s\n",
- libcfs_net2str(local_lnet),
- libcfs_nid2str(sd->sd_src_nid));
- return -EFAULT;
+ lpn = gwni->lpni_peer_net;
+ sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn,
+ sd->sd_md_cpt);
+ if (!sd->sd_best_ni) {
+ CERROR("Internal Error. Expected local ni on %s but non found: %s\n",
+ libcfs_net2str(lpn->lpn_net_id),
+ libcfs_nid2str(sd->sd_src_nid));
+ return -EFAULT;
+ }
}
*gw_lpni = gwni;
lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
bool discovery)
{
- struct lnet_peer_net *peer_net = NULL;
+ struct lnet_peer_net *lpn = NULL;
+ struct lnet_peer_net *best_lpn = NULL;
+ struct lnet_net *net = NULL;
+ struct lnet_net *best_net = NULL;
struct lnet_ni *best_ni = NULL;
- int lpn_healthv = 0;
+ int best_lpn_healthv = 0;
+ int best_net_healthv = 0;
+ int net_healthv;
+ __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
+ __u32 lpn_sel_prio;
+ __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY;
+ __u32 net_sel_prio;
+ bool exit = false;
/*
* The peer can have multiple interfaces, some of them can be on
*/
/* go through all the peer nets and find the best_ni */
- list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
+ list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) {
/*
* The peer's list of nets can contain non-local nets. We
* want to only examine the local ones.
*/
- if (!lnet_get_net_locked(peer_net->lpn_net_id))
- continue;
-
- /* always select the lpn with the best health */
- if (lpn_healthv <= peer_net->lpn_healthv)
- lpn_healthv = peer_net->lpn_healthv;
- else
+ net = lnet_get_net_locked(lpn->lpn_net_id);
+ if (!net)
continue;
- best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer, peer_net,
- md_cpt, false);
+ lpn_sel_prio = lpn->lpn_sel_priority;
+ net_healthv = lnet_get_net_healthv_locked(net);
+ net_sel_prio = net->net_sel_priority;
/*
* if this is a discovery message and lp_disc_net_id is
* specified then use that net to send the discovery on.
*/
- if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
- discovery)
+ if (peer->lp_disc_net_id == lpn->lpn_net_id &&
+ discovery) {
+ exit = true;
+ goto select_lpn;
+ }
+
+ if (!best_lpn)
+ goto select_lpn;
+
+ /* always select the lpn with the best health */
+ if (best_lpn_healthv > lpn->lpn_healthv)
+ continue;
+ else if (best_lpn_healthv < lpn->lpn_healthv)
+ goto select_lpn;
+
+ /* select the preferred peer and local nets */
+ if (best_lpn_sel_prio < lpn_sel_prio)
+ continue;
+ else if (best_lpn_sel_prio > lpn_sel_prio)
+ goto select_lpn;
+
+ if (best_net_healthv > net_healthv)
+ continue;
+ else if (best_net_healthv < net_healthv)
+ goto select_lpn;
+
+ if (best_net_sel_prio < net_sel_prio)
+ continue;
+ else if (best_net_sel_prio > net_sel_prio)
+ goto select_lpn;
+
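+ /* round robin over the peer networks */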
+ if (best_lpn->lpn_seq < lpn->lpn_seq)
+ continue;
+ else if (best_lpn->lpn_seq > lpn->lpn_seq)
+ goto select_lpn;
+
+ /* round robin over the local networks */
+ if (best_net->net_seq <= net->net_seq)
+ continue;
+
+select_lpn:
+ best_net_healthv = net_healthv;
+ best_net_sel_prio = net_sel_prio;
+ best_lpn_healthv = lpn->lpn_healthv;
+ best_lpn_sel_prio = lpn_sel_prio;
+ best_lpn = lpn;
+ best_net = net;
+
+ if (exit)
break;
}
- if (best_ni)
- /* increment sequence number so we can round robin */
- best_ni->ni_seq++;
+ if (best_lpn) {
+ /* Select the best NI on the same net as best_lpn chosen
+ * above
+ */
+ best_ni = lnet_find_best_ni_on_spec_net(NULL, peer,
+ best_lpn, md_cpt);
+ }
return best_ni;
}
best_ni =
lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
sd->sd_best_lpni->lpni_peer_net,
- sd->sd_md_cpt, true);
+ sd->sd_md_cpt);
/* If there is no best_ni we don't have a route */
if (!best_ni) {
CERROR("no path to %s from net %s\n",
sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL,
sd->sd_peer,
sd->sd_best_lpni->lpni_peer_net,
- sd->sd_md_cpt,
- true);
+ sd->sd_md_cpt);
if (!sd->sd_best_ni) {
CERROR("Unable to forward message to %s. No local NI available\n",
libcfs_nid2str(sd->sd_dst_nid));
sd->sd_best_ni =
lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
sd->sd_best_lpni->lpni_peer_net,
- sd->sd_md_cpt, true);
+ sd->sd_md_cpt);
if (!sd->sd_best_ni) {
/*
lnet_nid_t nid;
int healthv;
int rc;
+ time64_t now;
/*
* splice the recovery queue on a local queue. We will iterate
&local_queue);
lnet_net_unlock(0);
+ now = ktime_get_seconds();
+
list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
/*
* if an NI is being deleted or it is now healthy, there
ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
}
+
lnet_ni_unlock(ni);
- lnet_net_unlock(0);
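+ /* Skip this NI if its next scheduled ping time hasn't arrived yet */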
+ if (now < ni->ni_next_ping) {
+ lnet_net_unlock(0);
+ continue;
+ }
+
+ lnet_net_unlock(0);
CDEBUG(D_NET, "attempting to recover local ni: %s\n",
libcfs_nid2str(ni->ni_nid));
LNetMDUnlink(mdh);
continue;
}
- /*
- * Same note as in lnet_recover_peer_nis(). When
- * we're sending the ping, the NI is free to be
- * deleted or manipulated. By this point it
- * could've been added back on the recovery queue,
- * and a refcount taken on it.
- * So we can't just add it blindly again or we'll
- * corrupt the queue. We must check under lock if
- * it's not on any list and if not then add it
- * to the processed list, which will eventually be
- * spliced back on to the recovery queue.
- */
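+ /* Count the recovery pings sent on this NI */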
+ ni->ni_ping_count++;
+
ni->ni_ping_mdh = mdh;
- if (list_empty(&ni->ni_recovery)) {
- list_add_tail(&ni->ni_recovery, &processed_list);
- lnet_ni_addref_locked(ni, 0);
- }
- lnet_net_unlock(0);
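+ /* Re-queue for another ping attempt; the helper handles the case
+ * where the NI was already re-added to a recovery queue while we
+ * were unlocked
+ */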
+ lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
+ now);
- lnet_ni_lock(ni);
- if (rc)
+ if (rc) {
+ lnet_ni_lock(ni);
ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
- }
- lnet_ni_unlock(ni);
+ lnet_ni_unlock(ni);
+ }
+ lnet_net_unlock(0);
+ } else {
+ lnet_ni_unlock(ni);
+ }
}
/*
lnet_nid_t nid;
int healthv;
int rc;
+ time64_t now;
/*
* Always use cpt 0 for locking across all interactions with
&local_queue);
lnet_net_unlock(0);
+ now = ktime_get_seconds();
+
list_for_each_entry_safe(lpni, tmp, &local_queue,
lpni_recovery) {
/*
}
spin_unlock(&lpni->lpni_lock);
+
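+ /* Skip this peer NI if its next ping time hasn't arrived yet */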
+ if (now < lpni->lpni_next_ping) {
+ lnet_net_unlock(0);
+ continue;
+ }
+
lnet_net_unlock(0);
/*
continue;
}
+ lpni->lpni_ping_count++;
+
lpni->lpni_recovery_ping_mdh = mdh;
- /*
- * While we're unlocked the lpni could've been
- * readded on the recovery queue. In this case we
- * don't need to add it to the local queue, since
- * it's already on there and the thread that added
- * it would've incremented the refcount on the
- * peer, which means we need to decref the refcount
- * that was implicitly grabbed by find_peer_ni_locked.
- * Otherwise, if the lpni is still not on
- * the recovery queue, then we'll add it to the
- * processed list.
- */
- if (list_empty(&lpni->lpni_recovery))
- list_add_tail(&lpni->lpni_recovery, &processed_list);
- else
- lnet_peer_ni_decref_locked(lpni);
- lnet_net_unlock(0);
- spin_lock(&lpni->lpni_lock);
- if (rc)
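+ /* As with the local NI case, the helper only re-queues the peer
+ * NI if it is still eligible for recovery
+ */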
+ lnet_peer_ni_add_to_recoveryq_locked(lpni,
+ &processed_list,
+ now);
+ if (rc) {
+ spin_lock(&lpni->lpni_lock);
lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
- }
- spin_unlock(&lpni->lpni_lock);
+ spin_unlock(&lpni->lpni_lock);
+ }
+
+ /* Drop the ref taken by lnet_find_peer_ni_locked() */
+ lnet_peer_ni_decref_locked(lpni);
+ lnet_net_unlock(0);
+ } else {
+ spin_unlock(&lpni->lpni_lock);
+ }
}
list_splice_init(&processed_list, &local_queue);
static int
lnet_monitor_thread(void *arg)
{
- time64_t recovery_timeout = 0;
time64_t rsp_timeout = 0;
- int interval;
time64_t now;
wait_for_completion(&the_lnet.ln_started);
rsp_timeout = now + (lnet_transaction_timeout / 2);
}
- if (now >= recovery_timeout) {
- lnet_recover_local_nis();
- lnet_recover_peer_nis();
- recovery_timeout = now + lnet_recovery_interval;
- }
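+ /* Recovery now runs on every pass of the monitor thread; the
+ * per-NI next ping times throttle the actual ping traffic
+ */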
+ lnet_recover_local_nis();
+ lnet_recover_peer_nis();
/*
* TODO do we need to check if we should sleep without
* if we wake up every 1 second? Although, we've seen
* cases where we get a complaint that an idle thread
* is waking up unnecessarily.
- *
- * Take into account the current net_count when you wake
- * up for alive router checking, since we need to check
- * possibly as many networks as we have configured.
*/
- interval = min(lnet_recovery_interval,
- min((unsigned int) alive_router_check_interval /
- lnet_current_net_count,
- lnet_transaction_timeout / 2));
wait_for_completion_interruptible_timeout(
&the_lnet.ln_mt_wait_complete,
- cfs_time_seconds(interval));
+ cfs_time_seconds(1));
/* Must re-init the completion before testing anything,
* including ln_mt_state.
*/
goto drop;
}
- if (lnet_drop_asym_route && for_me &&
- LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
- struct lnet_net *net;
- struct lnet_remotenet *rnet;
- bool found = true;
-
- /* we are dealing with a routed message,
- * so see if route to reach src_nid goes through from_nid
- */
- lnet_net_lock(cpt);
- net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid));
- if (!net) {
- lnet_net_unlock(cpt);
- CERROR("net %s not found\n",
- libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
- return -EPROTO;
- }
-
- rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
- if (rnet) {
- struct lnet_peer *gw = NULL;
- struct lnet_peer_ni *lpni = NULL;
- struct lnet_route *route;
-
- list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
- found = false;
- gw = route->lr_gateway;
- if (route->lr_lnet != net->net_id)
- continue;
- /*
- * if the nid is one of the gateway's NIDs
- * then this is a valid gateway
- */
- while ((lpni = lnet_get_next_peer_ni_locked(gw,
- NULL, lpni)) != NULL) {
- if (lpni->lpni_nid == from_nid) {
- found = true;
- break;
- }
- }
- }
- }
- lnet_net_unlock(cpt);
- if (!found) {
- /* we would not use from_nid to route a message to
- * src_nid
- * => asymmetric routing detected but forbidden
- */
- CERROR("%s, src %s: Dropping asymmetrical route %s\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
- goto drop;
- }
- }
-
msg = lnet_msg_alloc();
if (msg == NULL) {
CERROR("%s, src %s: Dropping %s (out of memory)\n",
goto drop;
}
- if (the_lnet.ln_routing)
- lpni->lpni_last_alive = ktime_get_seconds();
+ /* If this message was forwarded to us from a router then we may need
+ * to update router aliveness or check for an asymmetrical route
+ * (or both)
+ */
+ if (((lnet_drop_asym_route && for_me) ||
+ !lpni->lpni_peer_net->lpn_peer->lp_alive) &&
+ LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
+ __u32 src_net_id = LNET_NIDNET(src_nid);
+ struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer;
+ struct lnet_route *route;
+ bool found = false;
+
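+ /* Check whether from_nid is a gateway on a route back to the
+ * source net
+ */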
+ list_for_each_entry(route, &gw->lp_routes, lr_gwlist) {
+ if (route->lr_net == src_net_id) {
+ found = true;
+ /* If we're transitioning the gateway from
+ * dead -> alive, and discovery is disabled
+ * locally or on the gateway, then we need to
+ * update the cached route aliveness for each
+ * route to the src_nid's net.
+ *
+ * Otherwise, we're only checking for a
+ * symmetrical route, and we can break out
+ * of the loop
+ */
+ if (!gw->lp_alive &&
+ lnet_is_discovery_disabled(gw))
+ lnet_set_route_aliveness(route, true);
+ else
+ break;
+ }
+ }
+ if (lnet_drop_asym_route && for_me && !found) {
+ lnet_net_unlock(cpt);
+ /* we would not use from_nid to route a message to
+ * src_nid
+ * => asymmetric routing detected but forbidden
+ */
+ CERROR("%s, src %s: Dropping asymmetrical route %s\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid), lnet_msgtyp2str(type));
+ lnet_msg_free(msg);
+ goto drop;
+ }
+ if (!gw->lp_alive) {
+ struct lnet_peer_net *lpn;
+ struct lnet_peer_ni *lpni2;
+
+ gw->lp_alive = true;
+ /* Mark all remote NIs on src_nid's net UP */
+ lpn = lnet_peer_get_net_locked(gw, src_net_id);
+ if (lpn)
+ list_for_each_entry(lpni2, &lpn->lpn_peer_nis,
+ lpni_peer_nis)
+ lpni2->lpni_ns_status = LNET_NI_STATUS_UP;
+ }
+ }
+
+ lpni->lpni_last_alive = ktime_get_seconds();
msg->msg_rxpeer = lpni;
msg->msg_rxni = ni;
int cpt;
__u32 order = 2;
struct list_head *rn_list;
+ bool matched_dstnet = false;
/* if !local_nid_dist_zero, I don't return a distance of 0 ever
* (when lustre sees a distance of 0, it substitutes 0@lo), so I
return local_nid_dist_zero ? 0 : 1;
}
- if (LNET_NIDNET(ni->ni_nid) == dstnet) {
- /* Check if ni was originally created in
- * current net namespace.
- * If not, assign order above 0xffff0000,
- * to make this ni not a priority. */
- if (current->nsproxy &&
- !net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
- order += 0xffff0000;
- if (srcnidp != NULL)
+ if (!matched_dstnet && LNET_NIDNET(ni->ni_nid) == dstnet) {
+ matched_dstnet = true;
+ /* We matched the destination net, but we may have
+ * additional local NIs to inspect.
+ *
+ * We record the nid and order here, but they may be
+ * overwritten if a later NI matches dstnid in the
+ * check above.
+ */
+ if (srcnidp)
*srcnidp = ni->ni_nid;
- if (orderp != NULL)
- *orderp = order;
- lnet_net_unlock(cpt);
- return 1;
+
+ if (orderp) {
+ /* Check whether this NI was originally created in the
+ * current net namespace. If not, assign an order above
+ * 0xffff0000 so that it is not prioritized.
+ */
+ if (current->nsproxy &&
+ !net_eq(ni->ni_net_ns,
+ current->nsproxy->net_ns))
+ *orderp = order + 0xffff0000;
+ else
+ *orderp = order;
+ }
}
order++;
}
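+ /* The destination net is local; no need to check remote nets */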
+ if (matched_dstnet) {
+ lnet_net_unlock(cpt);
+ return 1;
+ }
+
rn_list = lnet_net2rnethash(dstnet);
list_for_each(e, rn_list) {
rnet = list_entry(e, struct lnet_remotenet, lrn_list);