MODULE_PARM_DESC(router_sensitivity_percentage,
"How healthy a gateway should be to be used in percent");
+static void lnet_add_route_to_rnet(struct lnet_remotenet *rnet,
+ struct lnet_route *route);
+static void lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
+ struct list_head *zombies);
+
static int
rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
}
+/*
+ * lnet_move_route - unlink @route from its remote net's route list and
+ * either re-home it under gateway peer @lp, collect it on @rt_list, or
+ * free it.
+ *
+ * NOTE(review): presumably called with net_lock/EX held (it uses the
+ * *_locked lookup helpers) - confirm against callers.
+ */
void
+lnet_move_route(struct lnet_route *route, struct lnet_peer *lp,
+ struct list_head *rt_list)
+{
+ struct lnet_remotenet *rnet;
+ struct list_head zombies;
+ struct list_head *l;
+
+ INIT_LIST_HEAD(&zombies);
+
+ /* unlinked routes are collected on the caller's list when one is
+ * provided, otherwise on the local zombies list
+ */
+ if (rt_list)
+ l = rt_list;
+ else
+ l = &zombies;
+
+ rnet = lnet_find_rnet_locked(route->lr_net);
+ LASSERT(rnet);
+
+ CDEBUG(D_NET, "deleting route %s->%s\n",
+ libcfs_net2str(route->lr_net),
+ libcfs_nid2str(route->lr_nid));
+
+ /*
+ * use the gateway's lp_primary_nid to delete the route as the
+ * lr_nid can be a constituent NID of the peer
+ */
+ lnet_del_route_from_rnet(route->lr_gateway->lp_primary_nid,
+ &rnet->lrn_routes, l);
+
+ if (lp) {
+ /* NOTE(review): re-homing takes the first entry on @l, which
+ * assumes the route just unlinked sits at the head (i.e. @l
+ * was empty on entry) - confirm callers honor this.
+ */
+ route = list_first_entry(l, struct lnet_route,
+ lr_list);
+ route->lr_gateway = lp;
+ lnet_add_route_to_rnet(rnet, route);
+ } else {
+ /* no new gateway: free the unlinked route(s) unless the
+ * caller asked to keep them on @rt_list
+ */
+ while (!list_empty(l) && !rt_list) {
+ route = list_first_entry(l, struct lnet_route,
+ lr_list);
+ list_del(&route->lr_list);
+ LIBCFS_FREE(route, sizeof(*route));
+ }
+ }
+}
+
+/*
+ * lnet_rtr_transfer_to_peer - transfer router state from peer @src to
+ * peer @target: queued router messages are spliced over, and each of
+ * @src's routes is either re-homed on @target or dropped when @target
+ * already has an equal-or-better route to the same remote net.
+ */
+void
lnet_rtr_transfer_to_peer(struct lnet_peer *src, struct lnet_peer *target)
{
struct lnet_route *route;
+ struct lnet_route *tmp, *tmp2;
lnet_net_lock(LNET_LOCK_EX);
+ CDEBUG(D_NET, "transferring routes from %s -> %s\n",
+ libcfs_nid2str(src->lp_primary_nid),
+ libcfs_nid2str(target->lp_primary_nid));
+ list_for_each_entry(route, &src->lp_routes, lr_gwlist) {
+ CDEBUG(D_NET, "%s: %s->%s\n", libcfs_nid2str(src->lp_primary_nid),
+ libcfs_net2str(route->lr_net),
+ libcfs_nid2str(route->lr_nid));
+ }
+ /* move the list of queued messages to the new peer */
list_splice_init(&src->lp_rtrq, &target->lp_rtrq);
+ /* Resolve duplicates: keep @src's route only when @target has no
+ * equal-or-better route (by priority, then hop count) to the same
+ * remote net. Safe iterators are required because lnet_move_route()
+ * unlinks (and may free) entries while we walk the lists.
+ */
+ list_for_each_entry_safe(route, tmp, &src->lp_routes, lr_gwlist) {
+ struct lnet_route *r2;
+ bool present = false;
+ list_for_each_entry_safe(r2, tmp2, &target->lp_routes, lr_gwlist) {
+ if (route->lr_net == r2->lr_net) {
+ if (route->lr_priority >= r2->lr_priority)
+ present = true;
+ else if (route->lr_hops >= r2->lr_hops)
+ present = true;
+ else
+ /* @target's route is worse: delete it */
+ lnet_move_route(r2, NULL, NULL);
+ }
+ }
+ if (present)
+ /* duplicate of an equal/better route: delete */
+ lnet_move_route(route, NULL, NULL);
+ else
+ /* re-home the route under @target */
+ lnet_move_route(route, target, NULL);
+ }
+
if (list_empty(&target->lp_rtr_list)) {
lnet_peer_addref_locked(target);
list_add_tail(&target->lp_rtr_list, &the_lnet.ln_routers);
}
- /* reset the ref count on the old peer and decrement its ref count */
- src->lp_rtr_refcount = 0;
- lnet_peer_decref_locked(src);
- /* update the router version */
+
the_lnet.ln_routers_version++;
lnet_net_unlock(LNET_LOCK_EX);
}
* enabled.
*/
if (lnet_is_discovery_disabled(gw))
- return route->lr_alive;
+ return atomic_read(&route->lr_alive) == 1;
/*
* check the gateway's interfaces on the local network
* that the remote net must exist on the gateway. For multi-hop
* routes the next-hop will not have the remote net.
*/
- if (avoid_asym_router_failure && route->lr_single_hop) {
+ if (avoid_asym_router_failure &&
+ (route->lr_hops == 1 || route->lr_hops == LNET_UNDEFINED_HOPS)) {
rlpn = lnet_peer_get_net_locked(gw, route->lr_net);
if (!rlpn)
return false;
static inline void
lnet_check_route_inconsistency(struct lnet_route *route)
{
- if (!route->lr_single_hop && (int)route->lr_hops <= 1) {
+ if (!route->lr_single_hop &&
+ (route->lr_hops == 1 || route->lr_hops == LNET_UNDEFINED_HOPS)) {
CWARN("route %s->%s is detected to be multi-hop but hop count is set to %d\n",
libcfs_net2str(route->lr_net),
libcfs_nid2str(route->lr_gateway->lp_primary_nid),
lnet_check_route_inconsistency(route);
}
-static inline void
-lnet_set_route_aliveness(struct lnet_route *route, bool alive)
-{
- /* Log when there's a state change */
- if (route->lr_alive != alive) {
- CERROR("route to %s through %s has gone from %s to %s\n",
- libcfs_net2str(route->lr_net),
- libcfs_nid2str(route->lr_gateway->lp_primary_nid),
- (route->lr_alive) ? "up" : "down",
- alive ? "up" : "down");
- route->lr_alive = alive;
- }
-}
-
+/* Must hold net_lock/EX */
void
lnet_router_discovery_ping_reply(struct lnet_peer *lp)
{
}
route->lr_single_hop = single_hop;
- if (avoid_asym_router_failure && single_hop)
+ if (avoid_asym_router_failure &&
+ (route->lr_hops == 1 ||
+ route->lr_hops == LNET_UNDEFINED_HOPS))
lnet_set_route_aliveness(route, net_up);
else
lnet_set_route_aliveness(route, true);
lp->lp_alive = lp->lp_dc_error == 0;
spin_unlock(&lp->lp_lock);
- /* ping replies are being handled when discovery is disabled */
- if (lnet_is_discovery_disabled_locked(lp))
- return;
-
if (!lp->lp_dc_error) {
+ /* ping replies are being handled when discovery is disabled */
+ if (lnet_is_discovery_disabled_locked(lp))
+ return;
+
/*
* mark single-hop routes. If the remote net is not configured on
* the gateway we assume this is intentional and we mark the
* gateway as multi-hop
*/
- list_for_each_entry(route, &lp->lp_routes, lr_gwlist)
+ list_for_each_entry(route, &lp->lp_routes, lr_gwlist) {
+ lnet_set_route_aliveness(route, true);
lnet_set_route_hop_type(lp, route);
+ }
return;
}
libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
if (gateway == LNET_NID_ANY ||
- LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
- net == LNET_NIDNET(LNET_NID_ANY) ||
+ gateway == LNET_NID_LO_0 ||
+ net == LNET_NET_ANY ||
LNET_NETTYP(net) == LOLND ||
LNET_NIDNET(gateway) == net ||
(hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
route->lr_nid = gateway;
route->lr_priority = priority;
route->lr_hops = hops;
+ if (lnet_peers_start_down())
+ atomic_set(&route->lr_alive, 0);
+ else
+ atomic_set(&route->lr_alive, 1);
lnet_net_lock(LNET_LOCK_EX);
return rc;
}
-static void
+void
lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
struct list_head *zombies)
{
lnet_peer_ni_decref_locked(lpni);
}
- if (net != LNET_NIDNET(LNET_NID_ANY)) {
+ if (net != LNET_NET_ANY) {
rnet = lnet_find_rnet_locked(net);
if (!rnet) {
lnet_net_unlock(LNET_LOCK_EX);
void
lnet_destroy_routes (void)
{
- lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
+ lnet_del_route(LNET_NET_ANY, LNET_NID_ANY);
}
int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
struct lnet_ni *ni;
bool update = false;
- list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- lnet_ni_lock(ni);
- if (ni->ni_status &&
- ni->ni_status->ns_status != status) {
- ni->ni_status->ns_status = status;
- update = true;
- }
- lnet_ni_unlock(ni);
- }
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
+ if (lnet_ni_set_status(ni, status))
+ update = true;
return update;
}
lnet_update_ni_status_locked(void)
{
struct lnet_net *net;
+ struct lnet_ni *ni;
bool push = false;
time64_t now;
time64_t timeout;
continue;
if (now < net->net_last_alive + timeout)
- continue;
+ goto check_ni_fatal;
spin_lock(&net->net_lock);
/* re-check with lock */
if (now < net->net_last_alive + timeout) {
spin_unlock(&net->net_lock);
- continue;
+ goto check_ni_fatal;
}
spin_unlock(&net->net_lock);
* timeout on any of its constituent NIs, then mark all
* the NIs down.
*/
- push = lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN);
+ if (lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN)) {
+ push = true;
+ continue;
+ }
+
+check_ni_fatal:
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ /* lnet_ni_set_status() will perform the same check of
+ * ni_status while holding the ni lock. We can safely
+ * check ni_status without that lock because it is only
+ * written to under net_lock/EX and our caller is
+ * holding a net lock.
+ */
+ if (atomic_read(&ni->ni_fatal_error_on) &&
+ ni->ni_status &&
+ ni->ni_status->ns_status != LNET_NI_STATUS_DOWN &&
+ lnet_ni_set_status(ni, LNET_NI_STATUS_DOWN))
+ push = true;
+ }
}
return push;
* This function is called from the monitor thread to check if there are
* any active routers that need to be checked.
*/
-inline bool
-lnet_router_checker_active(void)
+bool lnet_router_checker_active(void)
{
/* Router Checker thread needs to run when routing is enabled in
* order to call lnet_update_ni_status_locked() */
spin_unlock(&rtr->lp_lock);
continue;
}
- /* make sure we actively discover the router */
+ /* make sure we fully discover the router */
rtr->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
- rtr->lp_state |= LNET_PEER_RTR_DISCOVERY;
+ rtr->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
+ LNET_PEER_RTR_DISCOVERY;
spin_unlock(&rtr->lp_lock);
/* find the peer_ni associated with the primary NID */
int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
while (--npages >= 0)
- __free_page(rb->rb_kiov[npages].kiov_page);
+ __free_page(rb->rb_kiov[npages].bv_page);
LIBCFS_FREE(rb, sz);
}
GFP_KERNEL | __GFP_ZERO);
if (page == NULL) {
while (--i >= 0)
- __free_page(rb->rb_kiov[i].kiov_page);
+ __free_page(rb->rb_kiov[i].bv_page);
LIBCFS_FREE(rb, sz);
return NULL;
}
- rb->rb_kiov[i].kiov_len = PAGE_SIZE;
- rb->rb_kiov[i].kiov_offset = 0;
- rb->rb_kiov[i].kiov_page = page;
+ rb->rb_kiov[i].bv_len = PAGE_SIZE;
+ rb->rb_kiov[i].bv_offset = 0;
+ rb->rb_kiov[i].bv_page = page;
}
return rb;
time64_t now = ktime_get_seconds();
int cpt;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- CDEBUG (D_NET, "%s notifying %s: %s\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid),
- alive ? "up" : "down");
+ CDEBUG(D_NET, "%s notifying %s: %s\n",
+ (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid), alive ? "up" : "down");
if (ni != NULL &&
LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
}
if (alive) {
- if (reset)
+ if (reset) {
+ lpni->lpni_ns_status = LNET_NI_STATUS_UP;
lnet_set_lpni_healthv_locked(lpni,
LNET_MAX_HEALTH_VALUE);
- else
- lnet_inc_lpni_healthv_locked(lpni);
- } else {
- lnet_handle_remote_failure_locked(lpni);
+ } else {
+ __u32 sensitivity = lpni->lpni_peer_net->
+ lpn_peer->lp_health_sensitivity;
+
+ lnet_inc_lpni_healthv_locked(lpni,
+ (sensitivity) ? sensitivity :
+ lnet_health_sensitivity);
+ }
+ } else if (reset) {
+ lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
}
/* recalculate aliveness */
alive = lnet_is_peer_ni_alive(lpni);
+
+ lp = lpni->lpni_peer_net->lpn_peer;
+ /* If this is an LNet router then update route aliveness */
+ if (lp->lp_rtr_refcount) {
+ if (reset)
+ /* reset flag indicates gateway peer went up or down */
+ lp->lp_alive = alive;
+
+ /* If discovery is disabled, locally or on the gateway, then
+ * any routes using lpni as next-hop need to be updated
+ *
+ * NB: We can get many notifications while a route is down, so
+ * we try to avoid the expensive net_lock/EX here for the
+ * common case of receiving duplicate lnet_notify() calls (i.e.
+ * only grab EX lock when we actually need to update the route
+ * aliveness).
+ */
+ if (lnet_is_discovery_disabled(lp)) {
+ list_for_each_entry(route, &lp->lp_routes, lr_gwlist) {
+ if (route->lr_nid == lpni->lpni_nid)
+ lnet_set_route_aliveness(route, alive);
+ }
+ }
+ }
+
lnet_net_unlock(0);
if (ni != NULL && !alive)
cpt = lpni->lpni_cpt;
lnet_net_lock(cpt);
lnet_peer_ni_decref_locked(lpni);
- if (lpni && lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer) {
- lp = lpni->lpni_peer_net->lpn_peer;
- lp->lp_alive = alive;
- list_for_each_entry(route, &lp->lp_routes, lr_gwlist)
- lnet_set_route_aliveness(route, alive);
- }
lnet_net_unlock(cpt);
return 0;