/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 *
 * This file is part of Lustre, https://wiki.hpdd.intel.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
#define LNET_NRB_TINY_MIN	512	/* min value for each CPT */
#define LNET_NRB_TINY		(LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN	4096	/* min value for each CPT */
#define LNET_NRB_SMALL		(LNET_NRB_SMALL_MIN * 4)
#define LNET_NRB_SMALL_PAGES	1
#define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
#define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES	((LNET_MTU + PAGE_SIZE - 1) >> \
				 PAGE_SHIFT)
static char *forwarding = "";
module_param(forwarding, charp, 0444);
MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers;
module_param(tiny_router_buffers, int, 0444);
MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
module_param(small_router_buffers, int, 0444);
MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");

static int auto_down = 1;
module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
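/*
 * Illustrative usage (assumed, not taken from this file): the read-only
 * (0444) parameters above must be given at module load time, e.g.
 *
 *	modprobe lnet forwarding="enabled" tiny_router_buffers=2048 \
 *		small_router_buffers=16384 large_router_buffers=1024
 *
 * whereas the 0644 parameters below can also be tuned at runtime through
 * /sys/module/lnet/parameters/.
 */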
int
lnet_peer_buffer_credits(struct lnet_net *net)
{
	/* NI option overrides LNet default */
	if (net->net_tunables.lct_peer_rtr_credits > 0)
		return net->net_tunables.lct_peer_rtr_credits;
	if (peer_buffer_credits > 0)
		return peer_buffer_credits;

	/* As an approximation, allow this peer the same number of router
	 * buffers as it is allowed outstanding sends */
	return net->net_tunables.lct_peer_tx_credits;
}
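/*
 * Worked example (values assumed): with lct_peer_rtr_credits == 0 and
 * the peer_buffer_credits module parameter unset, a peer on a net that
 * allows 8 concurrent sends (lct_peer_tx_credits == 8) is granted 8
 * router buffer credits.
 */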
static int lnet_router_checker(void *);

static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");

int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");

static int dead_router_check_interval = 60;
module_param(dead_router_check_interval, int, 0644);
MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 60;
module_param(live_router_check_interval, int, 0644);
MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
module_param(router_ping_timeout, int, 0644);
MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
int
lnet_peers_start_down(void)
{
	return check_routers_before_use;
}
void
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when)
{
	if (cfs_time_before(when, lp->lp_timestamp)) { /* out of date information */
		CDEBUG(D_NET, "Out of date\n");
		return;
	}

	lp->lp_timestamp = when;	/* update timestamp */
	lp->lp_ping_deadline = 0;	/* disable ping timeout */

	if (lp->lp_alive_count != 0 &&		/* got old news */
	    (!lp->lp_alive) == (!alive)) {	/* new date for old news */
		CDEBUG(D_NET, "Old news\n");
		return;
	}

	/* Flag that notification is outstanding */
	lp->lp_alive_count++;
	lp->lp_alive = !(!alive);	/* 1 bit! */
	lp->lp_notify = 1;
	lp->lp_notifylnd |= notifylnd;
	if (lp->lp_alive)
		lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */

	CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
}
static void
lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
	int alive;
	int notifylnd;

	/* Notify only in 1 thread at any time to ensure ordered notification.
	 * NB individual events can be missed; the only guarantee is that you
	 * always get the most recent news */
	if (lp->lp_notifying || ni == NULL)
		return;

	lp->lp_notifying = 1;

	while (lp->lp_notify) {
		alive = lp->lp_alive;
		notifylnd = lp->lp_notifylnd;

		lp->lp_notifylnd = 0;
		lp->lp_notify = 0;

		if (notifylnd && ni->ni_net->net_lnd->lnd_notify != NULL) {
			lnet_net_unlock(lp->lp_cpt);

			/* A new notification could happen now; I'll handle it
			 * when control returns to me */
			(ni->ni_net->net_lnd->lnd_notify)(ni, lp->lp_nid,
							  alive);

			lnet_net_lock(lp->lp_cpt);
		}
	}

	lp->lp_notifying = 0;
}
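/*
 * NB: because lnd_notify() runs with the net lock dropped, a new
 * notification can be flagged in the meantime; the while (lp->lp_notify)
 * loop above then goes around again, so the LND always ends up seeing
 * the most recent state even though intermediate flips may be coalesced.
 */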
static void
lnet_rtr_addref_locked(lnet_peer_t *lp)
{
	LASSERT(lp->lp_refcount > 0);
	LASSERT(lp->lp_rtr_refcount >= 0);

	/* lnet_net_lock must be exclusively locked */
	lp->lp_rtr_refcount++;
	if (lp->lp_rtr_refcount == 1) {
		struct list_head *pos;

		/* a simple insertion sort */
		list_for_each_prev(pos, &the_lnet.ln_routers) {
			lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
						      lp_rtr_list);

			if (rtr->lp_nid < lp->lp_nid)
				break;
		}

		list_add(&lp->lp_rtr_list, pos);
		/* addref for the_lnet.ln_routers */
		lnet_peer_addref_locked(lp);
		the_lnet.ln_routers_version++;
	}
}
static void
lnet_rtr_decref_locked(lnet_peer_t *lp)
{
	LASSERT(lp->lp_refcount > 0);
	LASSERT(lp->lp_rtr_refcount > 0);

	/* lnet_net_lock must be exclusively locked */
	lp->lp_rtr_refcount--;
	if (lp->lp_rtr_refcount == 0) {
		LASSERT(list_empty(&lp->lp_routes));

		if (lp->lp_rcd != NULL) {
			list_add(&lp->lp_rcd->rcd_list,
				 &the_lnet.ln_rcd_deathrow);
			lp->lp_rcd = NULL;
		}

		list_del(&lp->lp_rtr_list);
		/* decref for the_lnet.ln_routers */
		lnet_peer_decref_locked(lp);
		the_lnet.ln_routers_version++;
	}
}
static lnet_remotenet_t *
lnet_find_rnet_locked(__u32 net)
{
	lnet_remotenet_t *rnet;
	struct list_head *tmp;
	struct list_head *rn_list;

	LASSERT(!the_lnet.ln_shutdown);

	rn_list = lnet_net2rnethash(net);
	list_for_each(tmp, rn_list) {
		rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);

		if (rnet->lrn_net == net)
			return rnet;
	}
	return NULL;
}
static void lnet_shuffle_seed(void)
{
	static int seeded;
	__u32 lnd_type;
	__u32 seed[2];
	struct timespec64 ts;
	lnet_ni_t *ni = NULL;

	if (seeded)
		return;

	cfs_get_random_bytes(seed, sizeof(seed));

	/* Nodes with small feet have little entropy
	 * the NID for this node gives the most entropy in the low bits */
	while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
		lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));

		if (lnd_type != LOLND)
			seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
	}

	ktime_get_ts64(&ts);
	cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
	seeded = 1;
}
/* NB expects LNET_LOCK held */
static void
lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
{
	unsigned int len = 0;
	unsigned int offset = 0;
	struct list_head *e;

	lnet_shuffle_seed();

	list_for_each(e, &rnet->lrn_routes) {
		len++;
	}

	/* len+1 positions to add a new entry, also prevents division by 0 */
	offset = cfs_rand() % (len + 1);
	list_for_each(e, &rnet->lrn_routes) {
		if (offset == 0)
			break;
		offset--;
	}
	list_add(&route->lr_list, e);
	list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);

	the_lnet.ln_remote_nets_version++;
	lnet_rtr_addref_locked(route->lr_gateway);
}
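/*
 * E.g. with three routes already on the list there are four candidate
 * insertion points (offset 0..3), so repeated additions scatter
 * gateways uniformly instead of biasing traffic toward whichever
 * gateway happened to be configured first.
 */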
int
lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
	       unsigned int priority)
{
	struct list_head *e;
	lnet_remotenet_t *rnet;
	lnet_remotenet_t *rnet2;
	lnet_route_t *route;
	lnet_ni_t *ni;
	int add_route;
	int rc;

	CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n",
	       libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));

	if (gateway == LNET_NID_ANY ||
	    LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
	    net == LNET_NIDNET(LNET_NID_ANY) ||
	    LNET_NETTYP(net) == LOLND ||
	    LNET_NIDNET(gateway) == net ||
	    (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
		return -EINVAL;

	if (lnet_islocalnet(net))	/* it's a local network */
		return -EEXIST;

	/* Assume net, route, all new */
	LIBCFS_ALLOC(route, sizeof(*route));
	LIBCFS_ALLOC(rnet, sizeof(*rnet));
	if (route == NULL || rnet == NULL) {
		CERROR("Out of memory creating route %s %d %s\n",
		       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
		if (route != NULL)
			LIBCFS_FREE(route, sizeof(*route));
		if (rnet != NULL)
			LIBCFS_FREE(rnet, sizeof(*rnet));
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&rnet->lrn_routes);
	rnet->lrn_net = net;
	route->lr_hops = hops;
	route->lr_net = net;
	route->lr_priority = priority;

	lnet_net_lock(LNET_LOCK_EX);

	rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
	if (rc != 0) {
		lnet_net_unlock(LNET_LOCK_EX);

		LIBCFS_FREE(route, sizeof(*route));
		LIBCFS_FREE(rnet, sizeof(*rnet));

		if (rc == -EHOSTUNREACH) /* gateway is not on a local net. */
			return rc;	 /* ignore the route entry */
		CERROR("Error %d creating route %s %d %s\n", rc,
		       libcfs_net2str(net), hops,
		       libcfs_nid2str(gateway));
		return rc;
	}

	LASSERT(!the_lnet.ln_shutdown);

	rnet2 = lnet_find_rnet_locked(net);
	if (rnet2 == NULL) {
		/* new network */
		list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
		rnet2 = rnet;
	}

	/* Search for a duplicate route (it's a NOOP if it is) */
	add_route = 1;
	list_for_each(e, &rnet2->lrn_routes) {
		lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);

		if (route2->lr_gateway == route->lr_gateway) {
			add_route = 0;
			break;
		}

		/* our lookups must be true */
		LASSERT(route2->lr_gateway->lp_nid != gateway);
	}

	if (add_route) {
		lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
		lnet_add_route_to_rnet(rnet2, route);

		ni = lnet_get_next_ni_locked(route->lr_gateway->lp_net, NULL);
		lnet_net_unlock(LNET_LOCK_EX);

		/* XXX Assume alive */
		if (ni->ni_net->net_lnd->lnd_notify != NULL)
			(ni->ni_net->net_lnd->lnd_notify)(ni, gateway, 1);

		lnet_net_lock(LNET_LOCK_EX);
	}

	/* -1 for notify or !add_route */
	lnet_peer_decref_locked(route->lr_gateway);
	lnet_net_unlock(LNET_LOCK_EX);

	rc = 0;
	if (!add_route) {
		rc = -EEXIST;
		LIBCFS_FREE(route, sizeof(*route));
	}

	if (rnet != rnet2)
		LIBCFS_FREE(rnet, sizeof(*rnet));

	/* indicate to startup the router checker if configured */
	wake_up(&the_lnet.ln_rc_waitq);

	return rc;
}
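/*
 * Typically reached from userspace configuration (illustrative):
 *
 *	lnetctl route add --net tcp1 --gateway 10.0.0.1@tcp
 *
 * or the older "lctl --net tcp1 add_route 10.0.0.1@tcp" form, both of
 * which arrive here via the LNet ioctl path.
 */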
int
lnet_check_routes(void)
{
	lnet_remotenet_t *rnet;
	lnet_route_t *route;
	lnet_route_t *route2;
	struct list_head *e1;
	struct list_head *e2;
	int cpt;
	struct list_head *rn_list;
	int i;

	cpt = lnet_net_lock_current();

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];
		list_for_each(e1, rn_list) {
			rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

			route2 = NULL;
			list_for_each(e2, &rnet->lrn_routes) {
				lnet_nid_t nid1;
				lnet_nid_t nid2;
				__u32 net2;

				route = list_entry(e2, lnet_route_t,
						   lr_list);

				if (route2 == NULL) {
					route2 = route;
					continue;
				}

				if (route->lr_gateway->lp_net ==
				    route2->lr_gateway->lp_net)
					continue;

				nid1 = route->lr_gateway->lp_nid;
				nid2 = route2->lr_gateway->lp_nid;
				net2 = rnet->lrn_net;

				lnet_net_unlock(cpt);

				CERROR("Routes to %s via %s and %s not supported\n",
				       libcfs_net2str(net2),
				       libcfs_nid2str(nid1),
				       libcfs_nid2str(nid2));
				return -EINVAL;
			}
		}
	}

	lnet_net_unlock(cpt);
	return 0;
}
int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
	struct lnet_peer *gateway;
	lnet_remotenet_t *rnet;
	lnet_route_t *route;
	struct list_head *e1;
	struct list_head *e2;
	int rc = -ENOENT;
	struct list_head *rn_list;
	int idx = 0;

	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
	       libcfs_net2str(net), libcfs_nid2str(gw_nid));

	/* NB Caller may specify either all routes via the given gateway
	 * or a specific route entry (actual NIDs) */

	lnet_net_lock(LNET_LOCK_EX);
	if (net == LNET_NIDNET(LNET_NID_ANY))
		rn_list = &the_lnet.ln_remote_nets_hash[0];
	else
		rn_list = lnet_net2rnethash(net);

again:
	list_for_each(e1, rn_list) {
		rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

		if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
		      net == rnet->lrn_net))
			continue;

		list_for_each(e2, &rnet->lrn_routes) {
			route = list_entry(e2, lnet_route_t, lr_list);

			gateway = route->lr_gateway;
			if (!(gw_nid == LNET_NID_ANY ||
			      gw_nid == gateway->lp_nid))
				continue;

			list_del(&route->lr_list);
			list_del(&route->lr_gwlist);
			the_lnet.ln_remote_nets_version++;

			if (list_empty(&rnet->lrn_routes))
				list_del(&rnet->lrn_list);
			else
				rnet = NULL;

			lnet_rtr_decref_locked(gateway);
			lnet_peer_decref_locked(gateway);

			lnet_net_unlock(LNET_LOCK_EX);

			LIBCFS_FREE(route, sizeof(*route));

			if (rnet != NULL)
				LIBCFS_FREE(rnet, sizeof(*rnet));

			rc = 0;
			lnet_net_lock(LNET_LOCK_EX);
			goto again;
		}
	}

	if (net == LNET_NIDNET(LNET_NID_ANY) &&
	    ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
		rn_list = &the_lnet.ln_remote_nets_hash[idx];
		goto again;
	}

	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}
void
lnet_destroy_routes(void)
{
	lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}
int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
{
	int i, rc = -ENOENT, j;

	if (the_lnet.ln_rtrpools == NULL)
		return rc;

	for (i = 0; i < LNET_NRBPOOLS; i++) {
		lnet_rtrbufpool_t *rbp;

		lnet_net_lock(LNET_LOCK_EX);
		cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
			if (j != idx)
				continue;

			pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages;
			pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers;
			pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits;
			pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits;
			rc = 0;
			break;
		}
		lnet_net_unlock(LNET_LOCK_EX);
	}

	lnet_net_lock(LNET_LOCK_EX);
	pool_cfg->pl_routing = the_lnet.ln_routing;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
	struct list_head *e1;
	struct list_head *e2;
	lnet_remotenet_t *rnet;
	lnet_route_t *route;
	int cpt;
	int i;
	struct list_head *rn_list;

	cpt = lnet_net_lock_current();

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];
		list_for_each(e1, rn_list) {
			rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

			list_for_each(e2, &rnet->lrn_routes) {
				route = list_entry(e2, lnet_route_t,
						   lr_list);

				if (idx-- == 0) {
					*net = rnet->lrn_net;
					*hops = route->lr_hops;
					*priority = route->lr_priority;
					*gateway = route->lr_gateway->lp_nid;
					*alive = lnet_is_route_alive(route);
					lnet_net_unlock(cpt);
					return 0;
				}
			}
		}
	}

	lnet_net_unlock(cpt);
	return -ENOENT;
}
static void
lnet_swap_pinginfo(struct lnet_ping_info *info)
{
	int i;
	struct lnet_ni_status *stat;

	__swab32s(&info->pi_magic);
	__swab32s(&info->pi_features);
	__swab32s(&info->pi_pid);
	__swab32s(&info->pi_nnis);
	for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
		stat = &info->pi_ni[i];
		__swab64s(&stat->ns_nid);
		__swab32s(&stat->ns_status);
	}
}
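/*
 * The sender writes pi_magic in its native byte order, so a magic that
 * matches only after __swab32() marks a reply from an opposite-endian
 * peer; lnet_parse_rc_info() below uses that to decide whether every
 * multi-byte field must be byte-swapped before inspection.
 */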
/*
 * parse router-checker pinginfo, record number of down NIs for remote
 * networks on that router.
 */
static void
lnet_parse_rc_info(lnet_rc_data_t *rcd)
{
	struct lnet_ping_info *info = rcd->rcd_pinginfo;
	struct lnet_peer *gw = rcd->rcd_gateway;
	lnet_route_t *rte;

	if (!gw->lp_alive)
		return;

	if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(info);

	/* NB always racing with network! */
	if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
		CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
		       libcfs_nid2str(gw->lp_nid), info->pi_magic);
		gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
		return;
	}

	gw->lp_ping_feats = info->pi_features;
	if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
		CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
		       libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
		return; /* nothing I can understand */
	}

	if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
		return; /* can't carry NI status info */

	list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
		int down = 0;
		int up = 0;
		int i;

		/* If routing disabled, consider all routes down */
		if ((gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) != 0) {
			rte->lr_downis = 1;
			continue;
		}

		for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
			struct lnet_ni_status *stat = &info->pi_ni[i];
			lnet_nid_t nid = stat->ns_nid;

			if (nid == LNET_NID_ANY) {
				CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
				       libcfs_nid2str(gw->lp_nid));
				gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
				return;
			}

			if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
				continue;

			if (stat->ns_status == LNET_NI_STATUS_DOWN) {
				down++;
				continue;
			}

			if (stat->ns_status == LNET_NI_STATUS_UP) {
				if (LNET_NIDNET(nid) == rte->lr_net) {
					up = 1;
					break;
				}
				continue;
			}

			CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
			       libcfs_nid2str(gw->lp_nid), stat->ns_status);
			gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
			return;
		}

		if (up) { /* ignore downed NIs if NI for dest network is up */
			rte->lr_downis = 0;
			continue;
		}
		/* if @down is zero and this route is single-hop, it means
		 * we can't find NI for target network */
		if (down == 0 && rte->lr_hops == 1)
			down = 1;

		rte->lr_downis = down;
	}
}
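/*
 * The lr_downis count computed here is what makes
 * avoid_asym_router_failure effective: a route whose gateway answers
 * pings but reports the NI on the target network as down is treated as
 * dead by lnet_is_route_alive() (assumed behavior of that helper, which
 * lives in the LNet headers rather than in this file).
 */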
static void
lnet_router_checker_event(lnet_event_t *event)
{
	lnet_rc_data_t *rcd = event->md.user_ptr;
	struct lnet_peer *lp;

	LASSERT(rcd != NULL);

	if (event->unlinked) {
		LNetInvalidateHandle(&rcd->rcd_mdh);
		return;
	}

	LASSERT(event->type == LNET_EVENT_SEND ||
		event->type == LNET_EVENT_REPLY);

	lp = rcd->rcd_gateway;
	LASSERT(lp != NULL);

	/* NB: it's called with holding lnet_res_lock, we have a few
	 * places need to hold both locks at the same time, please take
	 * care of lock ordering */
	lnet_net_lock(lp->lp_cpt);
	if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
		/* ignore if no longer a router or rcd is replaced */
		goto out;
	}

	if (event->type == LNET_EVENT_SEND) {
		lp->lp_ping_notsent = 0;
		if (event->status == 0)
			goto out;
	}

	/* LNET_EVENT_REPLY */
	/* A successful REPLY means the router is up. If _any_ comms
	 * to the router fail I assume it's down (this will happen if
	 * we ping alive routers to try to detect router death before
	 * apps get burned). */

	lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
	/* The router checker will wake up very shortly and do the
	 * actual notification.
	 * XXX If 'lp' stops being a router before then, it will still
	 * have the notification pending!!! */

	if (avoid_asym_router_failure && event->status == 0)
		lnet_parse_rc_info(rcd);

out:
	lnet_net_unlock(lp->lp_cpt);
}
void
lnet_wait_known_routerstate(void)
{
	lnet_peer_t *rtr;
	struct list_head *entry;
	int all_known;

	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

	for (;;) {
		int cpt = lnet_net_lock_current();

		all_known = 1;
		list_for_each(entry, &the_lnet.ln_routers) {
			rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

			if (rtr->lp_alive_count == 0) {
				all_known = 0;
				break;
			}
		}

		lnet_net_unlock(cpt);

		if (all_known)
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}
}
void
lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
{
	lnet_route_t *rte;

	if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
		list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
			if (rte->lr_net == net) {
				rte->lr_downis = 0;
				break;
			}
		}
	}
}
static void
lnet_update_ni_status_locked(void)
{
	lnet_ni_t *ni = NULL;
	time64_t now;
	time64_t timeout;

	LASSERT(the_lnet.ln_routing);

	timeout = router_ping_timeout +
		  MAX(live_router_check_interval, dead_router_check_interval);

	now = ktime_get_real_seconds();
	while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
		if (ni->ni_net->net_lnd->lnd_type == LOLND)
			continue;

		if (now < ni->ni_last_alive + timeout)
			continue;

		lnet_ni_lock(ni);
		/* re-check with lock */
		if (now < ni->ni_last_alive + timeout) {
			lnet_ni_unlock(ni);
			continue;
		}

		LASSERT(ni->ni_status != NULL);

		if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
			CDEBUG(D_NET, "NI(%s:%lld) status changed to down\n",
			       libcfs_nid2str(ni->ni_nid), timeout);
			/* NB: so far, this is the only place to set
			 * NI status to "down" */
			ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
		}
		lnet_ni_unlock(ni);
	}
}
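/*
 * With the module defaults above (router_ping_timeout = 50,
 * live/dead_router_check_interval = 60) an NI is marked down after
 * 50 + max(60, 60) = 110 seconds without any sign of life.
 */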
static void
lnet_destroy_rc_data(lnet_rc_data_t *rcd)
{
	LASSERT(list_empty(&rcd->rcd_list));
	/* detached from network */
	LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));

	if (rcd->rcd_gateway != NULL) {
		int cpt = rcd->rcd_gateway->lp_cpt;

		lnet_net_lock(cpt);
		lnet_peer_decref_locked(rcd->rcd_gateway);
		lnet_net_unlock(cpt);
	}

	if (rcd->rcd_pinginfo != NULL)
		LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);

	LIBCFS_FREE(rcd, sizeof(*rcd));
}
static lnet_rc_data_t *
lnet_create_rc_data_locked(lnet_peer_t *gateway)
{
	lnet_rc_data_t *rcd = NULL;
	struct lnet_ping_info *pi;
	int rc;
	int i;

	lnet_net_unlock(gateway->lp_cpt);

	LIBCFS_ALLOC(rcd, sizeof(*rcd));
	if (rcd == NULL)
		goto out;

	LNetInvalidateHandle(&rcd->rcd_mdh);
	INIT_LIST_HEAD(&rcd->rcd_list);

	LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
	if (pi == NULL)
		goto out;

	for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
		pi->pi_ni[i].ns_nid = LNET_NID_ANY;
		pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
	}
	rcd->rcd_pinginfo = pi;

	LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
	rc = LNetMDBind((lnet_md_t){.start = pi,
				    .user_ptr = rcd,
				    .length = LNET_PINGINFO_SIZE,
				    .threshold = LNET_MD_THRESH_INF,
				    .options = LNET_MD_TRUNCATE,
				    .eq_handle = the_lnet.ln_rc_eqh},
			LNET_UNLINK,
			&rcd->rcd_mdh);
	if (rc < 0) {
		CERROR("Can't bind MD: %d\n", rc);
		goto out;
	}
	LASSERT(rc == 0);

	lnet_net_lock(gateway->lp_cpt);
	/* router table changed or someone has created rcd for this gateway */
	if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
		lnet_net_unlock(gateway->lp_cpt);
		goto out;
	}

	lnet_peer_addref_locked(gateway);
	rcd->rcd_gateway = gateway;
	gateway->lp_rcd = rcd;
	gateway->lp_ping_notsent = 0;

	return rcd;

out:
	if (rcd != NULL) {
		if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
			rc = LNetMDUnlink(rcd->rcd_mdh);
			LASSERT(rc == 0);
		}
		lnet_destroy_rc_data(rcd);
	}

	lnet_net_lock(gateway->lp_cpt);
	return gateway->lp_rcd;
}
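/*
 * The MD is bound with an infinite threshold, so one rcd services every
 * ping ever sent to this gateway; it is only unlinked (and the rcd
 * destroyed) once the gateway drops off the router list, via the
 * deathrow/zombie handling in lnet_prune_rc_data() below.
 */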
static int
lnet_router_check_interval(lnet_peer_t *rtr)
{
	int secs;

	secs = rtr->lp_alive ? live_router_check_interval :
			       dead_router_check_interval;
	if (secs < 0)
		secs = 0;

	return secs;
}
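/*
 * E.g. with the defaults, both a live and a dead router are probed
 * every 60 seconds; a zero result (interval disabled) makes
 * lnet_ping_router_locked() below skip sending the ping.
 */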
static void
lnet_ping_router_locked(lnet_peer_t *rtr)
{
	lnet_rc_data_t *rcd = NULL;
	cfs_time_t now = cfs_time_current();
	int secs;
	lnet_ni_t *ni;

	lnet_peer_addref_locked(rtr);

	if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
	    cfs_time_after(now, rtr->lp_ping_deadline))
		lnet_notify_locked(rtr, 1, 0, now);

	/* Run any outstanding notifications */
	ni = lnet_get_next_ni_locked(rtr->lp_net, NULL);
	lnet_ni_notify_locked(ni, rtr);

	if (!lnet_isrouter(rtr) ||
	    the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
		/* router table changed or router checker is shutting down */
		lnet_peer_decref_locked(rtr);
		return;
	}

	rcd = rtr->lp_rcd != NULL ?
	      rtr->lp_rcd : lnet_create_rc_data_locked(rtr);

	if (rcd == NULL)
		return;

	secs = lnet_router_check_interval(rtr);

	CDEBUG(D_NET,
	       "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n",
	       libcfs_nid2str(rtr->lp_nid), secs,
	       rtr->lp_ping_deadline, rtr->lp_ping_notsent,
	       rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);

	if (secs != 0 && !rtr->lp_ping_notsent &&
	    cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
					     cfs_time_seconds(secs)))) {
		int rc;
		lnet_process_id_t id;
		lnet_handle_md_t mdh;

		id.nid = rtr->lp_nid;
		id.pid = LNET_PID_LUSTRE;
		CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));

		rtr->lp_ping_notsent = 1;
		rtr->lp_ping_timestamp = now;

		mdh = rcd->rcd_mdh;

		if (rtr->lp_ping_deadline == 0) {
			rtr->lp_ping_deadline =
				cfs_time_shift(router_ping_timeout);
		}

		lnet_net_unlock(rtr->lp_cpt);

		rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
			     LNET_PROTO_PING_MATCHBITS, 0);

		lnet_net_lock(rtr->lp_cpt);
		if (rc != 0)
			rtr->lp_ping_notsent = 0; /* no event pending */
	}

	lnet_peer_decref_locked(rtr);
}
int
lnet_router_checker_start(void)
{
	int rc;
	int eqsz = 0;
	struct task_struct *task;

	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

	if (check_routers_before_use &&
	    dead_router_check_interval <= 0) {
		LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
		return -EINVAL;
	}

	sema_init(&the_lnet.ln_rc_signal, 0);

	rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
	if (rc != 0) {
		CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
		return -ENOMEM;
	}

	the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
	task = kthread_run(lnet_router_checker, NULL, "router_checker");
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		CERROR("Can't start router checker thread: %d\n", rc);
		/* block until event callback signals exit */
		down(&the_lnet.ln_rc_signal);
		rc = LNetEQFree(the_lnet.ln_rc_eqh);
		LASSERT(rc == 0);
		the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
		return -ENOMEM;
	}

	if (check_routers_before_use) {
		/* Note that a helpful side-effect of pinging all known routers
		 * at startup is that it makes them drop stale connections they
		 * may have to a previous instance of me. */
		lnet_wait_known_routerstate();
	}

	return 0;
}
void
lnet_router_checker_stop(void)
{
	int rc;

	if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
		return;

	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
	the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
	/* wakeup the RC thread if it's sleeping */
	wake_up(&the_lnet.ln_rc_waitq);

	/* block until event callback signals exit */
	down(&the_lnet.ln_rc_signal);
	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

	rc = LNetEQFree(the_lnet.ln_rc_eqh);
	LASSERT(rc == 0);
}
static void
lnet_prune_rc_data(int wait_unlink)
{
	lnet_rc_data_t *rcd;
	lnet_rc_data_t *tmp;
	lnet_peer_t *lp;
	struct list_head head;
	int i = 2;

	if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
		   list_empty(&the_lnet.ln_rcd_deathrow) &&
		   list_empty(&the_lnet.ln_rcd_zombie)))
		return;

	INIT_LIST_HEAD(&head);

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
		/* router checker is stopping, prune all */
		list_for_each_entry(lp, &the_lnet.ln_routers,
				    lp_rtr_list) {
			if (lp->lp_rcd == NULL)
				continue;

			LASSERT(list_empty(&lp->lp_rcd->rcd_list));
			list_add(&lp->lp_rcd->rcd_list,
				 &the_lnet.ln_rcd_deathrow);
			lp->lp_rcd = NULL;
		}
	}

	/* unlink all RCDs on deathrow list */
	list_splice_init(&the_lnet.ln_rcd_deathrow, &head);

	if (!list_empty(&head)) {
		lnet_net_unlock(LNET_LOCK_EX);

		list_for_each_entry(rcd, &head, rcd_list)
			LNetMDUnlink(rcd->rcd_mdh);

		lnet_net_lock(LNET_LOCK_EX);
	}

	list_splice_init(&head, &the_lnet.ln_rcd_zombie);

	/* release all zombie RCDs */
	while (!list_empty(&the_lnet.ln_rcd_zombie)) {
		list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
					 rcd_list) {
			if (LNetHandleIsInvalid(rcd->rcd_mdh))
				list_move(&rcd->rcd_list, &head);
		}

		wait_unlink = wait_unlink &&
			      !list_empty(&the_lnet.ln_rcd_zombie);

		lnet_net_unlock(LNET_LOCK_EX);

		while (!list_empty(&head)) {
			rcd = list_entry(head.next,
					 lnet_rc_data_t, rcd_list);
			list_del_init(&rcd->rcd_list);
			lnet_destroy_rc_data(rcd);
		}

		if (!wait_unlink)
			return;

		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for rc buffers to unlink\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1) / 4);

		lnet_net_lock(LNET_LOCK_EX);
	}

	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * This function is called to check if the RC should block indefinitely.
 * It's called from lnet_router_checker() as well as being passed to
 * wait_event_interruptible() to avoid the lost wake_up problem.
 *
 * When it's called from wait_event_interruptible() it is necessary to
 * also not sleep if the rc state is not running to avoid a deadlock
 * when the system is shutting down.
 */
static inline bool
lnet_router_checker_active(void)
{
	if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING)
		return true;

	/* Router Checker thread needs to run when routing is enabled in
	 * order to call lnet_update_ni_status_locked() */
	if (the_lnet.ln_routing)
		return true;

	return !list_empty(&the_lnet.ln_routers) &&
	       (live_router_check_interval > 0 ||
		dead_router_check_interval > 0);
}
static int
lnet_router_checker(void *arg)
{
	lnet_peer_t *rtr;
	struct list_head *entry;

	cfs_block_allsigs();

	while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
		__u64 version;
		int cpt;
		int cpt2;

		cpt = lnet_net_lock_current();
rescan:
		version = the_lnet.ln_routers_version;

		list_for_each(entry, &the_lnet.ln_routers) {
			rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

			cpt2 = rtr->lp_cpt;
			if (cpt != cpt2) {
				lnet_net_unlock(cpt);
				cpt = cpt2;
				lnet_net_lock(cpt);
				/* the routers list has changed */
				if (version != the_lnet.ln_routers_version)
					goto rescan;
			}

			lnet_ping_router_locked(rtr);

			/* NB dropped lock */
			if (version != the_lnet.ln_routers_version) {
				/* the routers list has changed */
				goto rescan;
			}
		}

		if (the_lnet.ln_routing)
			lnet_update_ni_status_locked();

		lnet_net_unlock(cpt);

		lnet_prune_rc_data(0); /* don't wait for UNLINK */

		/* Call schedule_timeout() here always adds 1 to load average
		 * because kernel counts # active tasks as nr_running
		 * + nr_uninterruptible. */
		/* if there are any routes then wakeup every second. If
		 * there are no routes then sleep indefinitely until woken
		 * up by a user adding a route */
		if (!lnet_router_checker_active())
			wait_event_interruptible(the_lnet.ln_rc_waitq,
						 lnet_router_checker_active());
		else
			wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
							 false,
							 cfs_time_seconds(1));
	}

	lnet_prune_rc_data(1); /* wait for UNLINK */

	the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
	up(&the_lnet.ln_rc_signal);
	/* The unlink event callback will signal final completion */
	return 0;
}
static void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);

	while (--npages >= 0)
		__free_page(rb->rb_kiov[npages].kiov_page);

	LIBCFS_FREE(rb, sz);
}
static lnet_rtrbuf_t *
lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
	struct page *page;
	lnet_rtrbuf_t *rb;
	int i;

	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
	if (rb == NULL)
		return NULL;

	rb->rb_pool = rbp;

	for (i = 0; i < npages; i++) {
		page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
					  GFP_KERNEL | __GFP_ZERO);
		if (page == NULL) {
			while (--i >= 0)
				__free_page(rb->rb_kiov[i].kiov_page);

			LIBCFS_FREE(rb, sz);
			return NULL;
		}

		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
		rb->rb_kiov[i].kiov_offset = 0;
		rb->rb_kiov[i].kiov_page = page;
	}

	return rb;
}
static void
lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	lnet_rtrbuf_t *rb;
	struct list_head tmp;

	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
		return;

	INIT_LIST_HEAD(&tmp);

	lnet_net_lock(cpt);
	lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
	list_splice_init(&rbp->rbp_bufs, &tmp);
	rbp->rbp_req_nbuffers = 0;
	rbp->rbp_nbuffers = rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
	lnet_net_unlock(cpt);

	/* Free buffers on the free list. */
	while (!list_empty(&tmp)) {
		rb = list_entry(tmp.next, lnet_rtrbuf_t, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}
}
static int
lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
{
	struct list_head rb_list;
	lnet_rtrbuf_t *rb;
	int num_rb;
	int num_buffers = 0;
	int old_req_nbufs;
	int npages = rbp->rbp_npages;

	lnet_net_lock(cpt);
	/* If we are called for less buffers than already in the pool, we
	 * just lower the req_nbuffers number and excess buffers will be
	 * thrown away as they are returned to the free list. Credits
	 * then get adjusted as well.
	 * If we already have enough buffers allocated to serve the
	 * increase requested, then we can treat that the same way as we
	 * do the decrease. */
	num_rb = nbufs - rbp->rbp_nbuffers;
	if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
		rbp->rbp_req_nbuffers = nbufs;
		lnet_net_unlock(cpt);
		return 0;
	}
	/* store the older value of rbp_req_nbuffers and then set it to
	 * the new request to prevent lnet_return_rx_credits_locked() from
	 * freeing buffers that we need to keep around */
	old_req_nbufs = rbp->rbp_req_nbuffers;
	rbp->rbp_req_nbuffers = nbufs;
	lnet_net_unlock(cpt);

	INIT_LIST_HEAD(&rb_list);

	/* allocate the buffers on a local list first. If all buffers are
	 * allocated successfully then join this list to the rbp buffer
	 * list. If not then free all allocated buffers. */
	while (num_rb-- > 0) {
		rb = lnet_new_rtrbuf(rbp, cpt);
		if (rb == NULL) {
			CERROR("Failed to allocate %d route bufs of %d pages\n",
			       nbufs, npages);

			lnet_net_lock(cpt);
			rbp->rbp_req_nbuffers = old_req_nbufs;
			lnet_net_unlock(cpt);

			goto failed;
		}

		list_add(&rb->rb_list, &rb_list);
		num_buffers++;
	}

	lnet_net_lock(cpt);

	list_splice_tail(&rb_list, &rbp->rbp_bufs);
	rbp->rbp_nbuffers += num_buffers;
	rbp->rbp_credits += num_buffers;
	rbp->rbp_mincredits = rbp->rbp_credits;
	/* We need to schedule blocked msg using the newly
	 * added buffers. */
	while (!list_empty(&rbp->rbp_bufs) &&
	       !list_empty(&rbp->rbp_msgs))
		lnet_schedule_blocked_locked(rbp);

	lnet_net_unlock(cpt);

	return 0;

failed:
	while (!list_empty(&rb_list)) {
		rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}

	return -ENOMEM;
}
static void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
	INIT_LIST_HEAD(&rbp->rbp_msgs);
	INIT_LIST_HEAD(&rbp->rbp_bufs);

	rbp->rbp_npages = npages;
	rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
}
void
lnet_rtrpools_free(int keep_pools)
{
	lnet_rtrbufpool_t *rtrp;
	int i;

	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
		return;

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
	}

	if (!keep_pools) {
		cfs_percpt_free(the_lnet.ln_rtrpools);
		the_lnet.ln_rtrpools = NULL;
	}
}
static int
lnet_nrb_tiny_calculate(void)
{
	int nrbs = LNET_NRB_TINY;

	if (tiny_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "tiny_router_buffers=%d invalid when routing enabled\n",
				   tiny_router_buffers);
		return -EINVAL;
	}

	if (tiny_router_buffers > 0)
		nrbs = tiny_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_TINY_MIN);
}
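/*
 * Worked example with defaults: tiny_router_buffers unset, so
 * nrbs = LNET_NRB_TINY = 512 * 4 = 2048; on a node with 4 CPTs each
 * CPT gets 2048 / 4 = 512 buffers, which is exactly the
 * LNET_NRB_TINY_MIN floor. The small/large variants below follow the
 * same pattern with their own defaults and floors.
 */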
static int
lnet_nrb_small_calculate(void)
{
	int nrbs = LNET_NRB_SMALL;

	if (small_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "small_router_buffers=%d invalid when routing enabled\n",
				   small_router_buffers);
		return -EINVAL;
	}

	if (small_router_buffers > 0)
		nrbs = small_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_SMALL_MIN);
}
static int
lnet_nrb_large_calculate(void)
{
	int nrbs = LNET_NRB_LARGE;

	if (large_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "large_router_buffers=%d invalid when routing enabled\n",
				   large_router_buffers);
		return -EINVAL;
	}

	if (large_router_buffers > 0)
		nrbs = large_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_LARGE_MIN);
}
int
lnet_rtrpools_alloc(int im_a_router)
{
	lnet_rtrbufpool_t *rtrp;
	int nrb_tiny;
	int nrb_small;
	int nrb_large;
	int rc;
	int i;

	if (!strcmp(forwarding, "")) {
		/* not set either way */
		if (!im_a_router)
			return 0;
	} else if (!strcmp(forwarding, "disabled")) {
		/* explicitly disabled */
		return 0;
	} else if (!strcmp(forwarding, "enabled")) {
		/* explicitly enabled */
	} else {
		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
		return -EINVAL;
	}

	nrb_tiny = lnet_nrb_tiny_calculate();
	if (nrb_tiny < 0)
		return -EINVAL;

	nrb_small = lnet_nrb_small_calculate();
	if (nrb_small < 0)
		return -EINVAL;

	nrb_large = lnet_nrb_large_calculate();
	if (nrb_large < 0)
		return -EINVAL;

	the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
						LNET_NRBPOOLS *
						sizeof(lnet_rtrbufpool_t));
	if (the_lnet.ln_rtrpools == NULL) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "Failed to initialize router buffer pool\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
					      nrb_tiny, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
				  LNET_NRB_SMALL_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
					      nrb_small, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
				  LNET_NRB_LARGE_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
					      nrb_large, i);
		if (rc != 0)
			goto failed;
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;

failed:
	lnet_rtrpools_free(0);
	return rc;
}
static int
lnet_rtrpools_adjust_helper(int tiny, int small, int large)
{
	int nrb = 0;
	int rc = 0;
	int i;
	lnet_rtrbufpool_t *rtrp;

	/* If the provided values for each buffer pool are different than the
	 * configured values, we need to take action. */
	if (tiny >= 0) {
		tiny_router_buffers = tiny;
		nrb = lnet_nrb_tiny_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}
	if (small >= 0) {
		small_router_buffers = small;
		nrb = lnet_nrb_small_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}
	if (large >= 0) {
		large_router_buffers = large;
		nrb = lnet_nrb_large_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

int
lnet_rtrpools_adjust(int tiny, int small, int large)
{
	/* this function doesn't revert the changes if adding new buffers
	 * failed. It's up to the user space caller to revert the
	 * changes. */
	if (!the_lnet.ln_routing)
		return 0;

	return lnet_rtrpools_adjust_helper(tiny, small, large);
}
int
lnet_rtrpools_enable(void)
{
	int rc = 0;

	if (the_lnet.ln_routing)
		return 0;

	if (the_lnet.ln_rtrpools == NULL)
		/* If routing is turned off, and we have never
		 * initialized the pools before, just call the
		 * standard buffer pool allocation routine as
		 * if we are just configuring this for the first
		 * time. */
		rc = lnet_rtrpools_alloc(1);
	else
		rc = lnet_rtrpools_adjust_helper(0, 0, 0);
	if (rc != 0)
		return rc;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;

	the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}
void
lnet_rtrpools_disable(void)
{
	if (!the_lnet.ln_routing)
		return;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 0;
	the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;

	tiny_router_buffers = 0;
	small_router_buffers = 0;
	large_router_buffers = 0;
	lnet_net_unlock(LNET_LOCK_EX);
	lnet_rtrpools_free(1);
}
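/*
 * Routing is toggled from userspace (illustrative):
 *
 *	lnetctl set routing 1	# reaches lnet_rtrpools_enable()
 *	lnetctl set routing 0	# reaches lnet_rtrpools_disable()
 *
 * Disabling also advertises LNET_PING_FEAT_RTE_DISABLED in the ping
 * feature bits, which peers consume in lnet_parse_rc_info() above.
 */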
int
lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
{
	struct lnet_peer *lp = NULL;
	cfs_time_t now = cfs_time_current();
	int cpt = lnet_cpt_of_nid(nid, ni);

	LASSERT(!in_interrupt());

	CDEBUG(D_NET, "%s notifying %s: %s\n",
	       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
	       libcfs_nid2str(nid),
	       alive ? "up" : "down");

	if (ni != NULL &&
	    LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
		CWARN("Ignoring notification of %s %s by %s (different net)\n",
		      libcfs_nid2str(nid), alive ? "birth" : "death",
		      libcfs_nid2str(ni->ni_nid));
		return -EINVAL;
	}

	/* can't do predictions... */
	if (cfs_time_after(when, now)) {
		CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
		      libcfs_nid2str(nid), alive ? "up" : "down",
		      cfs_duration_sec(cfs_time_sub(when, now)));
		return -EINVAL;
	}

	if (ni != NULL && !alive &&	/* LND telling me she's down */
	    !auto_down) {		/* auto-down disabled */
		CDEBUG(D_NET, "Auto-down disabled\n");
		return 0;
	}

	lnet_net_lock(cpt);

	if (the_lnet.ln_shutdown) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
	if (lp == NULL) {
		/* nid not found */
		lnet_net_unlock(cpt);
		CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
		return 0;
	}

	/* We can't fully trust LND on reporting exact peer last_alive
	 * if he notifies us about dead peer. For example ksocklnd can
	 * call us with when == _time_when_the_node_was_booted_ if
	 * no connections were successfully established */
	if (ni != NULL && !alive && when < lp->lp_last_alive)
		when = lp->lp_last_alive;

	lnet_notify_locked(lp, ni == NULL, alive, when);

	if (ni != NULL)
		lnet_ni_notify_locked(ni, lp);

	lnet_peer_decref_locked(lp);

	lnet_net_unlock(cpt);
	return 0;
}
EXPORT_SYMBOL(lnet_notify);