2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2011, 2016, Intel Corporation.
6 * This file is part of Lustre, https://wiki.hpdd.intel.com/
8 * Portals is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Portals is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Portals; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_LNET
24 #include <lnet/lib-lnet.h>
26 #define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
27 #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
28 #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
29 #define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
30 #define LNET_NRB_SMALL_PAGES 1
31 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
32 #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
33 #define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
34 PAGE_SHIFT)
36 static char *forwarding = "";
37 module_param(forwarding, charp, 0444);
38 MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
40 static int tiny_router_buffers;
41 module_param(tiny_router_buffers, int, 0444);
42 MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
43 static int small_router_buffers;
44 module_param(small_router_buffers, int, 0444);
45 MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
46 static int large_router_buffers;
47 module_param(large_router_buffers, int, 0444);
48 MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
49 static int peer_buffer_credits;
50 module_param(peer_buffer_credits, int, 0444);
51 MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
53 static int auto_down = 1;
54 module_param(auto_down, int, 0444);
55 MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
58 lnet_peer_buffer_credits(struct lnet_net *net)
60 /* NI option overrides LNet default */
61 if (net->net_tunables.lct_peer_rtr_credits > 0)
62 return net->net_tunables.lct_peer_rtr_credits;
63 if (peer_buffer_credits > 0)
64 return peer_buffer_credits;
66 /* As an approximation, allow this peer the same number of router
67 * buffers as it is allowed outstanding sends */
68 return net->net_tunables.lct_peer_tx_credits;
72 static int lnet_router_checker(void *);
74 static int check_routers_before_use;
75 module_param(check_routers_before_use, int, 0444);
76 MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
78 int avoid_asym_router_failure = 1;
79 module_param(avoid_asym_router_failure, int, 0644);
80 MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
82 static int dead_router_check_interval = 60;
83 module_param(dead_router_check_interval, int, 0644);
84 MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
86 static int live_router_check_interval = 60;
87 module_param(live_router_check_interval, int, 0644);
88 MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
90 static int router_ping_timeout = 50;
91 module_param(router_ping_timeout, int, 0644);
92 MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
95 lnet_peers_start_down(void)
97 return check_routers_before_use;
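/*
 * Record a change in a peer's aliveness.  Stale or repeated news is
 * ignored; otherwise the timestamp and alive flag are updated and a
 * notification is left pending for lnet_ni_notify_locked() to push to
 * the LND.  Called with the per-CPT net lock held.
 */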
101 lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
104 if (cfs_time_before(when, lp->lpni_timestamp)) { /* out of date information */
105 CDEBUG(D_NET, "Out of date\n");
109 lp->lpni_timestamp = when; /* update timestamp */
110 lp->lpni_ping_deadline = 0; /* disable ping timeout */
112 if (lp->lpni_alive_count != 0 && /* got old news */
113 (!lp->lpni_alive) == (!alive)) { /* new date for old news */
114 CDEBUG(D_NET, "Old news\n");
118 /* Flag that notification is outstanding */
120 lp->lpni_alive_count++;
121 lp->lpni_alive = !(!alive); /* 1 bit! */
123 lp->lpni_notifylnd |= notifylnd;
125 lp->lpni_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
127 CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lpni_nid), alive);
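/*
 * Push any pending aliveness notification for @lp to the LND via
 * @ni's lnd_notify() callback.  Only one thread notifies at a time;
 * the net lock is dropped around the callback.
 */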
131 lnet_ni_notify_locked(lnet_ni_t *ni, struct lnet_peer_ni *lp)
136 /* Notify only in 1 thread at any time to ensure ordered notification.
137 * NB individual events can be missed; the only guarantee is that you
138 * always get the most recent news */
140 if (lp->lpni_notifying || ni == NULL)
143 lp->lpni_notifying = 1;
145 while (lp->lpni_notify) {
146 alive = lp->lpni_alive;
147 notifylnd = lp->lpni_notifylnd;
149 lp->lpni_notifylnd = 0;
152 if (notifylnd && ni->ni_net->net_lnd->lnd_notify != NULL) {
153 lnet_net_unlock(lp->lpni_cpt);
155 /* A new notification could happen now; I'll handle it
156 * when control returns to me */
158 (ni->ni_net->net_lnd->lnd_notify)(ni, lp->lpni_nid,
161 lnet_net_lock(lp->lpni_cpt);
165 lp->lpni_notifying = 0;
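/*
 * Take a router reference on @lp.  The first reference inserts it
 * into the_lnet.ln_routers, which is kept sorted by NID.
 */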
170 lnet_rtr_addref_locked(struct lnet_peer_ni *lp)
172 LASSERT(atomic_read(&lp->lpni_refcount) > 0);
173 LASSERT(lp->lpni_rtr_refcount >= 0);
175 /* lnet_net_lock must be exclusively locked */
176 lp->lpni_rtr_refcount++;
177 if (lp->lpni_rtr_refcount == 1) {
178 struct list_head *pos;
180 /* a simple insertion sort */
181 list_for_each_prev(pos, &the_lnet.ln_routers) {
182 struct lnet_peer_ni *rtr =
183 list_entry(pos, struct lnet_peer_ni,
186 if (rtr->lpni_nid < lp->lpni_nid)
190 list_add(&lp->lpni_rtr_list, pos);
191 /* addref for the_lnet.ln_routers */
192 lnet_peer_ni_addref_locked(lp);
193 the_lnet.ln_routers_version++;
198 lnet_rtr_decref_locked(struct lnet_peer_ni *lp)
200 LASSERT(atomic_read(&lp->lpni_refcount) > 0);
201 LASSERT(lp->lpni_rtr_refcount > 0);
203 /* lnet_net_lock must be exclusively locked */
204 lp->lpni_rtr_refcount--;
205 if (lp->lpni_rtr_refcount == 0) {
206 LASSERT(list_empty(&lp->lpni_routes));
208 if (lp->lpni_rcd != NULL) {
209 list_add(&lp->lpni_rcd->rcd_list,
210 &the_lnet.ln_rcd_deathrow);
214 list_del(&lp->lpni_rtr_list);
215 /* decref for the_lnet.ln_routers */
216 lnet_peer_ni_decref_locked(lp);
217 the_lnet.ln_routers_version++;
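/* Look up the remote net descriptor for @net in the remote-net hash. */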
222 lnet_find_rnet_locked(__u32 net)
224 lnet_remotenet_t *rnet;
225 struct list_head *tmp;
226 struct list_head *rn_list;
228 LASSERT(!the_lnet.ln_shutdown);
230 rn_list = lnet_net2rnethash(net);
231 list_for_each(tmp, rn_list) {
232 rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
234 if (rnet->lrn_net == net)
240 static void lnet_shuffle_seed(void)
245 struct timespec64 ts;
246 lnet_ni_t *ni = NULL;
251 cfs_get_random_bytes(seed, sizeof(seed));
253 /* Nodes with small feet have little entropy
254 * the NID for this node gives the most entropy in the low bits */
255 while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
256 lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
258 if (lnd_type != LOLND)
259 seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
263 cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
268 /* NB expects LNET_LOCK held */
270 lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
272 unsigned int len = 0;
273 unsigned int offset = 0;
278 list_for_each(e, &rnet->lrn_routes) {
282 /* len+1 positions to add a new entry, also prevents division by 0 */
283 offset = cfs_rand() % (len + 1);
284 list_for_each(e, &rnet->lrn_routes) {
289 list_add(&route->lr_list, e);
290 list_add(&route->lr_gwlist, &route->lr_gateway->lpni_routes);
292 the_lnet.ln_remote_nets_version++;
293 lnet_rtr_addref_locked(route->lr_gateway);
297 lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
298 unsigned int priority)
301 lnet_remotenet_t *rnet;
302 lnet_remotenet_t *rnet2;
305 struct lnet_peer_ni *lpni;
309 CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n",
310 libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
312 if (gateway == LNET_NID_ANY ||
313 LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
314 net == LNET_NIDNET(LNET_NID_ANY) ||
315 LNET_NETTYP(net) == LOLND ||
316 LNET_NIDNET(gateway) == net ||
317 (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
320 if (lnet_islocalnet(net)) /* it's a local network */
323 /* Assume net, route, all new */
324 LIBCFS_ALLOC(route, sizeof(*route));
325 LIBCFS_ALLOC(rnet, sizeof(*rnet));
326 if (route == NULL || rnet == NULL) {
327 CERROR("Out of memory creating route %s %d %s\n",
328 libcfs_net2str(net), hops, libcfs_nid2str(gateway));
330 LIBCFS_FREE(route, sizeof(*route));
332 LIBCFS_FREE(rnet, sizeof(*rnet));
336 INIT_LIST_HEAD(&rnet->lrn_routes);
338 route->lr_hops = hops;
340 route->lr_priority = priority;
342 lnet_net_lock(LNET_LOCK_EX);
344 lpni = lnet_nid2peerni_locked(gateway, LNET_LOCK_EX);
346 lnet_net_unlock(LNET_LOCK_EX);
348 LIBCFS_FREE(route, sizeof(*route));
349 LIBCFS_FREE(rnet, sizeof(*rnet));
352 if (rc == -EHOSTUNREACH) /* gateway is not on a local net. */
353 return rc; /* ignore the route entry */
354 CERROR("Error %d creating route %s %d %s\n", rc,
355 libcfs_net2str(net), hops,
356 libcfs_nid2str(gateway));
359 route->lr_gateway = lpni;
360 LASSERT(!the_lnet.ln_shutdown);
362 rnet2 = lnet_find_rnet_locked(net);
365 list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
369 /* Search for a duplicate route (adding a duplicate is a no-op) */
371 list_for_each(e, &rnet2->lrn_routes) {
372 lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
374 if (route2->lr_gateway == route->lr_gateway) {
379 /* our lookups must be true */
380 LASSERT(route2->lr_gateway->lpni_nid != gateway);
384 lnet_peer_ni_addref_locked(route->lr_gateway); /* +1 for notify */
385 lnet_add_route_to_rnet(rnet2, route);
387 ni = lnet_get_next_ni_locked(route->lr_gateway->lpni_net, NULL);
388 lnet_net_unlock(LNET_LOCK_EX);
390 /* XXX Assume alive */
391 if (ni->ni_net->net_lnd->lnd_notify != NULL)
392 (ni->ni_net->net_lnd->lnd_notify)(ni, gateway, 1);
394 lnet_net_lock(LNET_LOCK_EX);
397 /* -1 for notify or !add_route */
398 lnet_peer_ni_decref_locked(route->lr_gateway);
399 lnet_net_unlock(LNET_LOCK_EX);
405 LIBCFS_FREE(route, sizeof(*route));
409 LIBCFS_FREE(rnet, sizeof(*rnet));
411 /* tell the router checker to start up, if it is configured */
412 wake_up(&the_lnet.ln_rc_waitq);
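/*
 * Verify that every remote net is reached through gateways on a single
 * local net; routes to the same target via gateways on different local
 * nets are not supported.
 */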
418 lnet_check_routes(void)
420 lnet_remotenet_t *rnet;
422 lnet_route_t *route2;
423 struct list_head *e1;
424 struct list_head *e2;
426 struct list_head *rn_list;
429 cpt = lnet_net_lock_current();
431 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
432 rn_list = &the_lnet.ln_remote_nets_hash[i];
433 list_for_each(e1, rn_list) {
434 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
437 list_for_each(e2, &rnet->lrn_routes) {
442 route = list_entry(e2, lnet_route_t,
445 if (route2 == NULL) {
450 if (route->lr_gateway->lpni_net ==
451 route2->lr_gateway->lpni_net)
454 nid1 = route->lr_gateway->lpni_nid;
455 nid2 = route2->lr_gateway->lpni_nid;
458 lnet_net_unlock(cpt);
460 CERROR("Routes to %s via %s and %s not "
463 libcfs_nid2str(nid1),
464 libcfs_nid2str(nid2));
470 lnet_net_unlock(cpt);
475 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
477 struct lnet_peer_ni *gateway;
478 lnet_remotenet_t *rnet;
480 struct list_head *e1;
481 struct list_head *e2;
483 struct list_head *rn_list;
486 CDEBUG(D_NET, "Del route: net %s : gw %s\n",
487 libcfs_net2str(net), libcfs_nid2str(gw_nid));
489 /* NB Caller may specify either all routes via the given gateway
490 * or a specific route entry (actual NIDs) */
492 lnet_net_lock(LNET_LOCK_EX);
493 if (net == LNET_NIDNET(LNET_NID_ANY))
494 rn_list = &the_lnet.ln_remote_nets_hash[0];
496 rn_list = lnet_net2rnethash(net);
499 list_for_each(e1, rn_list) {
500 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
502 if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
503 net == rnet->lrn_net))
506 list_for_each(e2, &rnet->lrn_routes) {
507 route = list_entry(e2, lnet_route_t, lr_list);
509 gateway = route->lr_gateway;
510 if (!(gw_nid == LNET_NID_ANY ||
511 gw_nid == gateway->lpni_nid))
514 list_del(&route->lr_list);
515 list_del(&route->lr_gwlist);
516 the_lnet.ln_remote_nets_version++;
518 if (list_empty(&rnet->lrn_routes))
519 list_del(&rnet->lrn_list);
523 lnet_rtr_decref_locked(gateway);
524 lnet_peer_ni_decref_locked(gateway);
526 lnet_net_unlock(LNET_LOCK_EX);
528 LIBCFS_FREE(route, sizeof(*route));
531 LIBCFS_FREE(rnet, sizeof(*rnet));
534 lnet_net_lock(LNET_LOCK_EX);
539 if (net == LNET_NIDNET(LNET_NID_ANY) &&
540 ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
541 rn_list = &the_lnet.ln_remote_nets_hash[idx];
544 lnet_net_unlock(LNET_LOCK_EX);
550 lnet_destroy_routes (void)
552 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
555 int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
557 int i, rc = -ENOENT, j;
559 if (the_lnet.ln_rtrpools == NULL)
562 for (i = 0; i < LNET_NRBPOOLS; i++) {
563 lnet_rtrbufpool_t *rbp;
565 lnet_net_lock(LNET_LOCK_EX);
566 cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
570 pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages;
571 pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers;
572 pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits;
573 pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits;
577 lnet_net_unlock(LNET_LOCK_EX);
580 lnet_net_lock(LNET_LOCK_EX);
581 pool_cfg->pl_routing = the_lnet.ln_routing;
582 lnet_net_unlock(LNET_LOCK_EX);
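/*
 * Return the net, hop count, priority, gateway and aliveness of the
 * idx'th configured route (used when listing routes).
 */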
588 lnet_get_route(int idx, __u32 *net, __u32 *hops,
589 lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
591 struct list_head *e1;
592 struct list_head *e2;
593 lnet_remotenet_t *rnet;
597 struct list_head *rn_list;
599 cpt = lnet_net_lock_current();
601 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
602 rn_list = &the_lnet.ln_remote_nets_hash[i];
603 list_for_each(e1, rn_list) {
604 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
606 list_for_each(e2, &rnet->lrn_routes) {
607 route = list_entry(e2, lnet_route_t,
611 *net = rnet->lrn_net;
612 *hops = route->lr_hops;
613 *priority = route->lr_priority;
614 *gateway = route->lr_gateway->lpni_nid;
615 *alive = lnet_is_route_alive(route);
616 lnet_net_unlock(cpt);
623 lnet_net_unlock(cpt);
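/* Byte-swap a ping info block received from a peer of opposite endianness. */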
628 lnet_swap_pinginfo(struct lnet_ping_info *info)
631 struct lnet_ni_status *stat;
633 __swab32s(&info->pi_magic);
634 __swab32s(&info->pi_features);
635 __swab32s(&info->pi_pid);
636 __swab32s(&info->pi_nnis);
637 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
638 stat = &info->pi_ni[i];
639 __swab64s(&stat->ns_nid);
640 __swab32s(&stat->ns_status);
646 * parse router-checker pinginfo, record number of down NIs for remote
647 * networks on that router.
650 lnet_parse_rc_info(lnet_rc_data_t *rcd)
652 struct lnet_ping_info *info = rcd->rcd_pinginfo;
653 struct lnet_peer_ni *gw = rcd->rcd_gateway;
659 if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
660 lnet_swap_pinginfo(info);
662 /* NB always racing with network! */
663 if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
664 CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
665 libcfs_nid2str(gw->lpni_nid), info->pi_magic);
666 gw->lpni_ping_feats = LNET_PING_FEAT_INVAL;
670 gw->lpni_ping_feats = info->pi_features;
671 if ((gw->lpni_ping_feats & LNET_PING_FEAT_MASK) == 0) {
672 CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
673 libcfs_nid2str(gw->lpni_nid), gw->lpni_ping_feats);
674 return; /* nothing I can understand */
677 if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
678 return; /* can't carry NI status info */
680 list_for_each_entry(rte, &gw->lpni_routes, lr_gwlist) {
685 if ((gw->lpni_ping_feats & LNET_PING_FEAT_RTE_DISABLED) != 0) {
690 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
691 struct lnet_ni_status *stat = &info->pi_ni[i];
692 lnet_nid_t nid = stat->ns_nid;
694 if (nid == LNET_NID_ANY) {
695 CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
696 libcfs_nid2str(gw->lpni_nid));
697 gw->lpni_ping_feats = LNET_PING_FEAT_INVAL;
701 if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
704 if (stat->ns_status == LNET_NI_STATUS_DOWN) {
709 if (stat->ns_status == LNET_NI_STATUS_UP) {
710 if (LNET_NIDNET(nid) == rte->lr_net) {
717 CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
718 libcfs_nid2str(gw->lpni_nid), stat->ns_status);
719 gw->lpni_ping_feats = LNET_PING_FEAT_INVAL;
723 if (up) { /* ignore downed NIs if NI for dest network is up */
727 /* if @down is zero and this route is single-hop, it means
728 * we can't find an NI for the target network */
729 if (down == 0 && rte->lr_hops == 1)
732 rte->lr_downis = down;
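/*
 * Handle events on the router checker EQ: SEND/REPLY events of router
 * pings.  A successful reply marks the router alive and, when
 * avoid_asym_router_failure is set, its NI status info is parsed too.
 */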
737 lnet_router_checker_event(lnet_event_t *event)
739 lnet_rc_data_t *rcd = event->md.user_ptr;
740 struct lnet_peer_ni *lp;
742 LASSERT(rcd != NULL);
744 if (event->unlinked) {
745 LNetInvalidateHandle(&rcd->rcd_mdh);
749 LASSERT(event->type == LNET_EVENT_SEND ||
750 event->type == LNET_EVENT_REPLY);
752 lp = rcd->rcd_gateway;
755 /* NB: this is called while holding lnet_res_lock; a few places
756 * need to hold both locks at the same time, so please take
757 * care of lock ordering */
758 lnet_net_lock(lp->lpni_cpt);
759 if (!lnet_isrouter(lp) || lp->lpni_rcd != rcd) {
760 /* ignore if no longer a router or rcd is replaced */
764 if (event->type == LNET_EVENT_SEND) {
765 lp->lpni_ping_notsent = 0;
766 if (event->status == 0)
770 /* LNET_EVENT_REPLY */
771 /* A successful REPLY means the router is up. If _any_ comms
772 * to the router fail I assume it's down (this will happen if
773 * we ping alive routers to try to detect router death before
774 * apps get burned). */
776 lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
777 /* The router checker will wake up very shortly and do the
778 * actual notification.
779 * XXX If 'lp' stops being a router before then, it will still
780 * have the notification pending!!! */
782 if (avoid_asym_router_failure && event->status == 0)
783 lnet_parse_rc_info(rcd);
786 lnet_net_unlock(lp->lpni_cpt);
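/*
 * Block until every known router has been pinged at least once, so
 * that its initial aliveness is known before peers are used.
 */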
790 lnet_wait_known_routerstate(void)
792 struct lnet_peer_ni *rtr;
793 struct list_head *entry;
796 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
799 int cpt = lnet_net_lock_current();
802 list_for_each(entry, &the_lnet.ln_routers) {
803 rtr = list_entry(entry, struct lnet_peer_ni,
806 if (rtr->lpni_alive_count == 0) {
812 lnet_net_unlock(cpt);
817 set_current_state(TASK_UNINTERRUPTIBLE);
818 schedule_timeout(cfs_time_seconds(1));
823 lnet_router_ni_update_locked(struct lnet_peer_ni *gw, __u32 net)
827 if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
828 list_for_each_entry(rte, &gw->lpni_routes, lr_gwlist) {
829 if (rte->lr_net == net) {
838 lnet_update_ni_status_locked(void)
840 lnet_ni_t *ni = NULL;
844 LASSERT(the_lnet.ln_routing);
846 timeout = router_ping_timeout +
847 MAX(live_router_check_interval, dead_router_check_interval);
849 now = ktime_get_real_seconds();
850 while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
851 if (ni->ni_net->net_lnd->lnd_type == LOLND)
854 if (now < ni->ni_last_alive + timeout)
858 /* re-check with lock */
859 if (now < ni->ni_last_alive + timeout) {
864 LASSERT(ni->ni_status != NULL);
866 if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
867 CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
868 libcfs_nid2str(ni->ni_nid), timeout);
869 /* NB: so far, this is the only place to set
870 * NI status to "down" */
871 ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
878 lnet_destroy_rc_data(lnet_rc_data_t *rcd)
880 LASSERT(list_empty(&rcd->rcd_list));
881 /* detached from network */
882 LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
884 if (rcd->rcd_gateway != NULL) {
885 int cpt = rcd->rcd_gateway->lpni_cpt;
888 lnet_peer_ni_decref_locked(rcd->rcd_gateway);
889 lnet_net_unlock(cpt);
892 if (rcd->rcd_pinginfo != NULL)
893 LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
895 LIBCFS_FREE(rcd, sizeof(*rcd));
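/*
 * Allocate the router checker data (ping buffer and MD) for @gateway.
 * The net lock is dropped for the allocation, so the gateway is
 * re-validated before the new rcd is attached.
 */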
898 static lnet_rc_data_t *
899 lnet_create_rc_data_locked(struct lnet_peer_ni *gateway)
901 lnet_rc_data_t *rcd = NULL;
902 struct lnet_ping_info *pi;
906 lnet_net_unlock(gateway->lpni_cpt);
908 LIBCFS_ALLOC(rcd, sizeof(*rcd));
912 LNetInvalidateHandle(&rcd->rcd_mdh);
913 INIT_LIST_HEAD(&rcd->rcd_list);
915 LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
919 for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
920 pi->pi_ni[i].ns_nid = LNET_NID_ANY;
921 pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
923 rcd->rcd_pinginfo = pi;
925 LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
926 rc = LNetMDBind((lnet_md_t){.start = pi,
928 .length = LNET_PINGINFO_SIZE,
929 .threshold = LNET_MD_THRESH_INF,
930 .options = LNET_MD_TRUNCATE,
931 .eq_handle = the_lnet.ln_rc_eqh},
935 CERROR("Can't bind MD: %d\n", rc);
940 lnet_net_lock(gateway->lpni_cpt);
941 /* router table changed or someone has created rcd for this gateway */
942 if (!lnet_isrouter(gateway) || gateway->lpni_rcd != NULL) {
943 lnet_net_unlock(gateway->lpni_cpt);
947 lnet_peer_ni_addref_locked(gateway);
948 rcd->rcd_gateway = gateway;
949 gateway->lpni_rcd = rcd;
950 gateway->lpni_ping_notsent = 0;
956 if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
957 rc = LNetMDUnlink(rcd->rcd_mdh);
960 lnet_destroy_rc_data(rcd);
963 lnet_net_lock(gateway->lpni_cpt);
964 return gateway->lpni_rcd;
968 lnet_router_check_interval (struct lnet_peer_ni *rtr)
972 secs = rtr->lpni_alive ? live_router_check_interval :
973 dead_router_check_interval;
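/*
 * Ping @rtr if its check interval has expired: time out a previous
 * ping, flush pending notifications, and issue a new LNetGet() ping
 * when one is due.
 */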
981 lnet_ping_router_locked (struct lnet_peer_ni *rtr)
983 lnet_rc_data_t *rcd = NULL;
984 cfs_time_t now = cfs_time_current();
988 lnet_peer_ni_addref_locked(rtr);
990 if (rtr->lpni_ping_deadline != 0 && /* ping timed out? */
991 cfs_time_after(now, rtr->lpni_ping_deadline))
992 lnet_notify_locked(rtr, 1, 0, now);
994 /* Run any outstanding notifications */
995 ni = lnet_get_next_ni_locked(rtr->lpni_net, NULL);
996 lnet_ni_notify_locked(ni, rtr);
998 if (!lnet_isrouter(rtr) ||
999 the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
1000 /* router table changed or router checker is shutting down */
1001 lnet_peer_ni_decref_locked(rtr);
1005 rcd = rtr->lpni_rcd != NULL ?
1006 rtr->lpni_rcd : lnet_create_rc_data_locked(rtr);
1011 secs = lnet_router_check_interval(rtr);
1014 "rtr %s %d: deadline %lu ping_notsent %d alive %d "
1015 "alive_count %d lpni_ping_timestamp %lu\n",
1016 libcfs_nid2str(rtr->lpni_nid), secs,
1017 rtr->lpni_ping_deadline, rtr->lpni_ping_notsent,
1018 rtr->lpni_alive, rtr->lpni_alive_count, rtr->lpni_ping_timestamp);
1020 if (secs != 0 && !rtr->lpni_ping_notsent &&
1021 cfs_time_after(now, cfs_time_add(rtr->lpni_ping_timestamp,
1022 cfs_time_seconds(secs)))) {
1024 lnet_process_id_t id;
1025 lnet_handle_md_t mdh;
1027 id.nid = rtr->lpni_nid;
1028 id.pid = LNET_PID_LUSTRE;
1029 CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
1031 rtr->lpni_ping_notsent = 1;
1032 rtr->lpni_ping_timestamp = now;
1036 if (rtr->lpni_ping_deadline == 0) {
1037 rtr->lpni_ping_deadline =
1038 cfs_time_shift(router_ping_timeout);
1041 lnet_net_unlock(rtr->lpni_cpt);
1043 rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
1044 LNET_PROTO_PING_MATCHBITS, 0);
1046 lnet_net_lock(rtr->lpni_cpt);
1048 rtr->lpni_ping_notsent = 0; /* no event pending */
1051 lnet_peer_ni_decref_locked(rtr);
1056 lnet_router_checker_start(void)
1060 struct task_struct *task;
1062 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1064 if (check_routers_before_use &&
1065 dead_router_check_interval <= 0) {
1066 LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
1067 " set if 'check_routers_before_use' is set"
1068 "\n");
1072 sema_init(&the_lnet.ln_rc_signal, 0);
1074 rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
1076 CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
1080 the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
1081 task = kthread_run(lnet_router_checker, NULL, "router_checker");
1084 CERROR("Can't start router checker thread: %d\n", rc);
1085 /* block until event callback signals exit */
1086 down(&the_lnet.ln_rc_signal);
1087 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1089 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1093 if (check_routers_before_use) {
1094 /* Note that a helpful side-effect of pinging all known routers
1095 * at startup is that it makes them drop stale connections they
1096 * may have to a previous instance of me. */
1097 lnet_wait_known_routerstate();
1104 lnet_router_checker_stop (void)
1108 if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
1111 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1112 the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
1113 /* wakeup the RC thread if it's sleeping */
1114 wake_up(&the_lnet.ln_rc_waitq);
1116 /* block until event callback signals exit */
1117 down(&the_lnet.ln_rc_signal);
1118 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1120 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1126 lnet_prune_rc_data(int wait_unlink)
1128 lnet_rc_data_t *rcd;
1129 lnet_rc_data_t *tmp;
1130 struct lnet_peer_ni *lp;
1131 struct list_head head;
1134 if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
1135 list_empty(&the_lnet.ln_rcd_deathrow) &&
1136 list_empty(&the_lnet.ln_rcd_zombie)))
1139 INIT_LIST_HEAD(&head);
1141 lnet_net_lock(LNET_LOCK_EX);
1143 if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
1144 /* router checker is stopping, prune all */
1145 list_for_each_entry(lp, &the_lnet.ln_routers,
1147 if (lp->lpni_rcd == NULL)
1150 LASSERT(list_empty(&lp->lpni_rcd->rcd_list));
1151 list_add(&lp->lpni_rcd->rcd_list,
1152 &the_lnet.ln_rcd_deathrow);
1153 lp->lpni_rcd = NULL;
1157 /* unlink all RCDs on deathrow list */
1158 list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
1160 if (!list_empty(&head)) {
1161 lnet_net_unlock(LNET_LOCK_EX);
1163 list_for_each_entry(rcd, &head, rcd_list)
1164 LNetMDUnlink(rcd->rcd_mdh);
1166 lnet_net_lock(LNET_LOCK_EX);
1169 list_splice_init(&head, &the_lnet.ln_rcd_zombie);
1171 /* release all zombie RCDs */
1172 while (!list_empty(&the_lnet.ln_rcd_zombie)) {
1173 list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
1175 if (LNetHandleIsInvalid(rcd->rcd_mdh))
1176 list_move(&rcd->rcd_list, &head);
1179 wait_unlink = wait_unlink &&
1180 !list_empty(&the_lnet.ln_rcd_zombie);
1182 lnet_net_unlock(LNET_LOCK_EX);
1184 while (!list_empty(&head)) {
1185 rcd = list_entry(head.next,
1186 lnet_rc_data_t, rcd_list);
1187 list_del_init(&rcd->rcd_list);
1188 lnet_destroy_rc_data(rcd);
1195 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
1196 "Waiting for rc buffers to unlink\n");
1197 set_current_state(TASK_UNINTERRUPTIBLE);
1198 schedule_timeout(cfs_time_seconds(1) / 4);
1200 lnet_net_lock(LNET_LOCK_EX);
1203 lnet_net_unlock(LNET_LOCK_EX);
1207 * This function is called to check if the RC should block indefinitely.
1208 * It's called from lnet_router_checker() as well as being passed to
1209 * wait_event_interruptible() to avoid the lost wake_up problem.
1211 * When it's called from wait_event_interruptible() it is necessary to
1212 * also not sleep if the rc state is not running to avoid a deadlock
1213 * when the system is shutting down
1216 lnet_router_checker_active(void)
1218 if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING)
1221 /* Router Checker thread needs to run when routing is enabled in
1222 * order to call lnet_update_ni_status_locked() */
1223 if (the_lnet.ln_routing)
1226 return !list_empty(&the_lnet.ln_routers) &&
1227 (live_router_check_interval > 0 ||
1228 dead_router_check_interval > 0);
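/*
 * Main loop of the router checker thread: ping each known router,
 * update local NI status when routing is enabled, and prune unused
 * router checker data until asked to shut down.
 */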
1232 lnet_router_checker(void *arg)
1234 struct lnet_peer_ni *rtr;
1235 struct list_head *entry;
1237 cfs_block_allsigs();
1239 while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
1244 cpt = lnet_net_lock_current();
1246 version = the_lnet.ln_routers_version;
1248 list_for_each(entry, &the_lnet.ln_routers) {
1249 rtr = list_entry(entry, struct lnet_peer_ni,
1252 cpt2 = rtr->lpni_cpt;
1254 lnet_net_unlock(cpt);
1257 /* the routers list has changed */
1258 if (version != the_lnet.ln_routers_version)
1262 lnet_ping_router_locked(rtr);
1264 /* NB dropped lock */
1265 if (version != the_lnet.ln_routers_version) {
1266 /* the routers list has changed */
1271 if (the_lnet.ln_routing)
1272 lnet_update_ni_status_locked();
1274 lnet_net_unlock(cpt);
1276 lnet_prune_rc_data(0); /* don't wait for UNLINK */
1278 /* Calling schedule_timeout() here always adds 1 to the load average
1279 * because the kernel counts # active tasks as nr_running
1280 * + nr_uninterruptible. */
1281 /* if there are any routes then wake up every second. If
1282 * there are no routes then sleep indefinitely until woken
1283 * up by a user adding a route */
1284 if (!lnet_router_checker_active())
1285 wait_event_interruptible(the_lnet.ln_rc_waitq,
1286 lnet_router_checker_active());
1288 wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
1290 cfs_time_seconds(1));
1293 lnet_prune_rc_data(1); /* wait for UNLINK */
1295 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1296 up(&the_lnet.ln_rc_signal);
1297 /* The unlink event callback will signal final completion */
1302 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
1304 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1306 while (--npages >= 0)
1307 __free_page(rb->rb_kiov[npages].kiov_page);
1309 LIBCFS_FREE(rb, sz);
1312 static lnet_rtrbuf_t *
1313 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1315 int npages = rbp->rbp_npages;
1316 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1321 LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
1327 for (i = 0; i < npages; i++) {
1328 page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
1329 GFP_KERNEL | __GFP_ZERO);
1332 __free_page(rb->rb_kiov[i].kiov_page);
1334 LIBCFS_FREE(rb, sz);
1338 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
1339 rb->rb_kiov[i].kiov_offset = 0;
1340 rb->rb_kiov[i].kiov_page = page;
1347 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
1349 int npages = rbp->rbp_npages;
1351 struct list_head tmp;
1353 if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1356 INIT_LIST_HEAD(&tmp);
1359 lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
1360 list_splice_init(&rbp->rbp_bufs, &tmp);
1361 rbp->rbp_req_nbuffers = 0;
1362 rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1363 rbp->rbp_mincredits = 0;
1364 lnet_net_unlock(cpt);
1366 /* Free buffers on the free list. */
1367 while (!list_empty(&tmp)) {
1368 rb = list_entry(tmp.next, lnet_rtrbuf_t, rb_list);
1369 list_del(&rb->rb_list);
1370 lnet_destroy_rtrbuf(rb, npages);
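/*
 * Grow or shrink @rbp towards @nbufs buffers.  Shrinking is done
 * lazily: excess buffers are freed as they are returned to the pool.
 */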
1375 lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
1377 struct list_head rb_list;
1380 int num_buffers = 0;
1382 int npages = rbp->rbp_npages;
1385 /* If we are called for fewer buffers than already in the pool, we
1386 * just lower the req_nbuffers number and excess buffers will be
1387 * thrown away as they are returned to the free list. Credits
1388 * then get adjusted as well.
1389 * If we already have enough buffers allocated to serve the
1390 * increase requested, then we can treat that the same way as we
1391 * do the decrease. */
1392 num_rb = nbufs - rbp->rbp_nbuffers;
1393 if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
1394 rbp->rbp_req_nbuffers = nbufs;
1395 lnet_net_unlock(cpt);
1398 /* store the older value of rbp_req_nbuffers and then set it to
1399 * the new request to prevent lnet_return_rx_credits_locked() from
1400 * freeing buffers that we need to keep around */
1401 old_req_nbufs = rbp->rbp_req_nbuffers;
1402 rbp->rbp_req_nbuffers = nbufs;
1403 lnet_net_unlock(cpt);
1405 INIT_LIST_HEAD(&rb_list);
1407 /* allocate the buffers on a local list first. If all buffers are
1408 * allocated successfully then join this list to the rbp buffer
1409 * list. If not then free all allocated buffers. */
1410 while (num_rb-- > 0) {
1411 rb = lnet_new_rtrbuf(rbp, cpt);
1413 CERROR("Failed to allocate %d route bufs of %d pages\n",
1417 rbp->rbp_req_nbuffers = old_req_nbufs;
1418 lnet_net_unlock(cpt);
1423 list_add(&rb->rb_list, &rb_list);
1429 list_splice_tail(&rb_list, &rbp->rbp_bufs);
1430 rbp->rbp_nbuffers += num_buffers;
1431 rbp->rbp_credits += num_buffers;
1432 rbp->rbp_mincredits = rbp->rbp_credits;
1433 /* We need to schedule blocked msgs using the newly
1434 * allocated buffers. */
1435 while (!list_empty(&rbp->rbp_bufs) &&
1436 !list_empty(&rbp->rbp_msgs))
1437 lnet_schedule_blocked_locked(rbp);
1439 lnet_net_unlock(cpt);
1444 while (!list_empty(&rb_list)) {
1445 rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
1446 list_del(&rb->rb_list);
1447 lnet_destroy_rtrbuf(rb, npages);
1454 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
1456 INIT_LIST_HEAD(&rbp->rbp_msgs);
1457 INIT_LIST_HEAD(&rbp->rbp_bufs);
1459 rbp->rbp_npages = npages;
1460 rbp->rbp_credits = 0;
1461 rbp->rbp_mincredits = 0;
1465 lnet_rtrpools_free(int keep_pools)
1467 lnet_rtrbufpool_t *rtrp;
1470 if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1473 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1474 lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
1475 lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
1476 lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
1480 cfs_percpt_free(the_lnet.ln_rtrpools);
1481 the_lnet.ln_rtrpools = NULL;
1486 lnet_nrb_tiny_calculate(void)
1488 int nrbs = LNET_NRB_TINY;
1490 if (tiny_router_buffers < 0) {
1491 LCONSOLE_ERROR_MSG(0x10c,
1492 "tiny_router_buffers=%d invalid when "
1493 "routing enabled\n", tiny_router_buffers);
1497 if (tiny_router_buffers > 0)
1498 nrbs = tiny_router_buffers;
1500 nrbs /= LNET_CPT_NUMBER;
1501 return max(nrbs, LNET_NRB_TINY_MIN);
1505 lnet_nrb_small_calculate(void)
1507 int nrbs = LNET_NRB_SMALL;
1509 if (small_router_buffers < 0) {
1510 LCONSOLE_ERROR_MSG(0x10c,
1511 "small_router_buffers=%d invalid when "
1512 "routing enabled\n", small_router_buffers);
1516 if (small_router_buffers > 0)
1517 nrbs = small_router_buffers;
1519 nrbs /= LNET_CPT_NUMBER;
1520 return max(nrbs, LNET_NRB_SMALL_MIN);
1524 lnet_nrb_large_calculate(void)
1526 int nrbs = LNET_NRB_LARGE;
1528 if (large_router_buffers < 0) {
1529 LCONSOLE_ERROR_MSG(0x10c,
1530 "large_router_buffers=%d invalid when "
1531 "routing enabled\n", large_router_buffers);
1535 if (large_router_buffers > 0)
1536 nrbs = large_router_buffers;
1538 nrbs /= LNET_CPT_NUMBER;
1539 return max(nrbs, LNET_NRB_LARGE_MIN);
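/*
 * Allocate the per-CPT tiny/small/large router buffer pools and enable
 * routing, honouring the "forwarding" module parameter.
 */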
1543 lnet_rtrpools_alloc(int im_a_router)
1545 lnet_rtrbufpool_t *rtrp;
1552 if (!strcmp(forwarding, "")) {
1553 /* not set either way */
1556 } else if (!strcmp(forwarding, "disabled")) {
1557 /* explicitly disabled */
1559 } else if (!strcmp(forwarding, "enabled")) {
1560 /* explicitly enabled */
1562 LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
1563 "'enabled' or 'disabled'\n");
1567 nrb_tiny = lnet_nrb_tiny_calculate();
1571 nrb_small = lnet_nrb_small_calculate();
1575 nrb_large = lnet_nrb_large_calculate();
1579 the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1581 sizeof(lnet_rtrbufpool_t));
1582 if (the_lnet.ln_rtrpools == NULL) {
1583 LCONSOLE_ERROR_MSG(0x10c,
1584 "Failed to initialize router buffe pool\n");
1588 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1589 lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
1590 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1595 lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
1596 LNET_NRB_SMALL_PAGES);
1597 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1602 lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
1603 LNET_NRB_LARGE_PAGES);
1604 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1610 lnet_net_lock(LNET_LOCK_EX);
1611 the_lnet.ln_routing = 1;
1612 lnet_net_unlock(LNET_LOCK_EX);
1616 lnet_rtrpools_free(0);
1621 lnet_rtrpools_adjust_helper(int tiny, int small, int large)
1626 lnet_rtrbufpool_t *rtrp;
1628 /* If the provided values for each buffer pool are different from the
1629 * configured values, we need to take action. */
1631 tiny_router_buffers = tiny;
1632 nrb = lnet_nrb_tiny_calculate();
1633 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1634 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1641 small_router_buffers = small;
1642 nrb = lnet_nrb_small_calculate();
1643 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1644 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1651 large_router_buffers = large;
1652 nrb = lnet_nrb_large_calculate();
1653 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1654 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1665 lnet_rtrpools_adjust(int tiny, int small, int large)
1667 /* this function doesn't revert the changes if adding new buffers
1668 * failed. It's up to the user space caller to revert the
1669 * changes. */
1671 if (!the_lnet.ln_routing)
1674 return lnet_rtrpools_adjust_helper(tiny, small, large);
1678 lnet_rtrpools_enable(void)
1682 if (the_lnet.ln_routing)
1685 if (the_lnet.ln_rtrpools == NULL)
1686 /* If routing is turned off, and we have never
1687 * initialized the pools before, just call the
1688 * standard buffer pool allocation routine as
1689 * if we are just configuring this for the first
1690 * time. */
1691 rc = lnet_rtrpools_alloc(1);
1693 rc = lnet_rtrpools_adjust_helper(0, 0, 0);
1697 lnet_net_lock(LNET_LOCK_EX);
1698 the_lnet.ln_routing = 1;
1700 the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
1701 lnet_net_unlock(LNET_LOCK_EX);
1707 lnet_rtrpools_disable(void)
1709 if (!the_lnet.ln_routing)
1712 lnet_net_lock(LNET_LOCK_EX);
1713 the_lnet.ln_routing = 0;
1714 the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1716 tiny_router_buffers = 0;
1717 small_router_buffers = 0;
1718 large_router_buffers = 0;
1719 lnet_net_unlock(LNET_LOCK_EX);
1720 lnet_rtrpools_free(1);
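/*
 * Entry point for LNDs (and userspace, when @ni is NULL) to report
 * that peer @nid was seen alive or dead at time @when.
 */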
1724 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
1726 struct lnet_peer_ni *lp = NULL;
1727 cfs_time_t now = cfs_time_current();
1728 int cpt = lnet_cpt_of_nid(nid, ni);
1730 LASSERT (!in_interrupt ());
1732 CDEBUG (D_NET, "%s notifying %s: %s\n",
1733 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1734 libcfs_nid2str(nid),
1735 alive ? "up" : "down");
1737 if (ni != NULL &&
1738 LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1739 CWARN("Ignoring notification of %s %s by %s (different net)\n",
1740 libcfs_nid2str(nid), alive ? "birth" : "death",
1741 libcfs_nid2str(ni->ni_nid));
1745 /* can't do predictions... */
1746 if (cfs_time_after(when, now)) {
1747 CWARN("Ignoring prediction from %s of %s %s "
1748 "%ld seconds in the future\n",
1749 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1750 libcfs_nid2str(nid), alive ? "up" : "down",
1751 cfs_duration_sec(cfs_time_sub(when, now)));
1755 if (ni != NULL && !alive && /* LND telling me she's down */
1756 !auto_down) { /* auto-down disabled */
1757 CDEBUG(D_NET, "Auto-down disabled\n");
1763 if (the_lnet.ln_shutdown) {
1764 lnet_net_unlock(cpt);
1768 lp = lnet_find_peer_ni_locked(nid);
1771 lnet_net_unlock(cpt);
1772 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1776 /* We can't fully trust the LND to report the exact peer last_alive
1777 * when it notifies us about a dead peer. For example ksocklnd can
1778 * call us with when == _time_when_the_node_was_booted_ if
1779 * no connections were successfully established */
1780 if (ni != NULL && !alive && when < lp->lpni_last_alive)
1781 when = lp->lpni_last_alive;
1783 lnet_notify_locked(lp, ni == NULL, alive, when);
1786 lnet_ni_notify_locked(ni, lp);
1788 lnet_peer_ni_decref_locked(lp);
1790 lnet_net_unlock(cpt);
1793 EXPORT_SYMBOL(lnet_notify);