/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/random.h>
#include <lnet/lib-lnet.h>

#define LNET_NRB_TINY_MIN	512	/* min value for each CPT */
#define LNET_NRB_TINY		(LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN	4096	/* min value for each CPT */
#define LNET_NRB_SMALL		(LNET_NRB_SMALL_MIN * 4)
#define LNET_NRB_SMALL_PAGES	1
#define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
#define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES	((LNET_MTU + PAGE_SIZE - 1) >> \
				 PAGE_SHIFT)

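/*
 * Router buffers come in three classes: tiny (zero payload), small
 * (one page of payload) and large (a full LNET_MTU, rounded up to
 * whole pages).  The defaults above are 4x the per-CPT minimum and
 * are divided across CPTs by the lnet_nrb_*_calculate() helpers
 * below.
 */
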
static char *forwarding = "";
module_param(forwarding, charp, 0444);
MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers;
module_param(tiny_router_buffers, int, 0444);
MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");

static int small_router_buffers;
module_param(small_router_buffers, int, 0444);
MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");

static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");

static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");

static int auto_down = 1;
module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");

int
lnet_peer_buffer_credits(struct lnet_net *net)
{
	/* NI option overrides LNet default */
	if (net->net_tunables.lct_peer_rtr_credits > 0)
		return net->net_tunables.lct_peer_rtr_credits;
	if (peer_buffer_credits > 0)
		return peer_buffer_credits;

	/* As an approximation, allow this peer the same number of router
	 * buffers as it is allowed outstanding sends */
	return net->net_tunables.lct_peer_tx_credits;
}

static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");

int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");

static int dead_router_check_interval = 60;
module_param(dead_router_check_interval, int, 0644);
MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 60;
module_param(live_router_check_interval, int, 0644);
MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
module_param(router_ping_timeout, int, 0644);
MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");

int
lnet_peers_start_down(void)
{
	return check_routers_before_use;
}

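/*
 * Record an aliveness change for @lp.  Stale reports (older timestamp)
 * and duplicates of already-known state are dropped; otherwise the
 * change is flagged as a pending notification, which is delivered to
 * the LND by lnet_ni_notify_locked() below.
 */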
void
lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
		   time64_t when)
{
	if (lp->lpni_timestamp > when) { /* out of date information */
		CDEBUG(D_NET, "Out of date\n");
		return;
	}

	/*
	 * This function can be called with different cpt locks being
	 * held. lpni_alive_count modification needs to be properly protected.
	 * Significant reads to lpni_alive_count are also protected with
	 * the same lock.
	 */
	spin_lock(&lp->lpni_lock);

	lp->lpni_timestamp = when; /* update timestamp */

	if (lp->lpni_alive_count != 0 &&     /* got old news */
	    (!lp->lpni_alive) == (!alive)) { /* new date for old news */
		spin_unlock(&lp->lpni_lock);
		CDEBUG(D_NET, "Old news\n");
		return;
	}

	/* Flag that notification is outstanding */

	lp->lpni_alive_count++;
	lp->lpni_alive = (alive) ? 1 : 0;
	lp->lpni_notify = 1;
	lp->lpni_notifylnd = notifylnd;
	if (lp->lpni_alive)
		lp->lpni_ping_feats = LNET_PING_FEAT_INVAL; /* reset */

	spin_unlock(&lp->lpni_lock);

	CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lpni_nid), alive);
}

/*
 * This function will always be called with lp->lpni_cpt lock held.
 */
static void
lnet_ni_notify_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
{
	int alive;
	int notifylnd;

	/* Notify only in 1 thread at any time to ensure ordered notification.
	 * NB individual events can be missed; the only guarantee is that you
	 * always get the most recent news */

	spin_lock(&lp->lpni_lock);

	if (lp->lpni_notifying || ni == NULL) {
		spin_unlock(&lp->lpni_lock);
		return;
	}

	lp->lpni_notifying = 1;

	/*
	 * lp->lpni_notify needs to be protected because it can be set in
	 * lnet_notify_locked().
	 */
	while (lp->lpni_notify) {
		alive	  = lp->lpni_alive;
		notifylnd = lp->lpni_notifylnd;

		lp->lpni_notifylnd = 0;
		lp->lpni_notify	   = 0;

		if (notifylnd && ni->ni_net->net_lnd->lnd_notify != NULL) {
			spin_unlock(&lp->lpni_lock);
			lnet_net_unlock(lp->lpni_cpt);

			/* A new notification could happen now; I'll handle it
			 * when control returns to me */
			(ni->ni_net->net_lnd->lnd_notify)(ni, lp->lpni_nid,
							  alive);

			lnet_net_lock(lp->lpni_cpt);
			spin_lock(&lp->lpni_lock);
		}
	}

	lp->lpni_notifying = 0;
	spin_unlock(&lp->lpni_lock);
}

struct lnet_remotenet *
lnet_find_rnet_locked(__u32 net)
{
	struct lnet_remotenet *rnet;
	struct list_head *tmp;
	struct list_head *rn_list;

	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

	rn_list = lnet_net2rnethash(net);
	list_for_each(tmp, rn_list) {
		rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);

		if (rnet->lrn_net == net)
			return rnet;
	}
	return NULL;
}

int
lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
	       unsigned int priority)
{
	return -EINVAL;
}

/* TODO: reimplement lnet_check_routes() */

int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
	return -EINVAL;
}

void
lnet_destroy_routes(void)
{
	lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}

int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
{
	struct lnet_rtrbufpool *rbp;
	int i, rc = -ENOENT, j;

	if (the_lnet.ln_rtrpools == NULL)
		return rc;

	cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
		if (i != cpt)
			continue;

		lnet_net_lock(i);
		for (j = 0; j < LNET_NRBPOOLS; j++) {
			pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
			pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
			pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
			pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;
		}
		lnet_net_unlock(i);
		rc = 0;
		break;
	}

	lnet_net_lock(LNET_LOCK_EX);
	pool_cfg->pl_routing = the_lnet.ln_routing;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}

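/*
 * Return the idx'th configured route: walk every remote net in the
 * hash and count idx down until it hits zero.
 */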
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
	struct list_head *e1;
	struct list_head *e2;
	struct lnet_remotenet *rnet;
	struct lnet_route *route;
	int cpt;
	int i;
	struct list_head *rn_list;

	cpt = lnet_net_lock_current();

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];
		list_for_each(e1, rn_list) {
			rnet = list_entry(e1, struct lnet_remotenet, lrn_list);

			list_for_each(e2, &rnet->lrn_routes) {
				route = list_entry(e2, struct lnet_route,
						   lr_list);
				if (idx-- == 0) {
					*net      = rnet->lrn_net;
					*hops     = route->lr_hops;
					*priority = route->lr_priority;
					*gateway  = route->lr_gateway->lp_primary_nid;
					*alive    = lnet_is_route_alive(route);
					lnet_net_unlock(cpt);
					return 0;
				}
			}
		}
	}

	lnet_net_unlock(cpt);
	return -ENOENT;
}

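/*
 * Ping info is exchanged in the sender's byte order; swab it in place
 * when the peer's endianness differs.  pi_nnis is clamped to the
 * number of NI slots actually allocated in this buffer before the
 * per-NI status entries are swabbed.
 */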
static void
lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni_status *stat;
	int nnis;
	int i;

	__swab32s(&pbuf->pb_info.pi_magic);
	__swab32s(&pbuf->pb_info.pi_features);
	__swab32s(&pbuf->pb_info.pi_pid);
	__swab32s(&pbuf->pb_info.pi_nnis);
	nnis = pbuf->pb_info.pi_nnis;
	if (nnis > pbuf->pb_nnis)
		nnis = pbuf->pb_nnis;
	for (i = 0; i < nnis; i++) {
		stat = &pbuf->pb_info.pi_ni[i];
		__swab64s(&stat->ns_nid);
		__swab32s(&stat->ns_status);
	}
}

/* TODO: reimplement */
static void
lnet_parse_rc_info(struct lnet_rc_data *rcd)
{
}

static void
lnet_router_checker_event(struct lnet_event *event)
{
	struct lnet_rc_data *rcd = event->md.user_ptr;
	struct lnet_peer_ni *lp;

	LASSERT(rcd != NULL);

	if (event->unlinked) {
		LNetInvalidateMDHandle(&rcd->rcd_mdh);
		return;
	}

	LASSERT(event->type == LNET_EVENT_SEND ||
		event->type == LNET_EVENT_REPLY);

	lp = rcd->rcd_gateway;
	LASSERT(lp != NULL);

	/* NB: this is called while holding lnet_res_lock; a few places
	 * need to hold both locks at the same time, so take care with
	 * lock ordering */
	lnet_net_lock(lp->lpni_cpt);
	if (!lnet_isrouter(lp) || lp->lpni_rcd != rcd) {
		/* ignore if no longer a router or rcd is replaced */
		goto out;
	}

	if (event->type == LNET_EVENT_SEND) {
		if (event->status == 0)
			goto out;
	}

	/* LNET_EVENT_REPLY */
	/* A successful REPLY means the router is up. If _any_ comms
	 * to the router fail I assume it's down (this will happen if
	 * we ping alive routers to try to detect router death before
	 * apps get burned). */

	lnet_notify_locked(lp, 1, !event->status, ktime_get_seconds());

	/* The router checker will wake up very shortly and do the
	 * actual notification.
	 * XXX If 'lp' stops being a router before then, it will still
	 * have the notification pending!!! */

	if (avoid_asym_router_failure && event->status == 0)
		lnet_parse_rc_info(rcd);

out:
	lnet_net_unlock(lp->lpni_cpt);
}

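/*
 * Block until the state of every known router has been discovered,
 * re-polling the router list once per second.
 */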
void
lnet_wait_known_routerstate(void)
{
	struct lnet_peer *rtr;
	struct list_head *entry;
	int all_known;

	LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);

	for (;;) {
		int cpt = lnet_net_lock_current();

		all_known = 1;
		list_for_each(entry, &the_lnet.ln_routers) {
			rtr = list_entry(entry, struct lnet_peer,
					 lp_rtr_list);

			spin_lock(&rtr->lp_lock);

			if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
				all_known = 0;
				spin_unlock(&rtr->lp_lock);
				break;
			}
			spin_unlock(&rtr->lp_lock);
		}

		lnet_net_unlock(cpt);

		if (all_known)
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}
}

/* TODO: reimplement */
static void
lnet_router_ni_update_locked(struct lnet_peer_ni *gw, __u32 net)
{
	struct lnet_route *rte;
	struct lnet_peer *lp;

	if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0)
		lp = gw->lpni_peer_net->lpn_peer;
	else
		return;

	list_for_each_entry(rte, &lp->lp_routes, lr_gwlist) {
		if (rte->lr_net == net) {
			break;
		}
	}
}

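/*
 * Mark a local NI down if it has seen no traffic for longer than the
 * ping timeout plus the longer of the live/dead router check
 * intervals.  ni_last_alive is re-checked under the NI lock before
 * the status is actually changed.
 */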
static void
lnet_update_ni_status_locked(void)
{
	struct lnet_ni *ni = NULL;
	time64_t now;
	time64_t timeout;

	LASSERT(the_lnet.ln_routing);

	timeout = router_ping_timeout +
		  MAX(live_router_check_interval, dead_router_check_interval);

	now = ktime_get_real_seconds();
	while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
		if (ni->ni_net->net_lnd->lnd_type == LOLND)
			continue;

		if (now < ni->ni_last_alive + timeout)
			continue;

		lnet_ni_lock(ni);
		/* re-check with lock */
		if (now < ni->ni_last_alive + timeout) {
			lnet_ni_unlock(ni);
			continue;
		}

		LASSERT(ni->ni_status != NULL);

		if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
			CDEBUG(D_NET, "NI(%s:%lld) status changed to down\n",
			       libcfs_nid2str(ni->ni_nid), timeout);
			/* NB: so far, this is the only place to set
			 * NI status to "down" */
			ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
		}
		lnet_ni_unlock(ni);
	}
}

int lnet_router_pre_mt_start(void)
{
	int rc;

	if (check_routers_before_use &&
	    dead_router_check_interval <= 0) {
		LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
				   " set if 'check_routers_before_use' is set"
				   "\n");
		return -EINVAL;
	}

	rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
	if (rc != 0) {
		CERROR("Can't allocate EQ(0): %d\n", rc);
		return -ENOMEM;
	}

	return 0;
}

void lnet_router_post_mt_start(void)
{
	if (check_routers_before_use) {
		/* Note that a helpful side-effect of pinging all known routers
		 * at startup is that it makes them drop stale connections they
		 * may have to a previous instance of me. */
		lnet_wait_known_routerstate();
	}
}

void
lnet_router_cleanup(void)
{
	int rc;

	rc = LNetEQFree(the_lnet.ln_rc_eqh);
	LASSERT(rc == 0);
}

void
lnet_prune_rc_data(int wait_unlink)
{
	wait_unlink = wait_unlink; /* silence unused-parameter warning */
}

/*
 * This function is called from the monitor thread to check if there are
 * any active routers that need to be checked.
 */
bool
lnet_router_checker_active(void)
{
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return true;

	/* Router Checker thread needs to run when routing is enabled in
	 * order to call lnet_update_ni_status_locked() */
	if (the_lnet.ln_routing)
		return true;

	/* if there are routers that need to be cleaned up then do so */
	if (!list_empty(&the_lnet.ln_rcd_deathrow) ||
	    !list_empty(&the_lnet.ln_rcd_zombie))
		return true;

	return !list_empty(&the_lnet.ln_routers) &&
		(live_router_check_interval > 0 ||
		 dead_router_check_interval > 0);
}

void
lnet_check_routers(void)
{
	struct lnet_peer *rtr;
	struct list_head *entry;
	__u64 version;
	int cpt;

	cpt = lnet_net_lock_current();
rescan:
	version = the_lnet.ln_routers_version;

	list_for_each(entry, &the_lnet.ln_routers) {
		rtr = list_entry(entry, struct lnet_peer,
				 lp_rtr_list);

		/* TODO use discovery to determine if router is alive */

		/* NB dropped lock */
		if (version != the_lnet.ln_routers_version) {
			/* the routers list has changed */
			goto rescan;
		}
	}

	if (the_lnet.ln_routing)
		lnet_update_ni_status_locked();

	lnet_net_unlock(cpt);

	lnet_prune_rc_data(0); /* don't wait for UNLINK */
}

static void
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);

	while (--npages >= 0)
		__free_page(rb->rb_kiov[npages].kiov_page);

	LIBCFS_FREE(rb, sz);
}

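/*
 * Allocate a router buffer and its payload pages on the NUMA node
 * associated with @cpt.  On any page allocation failure everything
 * allocated so far is unwound and NULL is returned.
 */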
static struct lnet_rtrbuf *
lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
	struct page *page;
	struct lnet_rtrbuf *rb;
	int i;

	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
	if (rb == NULL)
		return NULL;

	rb->rb_pool = rbp;

	for (i = 0; i < npages; i++) {
		page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
					  GFP_KERNEL | __GFP_ZERO);
		if (page == NULL) {
			while (--i >= 0)
				__free_page(rb->rb_kiov[i].kiov_page);

			LIBCFS_FREE(rb, sz);
			return NULL;
		}

		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
		rb->rb_kiov[i].kiov_offset = 0;
		rb->rb_kiov[i].kiov_page = page;
	}

	return rb;
}

static void
lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	struct lnet_rtrbuf *rb;
	struct list_head tmp;

	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
		return;

	INIT_LIST_HEAD(&tmp);

	lnet_net_lock(cpt);
	list_splice_init(&rbp->rbp_msgs, &tmp);
	lnet_drop_routed_msgs_locked(&tmp, cpt);
	list_splice_init(&rbp->rbp_bufs, &tmp);
	rbp->rbp_req_nbuffers = 0;
	rbp->rbp_nbuffers = rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
	lnet_net_unlock(cpt);

	/* Free buffers on the free list. */
	while (!list_empty(&tmp)) {
		rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}
}

static int
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
	struct list_head rb_list;
	struct lnet_rtrbuf *rb;
	int num_rb;
	int num_buffers = 0;
	int old_req_nbufs;
	int npages = rbp->rbp_npages;

	lnet_net_lock(cpt);
	/* If we are called for less buffers than already in the pool, we
	 * just lower the req_nbuffers number and excess buffers will be
	 * thrown away as they are returned to the free list. Credits
	 * then get adjusted as well.
	 * If we already have enough buffers allocated to serve the
	 * increase requested, then we can treat that the same way as we
	 * do the decrease. */
	num_rb = nbufs - rbp->rbp_nbuffers;
	if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
		rbp->rbp_req_nbuffers = nbufs;
		lnet_net_unlock(cpt);
		return 0;
	}
	/* store the older value of rbp_req_nbuffers and then set it to
	 * the new request to prevent lnet_return_rx_credits_locked() from
	 * freeing buffers that we need to keep around */
	old_req_nbufs = rbp->rbp_req_nbuffers;
	rbp->rbp_req_nbuffers = nbufs;
	lnet_net_unlock(cpt);

	INIT_LIST_HEAD(&rb_list);

	/* allocate the buffers on a local list first. If all buffers are
	 * allocated successfully then join this list to the rbp buffer
	 * list. If not then free all allocated buffers. */
	while (num_rb-- > 0) {
		rb = lnet_new_rtrbuf(rbp, cpt);
		if (rb == NULL) {
			CERROR("Failed to allocate %d route bufs of %d pages\n",
			       nbufs, npages);

			lnet_net_lock(cpt);
			rbp->rbp_req_nbuffers = old_req_nbufs;
			lnet_net_unlock(cpt);

			goto failed;
		}

		list_add(&rb->rb_list, &rb_list);
		num_buffers++;
	}

	lnet_net_lock(cpt);

	list_splice_tail(&rb_list, &rbp->rbp_bufs);
	rbp->rbp_nbuffers += num_buffers;
	rbp->rbp_credits += num_buffers;
	rbp->rbp_mincredits = rbp->rbp_credits;
	/* We need to schedule blocked msg using the newly
	 * added buffers. */
	while (!list_empty(&rbp->rbp_bufs) &&
	       !list_empty(&rbp->rbp_msgs))
		lnet_schedule_blocked_locked(rbp);

	lnet_net_unlock(cpt);

	return 0;

failed:
	while (!list_empty(&rb_list)) {
		rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}

	return -ENOMEM;
}

static void
lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
{
	INIT_LIST_HEAD(&rbp->rbp_msgs);
	INIT_LIST_HEAD(&rbp->rbp_bufs);

	rbp->rbp_npages = npages;
	rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
}

void
lnet_rtrpools_free(int keep_pools)
{
	struct lnet_rtrbufpool *rtrp;
	int i;

	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
		return;

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
	}

	if (!keep_pools) {
		cfs_percpt_free(the_lnet.ln_rtrpools);
		the_lnet.ln_rtrpools = NULL;
	}
}

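/*
 * The three helpers below share one pattern: a negative module
 * parameter is rejected, a positive one overrides the default, and
 * the result is divided evenly across CPTs, subject to the per-CPT
 * minimum.
 */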
static int
lnet_nrb_tiny_calculate(void)
{
	int nrbs = LNET_NRB_TINY;

	if (tiny_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "tiny_router_buffers=%d invalid when "
				   "routing enabled\n", tiny_router_buffers);
		return -EINVAL;
	}

	if (tiny_router_buffers > 0)
		nrbs = tiny_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_TINY_MIN);
}

static int
lnet_nrb_small_calculate(void)
{
	int nrbs = LNET_NRB_SMALL;

	if (small_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "small_router_buffers=%d invalid when "
				   "routing enabled\n", small_router_buffers);
		return -EINVAL;
	}

	if (small_router_buffers > 0)
		nrbs = small_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_SMALL_MIN);
}

static int
lnet_nrb_large_calculate(void)
{
	int nrbs = LNET_NRB_LARGE;

	if (large_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "large_router_buffers=%d invalid when "
				   "routing enabled\n", large_router_buffers);
		return -EINVAL;
	}

	if (large_router_buffers > 0)
		nrbs = large_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_LARGE_MIN);
}

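/*
 * Enable routing: the "forwarding" module parameter, when set, takes
 * precedence over the caller's im_a_router flag.  Buffer counts are
 * computed per CPT and all three pools are populated before routing
 * is switched on.
 */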
int
lnet_rtrpools_alloc(int im_a_router)
{
	struct lnet_rtrbufpool *rtrp;
	int nrb_tiny;
	int nrb_small;
	int nrb_large;
	int rc;
	int i;

	if (!strcmp(forwarding, "")) {
		/* not set either way */
		if (!im_a_router)
			return 0;
	} else if (!strcmp(forwarding, "disabled")) {
		/* explicitly disabled */
		return 0;
	} else if (!strcmp(forwarding, "enabled")) {
		/* explicitly enabled */
	} else {
		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
				   "'enabled' or 'disabled'\n");
		return -EINVAL;
	}

	nrb_tiny = lnet_nrb_tiny_calculate();
	if (nrb_tiny < 0)
		return -EINVAL;

	nrb_small = lnet_nrb_small_calculate();
	if (nrb_small < 0)
		return -EINVAL;

	nrb_large = lnet_nrb_large_calculate();
	if (nrb_large < 0)
		return -EINVAL;

	the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
						LNET_NRBPOOLS *
						sizeof(struct lnet_rtrbufpool));
	if (the_lnet.ln_rtrpools == NULL) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "Failed to initialize router buffer pool\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
					      nrb_tiny, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
				  LNET_NRB_SMALL_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
					      nrb_small, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
				  LNET_NRB_LARGE_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
					      nrb_large, i);
		if (rc != 0)
			goto failed;
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;
	lnet_net_unlock(LNET_LOCK_EX);
	return 0;

failed:
	lnet_rtrpools_free(0);
	return rc;
}

static int
lnet_rtrpools_adjust_helper(int tiny, int small, int large)
{
	int nrb = 0;
	int rc = 0;
	int i;
	struct lnet_rtrbufpool *rtrp;

	/* If the provided values for each buffer pool are different than the
	 * configured values, we need to take action. */
	if (tiny >= 0) {
		tiny_router_buffers = tiny;
		nrb = lnet_nrb_tiny_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}
	if (small >= 0) {
		small_router_buffers = small;
		nrb = lnet_nrb_small_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}
	if (large >= 0) {
		large_router_buffers = large;
		nrb = lnet_nrb_large_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

int
lnet_rtrpools_adjust(int tiny, int small, int large)
{
	/* this function doesn't revert the changes if adding new buffers
	 * failed. It's up to the user space caller to revert the
	 * changes. */

	if (!the_lnet.ln_routing)
		return 0;

	return lnet_rtrpools_adjust_helper(tiny, small, large);
}

int
lnet_rtrpools_enable(void)
{
	int rc = 0;

	if (the_lnet.ln_routing)
		return 0;

	if (the_lnet.ln_rtrpools == NULL)
		/* If routing is turned off, and we have never
		 * initialized the pools before, just call the
		 * standard buffer pool allocation routine as
		 * if we are just configuring this for the first
		 * time. */
		rc = lnet_rtrpools_alloc(1);
	else
		rc = lnet_rtrpools_adjust_helper(0, 0, 0);
	if (rc != 0)
		return rc;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;

	the_lnet.ln_ping_target->pb_info.pi_features &=
		~LNET_PING_FEAT_RTE_DISABLED;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}

void
lnet_rtrpools_disable(void)
{
	if (!the_lnet.ln_routing)
		return;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 0;
	the_lnet.ln_ping_target->pb_info.pi_features |=
		LNET_PING_FEAT_RTE_DISABLED;

	tiny_router_buffers = 0;
	small_router_buffers = 0;
	large_router_buffers = 0;
	lnet_net_unlock(LNET_LOCK_EX);
	lnet_rtrpools_free(1);
}

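/*
 * External notification entry point (LNDs and userspace): validates
 * that the reporting NI and the peer share a network, rejects
 * timestamps from the future, honours auto_down, then hands off to
 * lnet_notify_locked()/lnet_ni_notify_locked() under the peer's cpt
 * lock.
 */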
int
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, time64_t when)
{
	struct lnet_peer_ni *lp = NULL;
	time64_t now = ktime_get_seconds();
	int cpt = lnet_cpt_of_nid(nid, ni);

	LASSERT(!in_interrupt());

	CDEBUG(D_NET, "%s notifying %s: %s\n",
	       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
	       libcfs_nid2str(nid),
	       alive ? "up" : "down");

	if (ni != NULL &&
	    LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
		CWARN("Ignoring notification of %s %s by %s (different net)\n",
		      libcfs_nid2str(nid), alive ? "birth" : "death",
		      libcfs_nid2str(ni->ni_nid));
		return -EINVAL;
	}

	/* can't do predictions... */
	if (when > now) {
		CWARN("Ignoring prediction from %s of %s %s "
		      "%lld seconds in the future\n",
		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
		      libcfs_nid2str(nid), alive ? "up" : "down", when - now);
		return -EINVAL;
	}

	if (ni != NULL && !alive &&	/* LND telling me she's down */
	    !auto_down) {		/* auto-down disabled */
		CDEBUG(D_NET, "Auto-down disabled\n");
		return 0;
	}

	lnet_net_lock(cpt);

	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	lp = lnet_find_peer_ni_locked(nid);
	if (lp == NULL) {
		/* nid not found */
		lnet_net_unlock(cpt);
		CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
		return 0;
	}

	/*
	 * It is possible for this function to be called for the same peer
	 * but with different NIs. We want to synchronize the notification
	 * between the different calls. So we will use the lpni_cpt to
	 * grab the net lock.
	 */
	if (lp->lpni_cpt != cpt) {
		lnet_net_unlock(cpt);
		cpt = lp->lpni_cpt;
		lnet_net_lock(cpt);
	}

	/* We can't fully trust the LND to report an exact peer last_alive
	 * when it notifies us about a dead peer. For example ksocklnd can
	 * call us with when == _time_when_the_node_was_booted_ if
	 * no connections were successfully established */
	if (ni != NULL && !alive && when < lp->lpni_last_alive)
		when = lp->lpni_last_alive;

	lnet_notify_locked(lp, ni == NULL, alive, when);

	if (ni != NULL)
		lnet_ni_notify_locked(ni, lp);

	lnet_peer_ni_decref_locked(lp);

	lnet_net_unlock(cpt);
	return 0;
}
EXPORT_SYMBOL(lnet_notify);