2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2011, 2017, Intel Corporation.
6 * This file is part of Lustre, https://wiki.whamcloud.com/
8 * Portals is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Portals is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Portals; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_LNET
25 #include <linux/uaccess.h>
27 #include <libcfs/libcfs.h>
28 #include <lnet/lib-lnet.h>
30 /* This is really lnet_proc.c. You might need to update sanity test 215
31 * if any file format is changed. */
/*
 * The proc read handlers below encode their iteration state into the
 * signed loff_t *ppos cookie.  The cookie is packed, high bits to low,
 * as: [ cpt | version | hash bucket | hash offset ], with the *_GET
 * macros extracting each field and LNET_PROC_POS_MAKE reassembling it.
 */
33 #define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
35 * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
/* +1 so the CPT field can also hold the value LNET_CPT_NUMBER itself */
37 #define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1)
38 /* change version, 16 bits or 8 bits */
/* 16 version bits on 64-bit loff_t, clamped down to 8 on 32-bit */
39 #define LNET_PROC_VER_BITS \
40 clamp_t(int, LNET_LOFFT_BITS / 4, 8, 16)
42 #define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS
44 * bits for peer hash offset
45 * NB: we don't use the highest bit of *ppos because it's signed
/* offset field gets whatever bits remain after cpt/ver/hash + sign bit */
47 #define LNET_PROC_HOFF_BITS (LNET_LOFFT_BITS - \
48 LNET_PROC_CPT_BITS - \
49 LNET_PROC_VER_BITS - \
50 LNET_PROC_HASH_BITS - 1)
51 /* bits for hash index + position */
52 #define LNET_PROC_HPOS_BITS (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS)
53 /* bits for peer hash table + hash version */
54 #define LNET_PROC_VPOS_BITS (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS)
/* per-field masks, applied after shifting the field down to bit 0 */
56 #define LNET_PROC_CPT_MASK ((1ULL << LNET_PROC_CPT_BITS) - 1)
57 #define LNET_PROC_VER_MASK ((1ULL << LNET_PROC_VER_BITS) - 1)
58 #define LNET_PROC_HASH_MASK ((1ULL << LNET_PROC_HASH_BITS) - 1)
59 #define LNET_PROC_HOFF_MASK ((1ULL << LNET_PROC_HOFF_BITS) - 1)
/* field extractors: shift the cookie right to the field, then mask */
61 #define LNET_PROC_CPT_GET(pos) \
62 (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK)
64 #define LNET_PROC_VER_GET(pos) \
65 (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK)
67 #define LNET_PROC_HASH_GET(pos) \
68 (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK)
70 #define LNET_PROC_HOFF_GET(pos) \
71 (int)((pos) & LNET_PROC_HOFF_MASK)
/* inverse of the extractors: pack (cpt, ver, hash, off) into one loff_t */
73 #define LNET_PROC_POS_MAKE(cpt, ver, hash, off) \
74 (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) | \
75 ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) | \
76 ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \
77 ((off) & LNET_PROC_HOFF_MASK))
/* truncate a full version counter to the bits stored in the cookie */
79 #define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK))
/*
 * Handler body for /proc (or debugfs) "stats": snapshot the global LNet
 * common counters into a heap copy, format them as a single line of
 * counters, and copy the requested window out to userspace.
 * NOTE(review): lnet_counters_reset() below is presumably reached only
 * on the write path (the `if (write)` guard is elided here) — confirm.
 */
81 static int __proc_lnet_stats(void *data, int write,
82 loff_t pos, void __user *buffer, int nob)
85 struct lnet_counters *ctrs;
86 struct lnet_counters_common common;
89 const int tmpsiz = 256; /* 7 %u and 4 __u64 */
92 lnet_counters_reset();
/* heap-allocate both the counter snapshot and the format buffer */
98 LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
102 LIBCFS_ALLOC(tmpstr, tmpsiz);
103 if (tmpstr == NULL) {
/* second allocation failed: release the first before bailing out */
104 LIBCFS_FREE(ctrs, sizeof(*ctrs));
/* take a consistent snapshot, then format only the common counters */
108 lnet_counters_get(ctrs);
109 common = ctrs->lct_common;
111 len = scnprintf(tmpstr, tmpsiz,
112 "%u %u %u %u %u %u %u %llu %llu "
114 common.lcc_msgs_alloc, common.lcc_msgs_max,
116 common.lcc_send_count, common.lcc_recv_count,
117 common.lcc_route_count, common.lcc_drop_count,
118 common.lcc_send_length, common.lcc_recv_length,
119 common.lcc_route_length, common.lcc_drop_length);
/* copy the window [pos, pos+nob) of the formatted string to the user */
124 rc = cfs_trace_copyout_string(buffer, nob,
127 LIBCFS_FREE(tmpstr, tmpsiz);
128 LIBCFS_FREE(ctrs, sizeof(*ctrs));
/* ctl_table entry point for "stats": delegate to __proc_lnet_stats via
 * the common lprocfs handler-wrapping helper. */
133 proc_lnet_stats(struct ctl_table *table, int write, void __user *buffer,
134 size_t *lenp, loff_t *ppos)
136 return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
/*
 * Read handler for "routes": one header pass (routing on/off plus column
 * titles) followed by one remote-net route per read, formatted into a
 * 256-byte scratch buffer.  Iteration state (version + offset) is packed
 * into *ppos; if ln_remote_nets_version changes between reads the walk is
 * aborted so stale positions are never dereferenced.
 */
141 proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer,
142 size_t *lenp, loff_t *ppos)
144 const int tmpsiz = 256;
/* a 32-bit loff_t could not hold the packed position cookie */
152 BUILD_BUG_ON(sizeof(loff_t) < 4);
/* unpack resume state from the position cookie */
154 off = LNET_PROC_HOFF_GET(*ppos);
155 ver = LNET_PROC_VER_GET(*ppos);
162 LIBCFS_ALLOC(tmpstr, tmpsiz);
166 s = tmpstr; /* points to current position in tmpstr[] */
/* header pass: routing state line + column titles */
169 s += scnprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
170 the_lnet.ln_routing ? "enabled" : "disabled");
171 LASSERT(tmpstr + tmpsiz - s > 0);
173 s += scnprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
174 "net", "hops", "priority", "state", "router");
175 LASSERT(tmpstr + tmpsiz - s > 0);
/* record the table version we started walking under */
178 ver = (unsigned int)the_lnet.ln_remote_nets_version;
180 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
184 struct lnet_route *route = NULL;
185 struct lnet_remotenet *rnet = NULL;
187 struct list_head *rn_list;
/* table changed since our cookie was issued: abort with ESTALE-style
 * behavior rather than resume at a now-meaningless offset */
192 if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
194 LIBCFS_FREE(tmpstr, tmpsiz);
/* scan the remote-nets hash buckets until the off-th route is found */
198 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL;
200 rn_list = &the_lnet.ln_remote_nets_hash[i];
204 while (n != rn_list && route == NULL) {
205 rnet = list_entry(n, struct lnet_remotenet,
208 r = rnet->lrn_routes.next;
210 while (r != &rnet->lrn_routes) {
211 struct lnet_route *re =
212 list_entry(r, struct lnet_route,
/* found the route to report: format one output line for it */
228 __u32 net = rnet->lrn_net;
229 __u32 hops = route->lr_hops;
230 unsigned int priority = route->lr_priority;
231 int alive = lnet_is_route_alive(route);
233 s += scnprintf(s, tmpstr + tmpsiz - s,
234 "%-8s %4d %8u %7s %s\n",
235 libcfs_net2str(net), hops,
237 alive ? "up" : "down",
238 libcfs_nid2str(route->lr_nid));
239 LASSERT(tmpstr + tmpsiz - s > 0);
245 len = s - tmpstr; /* how many bytes was written */
247 if (len > *lenp) { /* linux-supplied buffer is too small */
249 } else if (len > 0) { /* wrote something */
250 if (copy_to_user(buffer, tmpstr, len))
/* re-pack the (possibly advanced) state for the next read */
254 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
258 LIBCFS_FREE(tmpstr, tmpsiz);
/*
 * Read handler for "routers": same version-stamped, one-entry-per-read
 * scheme as proc_lnet_routes, but walking the global ln_routers list of
 * gateway peers instead of the remote-net hash.
 */
267 proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer,
268 size_t *lenp, loff_t *ppos)
273 const int tmpsiz = 256;
/* unpack resume state from the position cookie */
278 off = LNET_PROC_HOFF_GET(*ppos);
279 ver = LNET_PROC_VER_GET(*ppos);
286 LIBCFS_ALLOC(tmpstr, tmpsiz);
290 s = tmpstr; /* points to current position in tmpstr[] */
/* header pass: column titles only */
293 s += scnprintf(s, tmpstr + tmpsiz - s,
295 "ref", "rtr_ref", "alive", "router");
296 LASSERT(tmpstr + tmpsiz - s > 0);
/* record the router-list version we started walking under */
299 ver = (unsigned int)the_lnet.ln_routers_version;
301 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
304 struct lnet_peer *peer = NULL;
/* list changed since our cookie was issued: abort the walk */
309 if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
312 LIBCFS_FREE(tmpstr, tmpsiz);
/* advance through ln_routers to the off-th gateway */
316 r = the_lnet.ln_routers.next;
318 while (r != &the_lnet.ln_routers) {
319 struct lnet_peer *lp =
320 list_entry(r, struct lnet_peer,
/* found the gateway to report: format one output line for it */
333 lnet_nid_t nid = peer->lp_primary_nid;
334 int nrefs = atomic_read(&peer->lp_refcount);
335 int nrtrrefs = peer->lp_rtr_refcount;
336 int alive = lnet_is_gateway_alive(peer);
338 s += scnprintf(s, tmpstr + tmpsiz - s,
341 alive ? "up" : "down",
342 libcfs_nid2str(nid));
348 len = s - tmpstr; /* how many bytes was written */
350 if (len > *lenp) { /* linux-supplied buffer is too small */
352 } else if (len > 0) { /* wrote something */
353 if (copy_to_user(buffer, tmpstr, len))
/* re-pack the (possibly advanced) state for the next read */
357 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
361 LIBCFS_FREE(tmpstr, tmpsiz);
369 /* TODO: there should be no direct access to ptable. We should add a set
370 * of APIs that give access to the ptable and its members */
/*
 * Handler for "peers": on write, reset every peer-NI's min tx/rtr credit
 * watermarks across all per-CPT peer tables; on read, walk the per-CPT
 * peer hash tables one peer-NI per call, packing (cpt, version, hash
 * bucket, offset) into *ppos so the walk can resume, and aborting if the
 * table version changed under us.
 */
372 proc_lnet_peers(struct ctl_table *table, int write, void __user *buffer,
373 size_t *lenp, loff_t *ppos)
375 const int tmpsiz = 256;
376 struct lnet_peer_table *ptable;
/* unpack full resume state: CPT, version, hash bucket, bucket offset */
379 int cpt = LNET_PROC_CPT_GET(*ppos);
380 int ver = LNET_PROC_VER_GET(*ppos);
381 int hash = LNET_PROC_HASH_GET(*ppos);
382 int hoff = LNET_PROC_HOFF_GET(*ppos);
388 struct lnet_peer_ni *peer;
/* write path: reset min-credit watermarks on every peer NI */
390 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
392 for (hash = 0; hash < LNET_PEER_HASH_SIZE; hash++) {
393 list_for_each_entry(peer,
394 &ptable->pt_hash[hash],
396 peer->lpni_mintxcredits =
397 peer->lpni_txcredits;
398 peer->lpni_minrtrcredits =
399 peer->lpni_rtrcredits;
/* the cookie's hash field must be able to index every bucket */
411 BUILD_BUG_ON(LNET_PROC_HASH_BITS < LNET_PEER_HASH_BITS);
/* cookie already walked past the last CPT: nothing left to emit */
413 if (cpt >= LNET_CPT_NUMBER) {
418 LIBCFS_ALLOC(tmpstr, tmpsiz);
422 s = tmpstr; /* points to current position in tmpstr[] */
/* header pass: column titles */
425 s += scnprintf(s, tmpstr + tmpsiz - s,
426 "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
427 "nid", "refs", "state", "last", "max",
428 "rtr", "min", "tx", "min", "queue");
429 LASSERT(tmpstr + tmpsiz - s > 0);
433 struct lnet_peer_ni *peer;
/* look up the table for the CPT we are resuming in, under lnet_net_lock */
443 ptable = the_lnet.ln_peer_tables[cpt];
445 ver = LNET_PROC_VERSION(ptable->pt_version);
/* table changed since our cookie was issued: abort the walk */
447 if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
448 lnet_net_unlock(cpt);
449 LIBCFS_FREE(tmpstr, tmpsiz);
/* scan buckets from the resume point for the hoff-th peer NI */
453 while (hash < LNET_PEER_HASH_SIZE) {
455 p = ptable->pt_hash[hash].next;
457 while (p != &ptable->pt_hash[hash]) {
458 struct lnet_peer_ni *lp =
459 list_entry(p, struct lnet_peer_ni,
464 /* minor optimization: start from idx+1
465 * on next iteration if we've just
466 * drained lpni_hashlist */
467 if (lp->lpni_hashlist.next ==
468 &ptable->pt_hash[hash]) {
479 p = lp->lpni_hashlist.next;
/* found the peer NI to report: snapshot its fields under the lock */
491 lnet_nid_t nid = peer->lpni_nid;
492 int nrefs = atomic_read(&peer->lpni_refcount);
493 time64_t lastalive = -1;
494 char *aliveness = "NA";
495 int maxcr = (peer->lpni_net) ?
496 peer->lpni_net->net_tunables.lct_peer_tx_credits : 0;
497 int txcr = peer->lpni_txcredits;
498 int mintxcr = peer->lpni_mintxcredits;
499 int rtrcr = peer->lpni_rtrcredits;
500 int minrtrcr = peer->lpni_minrtrcredits;
501 int txqnob = peer->lpni_txqnob;
/* only routers / aliveness-enabled peers report an up/down state */
503 if (lnet_isrouter(peer) ||
504 lnet_peer_aliveness_enabled(peer))
505 aliveness = lnet_is_peer_ni_alive(peer) ?
/* drop the net lock before formatting: nid2str etc. need no lock */
508 lnet_net_unlock(cpt);
510 s += scnprintf(s, tmpstr + tmpsiz - s,
511 "%-24s %4d %5s %5lld %5d %5d %5d %5d %5d %d\n",
512 libcfs_nid2str(nid), nrefs, aliveness,
513 lastalive, maxcr, rtrcr, minrtrcr, txcr,
515 LASSERT(tmpstr + tmpsiz - s > 0);
517 } else { /* peer is NULL */
518 lnet_net_unlock(cpt);
/* exhausted this CPT's buckets: advance to the next CPT */
521 if (hash == LNET_PEER_HASH_SIZE) {
525 if (peer == NULL && cpt < LNET_CPT_NUMBER)
530 len = s - tmpstr; /* how many bytes was written */
532 if (len > *lenp) { /* linux-supplied buffer is too small */
534 } else if (len > 0) { /* wrote something */
535 if (copy_to_user(buffer, tmpstr, len))
/* re-pack the full (cpt, ver, hash, hoff) state for the next read */
538 *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
541 LIBCFS_FREE(tmpstr, tmpsiz);
/*
 * Handler body for "buffers": dump the router buffer pools — for each of
 * the LNET_NRBPOOLS pool sizes, one line per CPT showing buffer count,
 * current credits and minimum credits.  Output is formatted into a
 * buffer sized for the worst case and windowed out via
 * cfs_trace_copyout_string().
 */
549 static int __proc_lnet_buffers(void *data, int write,
550 loff_t pos, void __user *buffer, int nob)
562 /* (4 %d) * 4 * LNET_CPT_NUMBER */
563 tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
564 LIBCFS_ALLOC(tmpstr, tmpsiz);
568 s = tmpstr; /* points to current position in tmpstr[] */
/* header: column titles */
570 s += scnprintf(s, tmpstr + tmpsiz - s,
572 "pages", "count", "credits", "min");
573 LASSERT(tmpstr + tmpsiz - s > 0);
575 if (the_lnet.ln_rtrpools == NULL)
576 goto out; /* I'm not a router */
/* outer loop: pool size class; inner loop: one line per CPT's pool,
 * sampled under LNET_LOCK_EX for a consistent snapshot */
578 for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
579 struct lnet_rtrbufpool *rbp;
581 lnet_net_lock(LNET_LOCK_EX);
582 cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
583 s += scnprintf(s, tmpstr + tmpsiz - s,
586 rbp[idx].rbp_nbuffers,
587 rbp[idx].rbp_credits,
588 rbp[idx].rbp_mincredits);
589 LASSERT(tmpstr + tmpsiz - s > 0);
591 lnet_net_unlock(LNET_LOCK_EX);
/* position past the end of the formatted text means EOF */
597 if (pos >= min_t(int, len, strlen(tmpstr)))
600 rc = cfs_trace_copyout_string(buffer, nob,
603 LIBCFS_FREE(tmpstr, tmpsiz);
/* ctl_table entry point for "buffers": delegate to __proc_lnet_buffers
 * via the common lprocfs handler-wrapping helper. */
608 proc_lnet_buffers(struct ctl_table *table, int write, void __user *buffer,
609 size_t *lenp, loff_t *ppos)
611 return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
612 __proc_lnet_buffers);
/*
 * Handler for "nis": on write, reset each NI's per-CPT minimum TX-credit
 * watermark; on read, emit one line per local NI (status, aliveness,
 * refs and credit columns), one TX queue line per partition the NI is
 * bound to.  *ppos here is a simple 1-based NI index (via
 * lnet_get_ni_idx_locked), not the packed cookie used by the peer/route
 * handlers.
 */
616 proc_lnet_nis(struct ctl_table *table, int write, void __user *buffer,
617 size_t *lenp, loff_t *ppos)
619 int tmpsiz = 128 * LNET_CPT_NUMBER;
629 /* Just reset the min stat. */
631 struct lnet_net *net;
635 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
636 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
637 struct lnet_tx_queue *tq;
/* only touch TX queues on partitions this NI is actually bound to
 * (ni_cpts == NULL presumably means "all partitions" — confirm) */
641 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
642 for (j = 0; ni->ni_cpts != NULL &&
643 j < ni->ni_ncpts; j++) {
644 if (i == ni->ni_cpts[j])
648 if (j == ni->ni_ncpts)
653 tq->tq_credits_min = tq->tq_credits;
664 LIBCFS_ALLOC(tmpstr, tmpsiz);
668 s = tmpstr; /* points to current position in tmpstr[] */
/* header pass: column titles */
671 s += scnprintf(s, tmpstr + tmpsiz - s,
672 "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
673 "nid", "status", "alive", "refs", "peer",
674 "rtr", "max", "tx", "min");
675 LASSERT (tmpstr + tmpsiz - s > 0);
677 struct lnet_ni *ni = NULL;
678 int skip = *ppos - 1;
/* resume at the skip-th NI of the global list */
682 ni = lnet_get_ni_idx_locked(skip);
685 struct lnet_tx_queue *tq;
687 time64_t now = ktime_get_real_seconds();
688 time64_t last_alive = -1;
/* only meaningful when this node is routing */
692 if (the_lnet.ln_routing)
693 last_alive = now - ni->ni_net->net_last_alive;
696 LASSERT(ni->ni_status != NULL);
697 stat = (ni->ni_status->ns_status ==
698 LNET_NI_STATUS_UP) ? "up" : "down";
701 /* @lo forever alive */
702 if (ni->ni_net->net_lnd->lnd_type == LOLND) {
707 /* we actually output credits information for
708 * TX queue of each partition */
709 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
710 for (j = 0; ni->ni_cpts != NULL &&
711 j < ni->ni_ncpts; j++) {
712 if (i == ni->ni_cpts[j])
716 if (j == ni->ni_ncpts)
722 s += scnprintf(s, tmpstr + tmpsiz - s,
723 "%-24s %6s %5lld %4d %4d %4d %5d %5d %5d\n",
724 libcfs_nid2str(ni->ni_nid), stat,
725 last_alive, *ni->ni_refs[i],
726 ni->ni_net->net_tunables.lct_peer_tx_credits,
727 ni->ni_net->net_tunables.lct_peer_rtr_credits,
729 tq->tq_credits, tq->tq_credits_min);
733 LASSERT(tmpstr + tmpsiz - s > 0);
739 len = s - tmpstr; /* how many bytes was written */
741 if (len > *lenp) { /* linux-supplied buffer is too small */
743 } else if (len > 0) { /* wrote something */
744 if (copy_to_user(buffer, tmpstr, len))
750 LIBCFS_FREE(tmpstr, tmpsiz);
/* Mapping between portal-rotor mode values and their user-visible
 * names/descriptions, used by __proc_lnet_portal_rotor for both display
 * and (by name) parsing of writes. */
758 struct lnet_portal_rotors {
764 static struct lnet_portal_rotors portal_rotors[] = {
766 .pr_value = LNET_PTL_ROTOR_OFF,
768 .pr_desc = "Turn off message rotor for wildcard portals"
771 .pr_value = LNET_PTL_ROTOR_ON,
773 .pr_desc = "round-robin dispatch all PUT messages for "
777 .pr_value = LNET_PTL_ROTOR_RR_RT,
779 .pr_desc = "round-robin dispatch routed PUT message for "
783 .pr_value = LNET_PTL_ROTOR_HASH_RT,
784 .pr_name = "HASH_RT",
785 .pr_desc = "dispatch routed PUT message by hashing source "
786 "NID for wildcard portals"
/*
 * Handler body for "portal_rotor": on read, report the current
 * portal_rotor mode (name + description from portal_rotors[]); on
 * write, parse a mode name (case-insensitive prefix match against the
 * table) and install the matching pr_value as the new mode.
 */
795 static int __proc_lnet_portal_rotor(void *data, int write,
796 loff_t pos, void __user *buffer, int nob)
798 const int buf_len = 128;
804 LIBCFS_ALLOC(buf, buf_len);
/* read path: find the table entry for the current mode */
811 for (i = 0; portal_rotors[i].pr_value >= 0; i++) {
812 if (portal_rotors[i].pr_value == portal_rotor)
/* the current mode must always exist in the table */
816 LASSERT(portal_rotors[i].pr_value == portal_rotor);
819 rc = scnprintf(buf, buf_len,
820 "{\n\tportals: all\n"
821 "\trotor: %s\n\tdescription: %s\n}",
822 portal_rotors[i].pr_name,
823 portal_rotors[i].pr_desc);
/* position past the end of the formatted text means EOF */
825 if (pos >= min_t(int, rc, buf_len)) {
828 rc = cfs_trace_copyout_string(buffer, nob,
/* write path: pull the user's string into the kernel buffer */
834 rc = cfs_trace_copyin_string(buf, buf_len, buffer, nob);
/* case-insensitive match on the mode name; first hit wins */
842 for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
843 if (strncasecmp(portal_rotors[i].pr_name, tmp,
844 strlen(portal_rotors[i].pr_name)) == 0) {
845 portal_rotor = portal_rotors[i].pr_value;
852 LIBCFS_FREE(buf, buf_len);
/* ctl_table entry point for "portal_rotor": delegate to
 * __proc_lnet_portal_rotor via the common lprocfs helper. */
857 proc_lnet_portal_rotor(struct ctl_table *table, int write, void __user *buffer,
858 size_t *lenp, loff_t *ppos)
860 return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
861 __proc_lnet_portal_rotor);
/* Table of LNet proc/debugfs entries; each entry binds a file name to
 * one of the handlers defined above.  Registered/unregistered by
 * lnet_router_debugfs_init()/_fini() below. */
865 static struct ctl_table lnet_table[] = {
867 * NB No .strategy entries have been provided since sysctl(8) prefers
868 * to go via /proc for portability.
873 .proc_handler = &proc_lnet_stats,
876 .procname = "routes",
878 .proc_handler = &proc_lnet_routes,
881 .procname = "routers",
883 .proc_handler = &proc_lnet_routers,
888 .proc_handler = &proc_lnet_peers,
891 .procname = "buffers",
893 .proc_handler = &proc_lnet_buffers,
898 .proc_handler = &proc_lnet_nis,
901 .procname = "portal_rotor",
903 .proc_handler = &proc_lnet_portal_rotor,
/* Register lnet_table's entries with debugfs. */
908 void lnet_router_debugfs_init(void)
910 lnet_insert_debugfs(lnet_table);
/* Unregister the entries added by lnet_router_debugfs_init(). */
913 void lnet_router_debugfs_fini(void)
915 lnet_remove_debugfs(lnet_table);