2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2011, 2017, Intel Corporation.
6 * This file is part of Lustre, https://wiki.whamcloud.com/
8 * Portals is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Portals is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Portals; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_LNET
25 #include <linux/uaccess.h>
27 #include <libcfs/libcfs.h>
28 #include <lnet/lib-lnet.h>
30 /* This is really lnet_proc.c. You might need to update sanity test 215
31 * if any file format is changed. */
/*
 * These files use *ppos as a packed seek cursor so that a multi-read
 * traversal can be resumed and validated.  Bit layout (high to low):
 *
 *   [ CPT | table version | hash bucket | offset within bucket ]
 *
 * The field widths below must sum to one less than the width of loff_t
 * (the sign bit is never used -- see the NB comment further down).
 */
33 #define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
35 * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
/* +1 so the field can also encode LNET_CPT_NUMBER itself, not just
 * indexes 0..LNET_CPT_NUMBER-1 (used as an end-of-iteration marker --
 * NOTE(review): inferred from the width, confirm against callers). */
37 #define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1)
38 /* change version, 16 bits or 8 bits */
39 #define LNET_PROC_VER_BITS \
40 clamp_t(int, LNET_LOFFT_BITS / 4, 8, 16)
42 #define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS
44 * bits for peer hash offset
45 * NB: we don't use the highest bit of *ppos because it's signed
47 #define LNET_PROC_HOFF_BITS (LNET_LOFFT_BITS - \
48 LNET_PROC_CPT_BITS - \
49 LNET_PROC_VER_BITS - \
50 LNET_PROC_HASH_BITS - 1)
51 /* bits for hash index + position */
52 #define LNET_PROC_HPOS_BITS (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS)
53 /* bits for peer hash table + hash version */
54 #define LNET_PROC_VPOS_BITS (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS)
/* Masks for each field, applied after shifting the field down to bit 0. */
56 #define LNET_PROC_CPT_MASK ((1ULL << LNET_PROC_CPT_BITS) - 1)
57 #define LNET_PROC_VER_MASK ((1ULL << LNET_PROC_VER_BITS) - 1)
58 #define LNET_PROC_HASH_MASK ((1ULL << LNET_PROC_HASH_BITS) - 1)
59 #define LNET_PROC_HOFF_MASK ((1ULL << LNET_PROC_HOFF_BITS) - 1)
/* Field extractors: shift the packed position right to the field's base
 * bit, then mask.  Inverse of LNET_PROC_POS_MAKE(). */
61 #define LNET_PROC_CPT_GET(pos) \
62 (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK)
64 #define LNET_PROC_VER_GET(pos) \
65 (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK)
67 #define LNET_PROC_HASH_GET(pos) \
68 (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK)
70 #define LNET_PROC_HOFF_GET(pos) \
71 (int)((pos) & LNET_PROC_HOFF_MASK)
/* Pack (cpt, ver, hash, off) into a single non-negative loff_t cursor. */
73 #define LNET_PROC_POS_MAKE(cpt, ver, hash, off) \
74 (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) | \
75 ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) | \
76 ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \
77 ((off) & LNET_PROC_HOFF_MASK))
/* Truncate a full change-version counter to the bits stored in *ppos;
 * used to detect table mutation between successive reads. */
79 #define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK))
/*
 * Handler for the "stats" file.
 *
 * On write it resets the LNet counters (lnet_counters_reset()); on read
 * it snapshots the common counter set and formats it as a single line of
 * 7 %u and 4 u64 values which is copied out to @buffer.
 *
 * NOTE(review): interior lines are elided in this view -- the write/read
 * branching and error paths are inferred from the visible calls; confirm
 * against the full source.
 */
81 static int proc_lnet_stats(struct ctl_table *table, int write,
82 void __user *buffer, size_t *lenp, loff_t *ppos)
85 struct lnet_counters *ctrs;
86 struct lnet_counters_common common;
90 char tmpstr[256]; /* 7 %u and 4 u64 */
/* write means "reset the counters", not "store a value" */
93 lnet_counters_reset();
99 LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
103 rc = lnet_counters_get(ctrs);
/* copy the common sub-struct so formatting runs on a stable snapshot */
107 common = ctrs->lct_common;
109 len = scnprintf(tmpstr, sizeof(tmpstr),
110 "%u %u %u %u %u %u %u %llu %llu "
112 common.lcc_msgs_alloc, common.lcc_msgs_max,
114 common.lcc_send_count, common.lcc_recv_count,
115 common.lcc_route_count, common.lcc_drop_count,
116 common.lcc_send_length, common.lcc_recv_length,
117 common.lcc_route_length, common.lcc_drop_length);
122 rc = cfs_trace_copyout_string(buffer, nob,
125 LIBCFS_FREE(ctrs, sizeof(*ctrs));
/*
 * Handler for the "routes" file: dumps the remote-nets routing table,
 * one route per read, using the packed *ppos cursor (version + offset).
 *
 * A header ("Routing enabled/disabled" plus column titles) is emitted on
 * the first read; subsequent reads walk ln_remote_nets_hash and print one
 * "net hops priority state router" line per route.  If the table's change
 * version no longer matches the version stored in *ppos the walk is
 * abandoned (the elided branch presumably returns -ESTALE -- confirm).
 *
 * NOTE(review): interior lines are elided in this view; locking around
 * the hash walk is not visible here.
 */
130 proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer,
131 size_t *lenp, loff_t *ppos)
133 const int tmpsiz = 256;
/* the cursor packing assumes loff_t is at least 32 bits wide */
141 BUILD_BUG_ON(sizeof(loff_t) < 4);
143 off = LNET_PROC_HOFF_GET(*ppos);
144 ver = LNET_PROC_VER_GET(*ppos);
151 LIBCFS_ALLOC(tmpstr, tmpsiz);
155 s = tmpstr; /* points to current position in tmpstr[] */
/* first read: emit the header and capture the current table version */
158 s += scnprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
159 the_lnet.ln_routing ? "enabled" : "disabled");
160 LASSERT(tmpstr + tmpsiz - s > 0);
162 s += scnprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
163 "net", "hops", "priority", "state", "router");
164 LASSERT(tmpstr + tmpsiz - s > 0);
167 ver = (unsigned int)the_lnet.ln_remote_nets_version;
169 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
173 struct lnet_route *route = NULL;
174 struct lnet_remotenet *rnet = NULL;
176 struct list_head *rn_list;
/* table changed since the cursor was minted: stop the stale walk */
181 if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
183 LIBCFS_FREE(tmpstr, tmpsiz);
/* scan hash buckets until the route at offset 'off' is located */
187 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL;
189 rn_list = &the_lnet.ln_remote_nets_hash[i];
193 while (n != rn_list && route == NULL) {
194 rnet = list_entry(n, struct lnet_remotenet,
197 r = rnet->lrn_routes.next;
199 while (r != &rnet->lrn_routes) {
200 struct lnet_route *re =
201 list_entry(r, struct lnet_route,
/* found the next route to report: format one output line */
217 __u32 net = rnet->lrn_net;
218 __u32 hops = route->lr_hops;
219 unsigned int priority = route->lr_priority;
220 int alive = lnet_is_route_alive(route);
222 s += scnprintf(s, tmpstr + tmpsiz - s,
223 "%-8s %4d %8u %7s %s\n",
224 libcfs_net2str(net), hops,
226 alive ? "up" : "down",
227 libcfs_nidstr(&route->lr_nid));
228 LASSERT(tmpstr + tmpsiz - s > 0);
234 len = s - tmpstr; /* how many bytes was written */
236 if (len > *lenp) { /* linux-supplied buffer is too small */
238 } else if (len > 0) { /* wrote something */
239 if (copy_to_user(buffer, tmpstr, len))
/* re-pack the cursor so the next read resumes after this route */
243 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
247 LIBCFS_FREE(tmpstr, tmpsiz);
/*
 * Handler for the "routers" file: dumps the router (gateway peer) list,
 * one router per read, validated against ln_routers_version via the
 * packed *ppos cursor -- same read protocol as proc_lnet_routes() above.
 *
 * NOTE(review): interior lines are elided in this view; error paths and
 * locking are inferred from the visible calls.
 */
256 proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer,
257 size_t *lenp, loff_t *ppos)
262 const int tmpsiz = 256;
267 off = LNET_PROC_HOFF_GET(*ppos);
268 ver = LNET_PROC_VER_GET(*ppos);
275 LIBCFS_ALLOC(tmpstr, tmpsiz);
279 s = tmpstr; /* points to current position in tmpstr[] */
/* first read: column header plus current router-list version */
282 s += scnprintf(s, tmpstr + tmpsiz - s,
284 "ref", "rtr_ref", "alive", "router");
285 LASSERT(tmpstr + tmpsiz - s > 0);
288 ver = (unsigned int)the_lnet.ln_routers_version;
290 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
293 struct lnet_peer *peer = NULL;
/* router list changed since the cursor was minted: abandon the walk */
298 if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
301 LIBCFS_FREE(tmpstr, tmpsiz);
/* walk ln_routers to the entry at offset 'off' */
305 r = the_lnet.ln_routers.next;
307 while (r != &the_lnet.ln_routers) {
308 struct lnet_peer *lp =
309 list_entry(r, struct lnet_peer,
/* found the next router: snapshot its fields and format one line */
322 struct lnet_nid *nid = &peer->lp_primary_nid;
323 int nrefs = atomic_read(&peer->lp_refcount);
324 int nrtrrefs = peer->lp_rtr_refcount;
325 int alive = lnet_is_gateway_alive(peer);
327 s += scnprintf(s, tmpstr + tmpsiz - s,
330 alive ? "up" : "down",
337 len = s - tmpstr; /* how many bytes was written */
339 if (len > *lenp) { /* linux-supplied buffer is too small */
341 } else if (len > 0) { /* wrote something */
342 if (copy_to_user(buffer, tmpstr, len))
/* re-pack the cursor for the next read */
346 *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
350 LIBCFS_FREE(tmpstr, tmpsiz);
358 /* TODO: there should be no direct access to ptable. We should add a set
359 * of APIs that give access to the ptable and its members */
/*
 * Handler for the "peers" file: dumps one peer NI per read, walking the
 * per-CPT peer tables.  The *ppos cursor packs all four fields here:
 * CPT index, table version, hash bucket, and offset within the bucket.
 *
 * A write does not store data; it resets the per-peer minimum tx/rtr
 * credit watermarks (visible in the cfs_percpt_for_each loop below).
 *
 * NOTE(review): interior lines are elided in this view -- lock/unlock
 * pairing and the exact resume/advance logic are inferred; confirm
 * against the full source.
 */
361 proc_lnet_peers(struct ctl_table *table, int write, void __user *buffer,
362 size_t *lenp, loff_t *ppos)
364 const int tmpsiz = 256;
365 struct lnet_peer_table *ptable;
/* unpack the full four-field cursor */
368 int cpt = LNET_PROC_CPT_GET(*ppos);
369 int ver = LNET_PROC_VER_GET(*ppos);
370 int hash = LNET_PROC_HASH_GET(*ppos);
371 int hoff = LNET_PROC_HOFF_GET(*ppos);
/* write path: reset min-credit watermarks on every peer NI */
377 struct lnet_peer_ni *peer;
379 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
381 for (hash = 0; hash < LNET_PEER_HASH_SIZE; hash++) {
382 list_for_each_entry(peer,
383 &ptable->pt_hash[hash],
385 peer->lpni_mintxcredits =
386 peer->lpni_txcredits;
387 peer->lpni_minrtrcredits =
388 peer->lpni_rtrcredits;
/* the cursor's hash field must be wide enough for the peer hash */
400 BUILD_BUG_ON(LNET_PROC_HASH_BITS < LNET_PEER_HASH_BITS);
/* cursor already walked past the last CPT: nothing more to emit */
402 if (cpt >= LNET_CPT_NUMBER) {
407 LIBCFS_ALLOC(tmpstr, tmpsiz);
411 s = tmpstr; /* points to current position in tmpstr[] */
/* first read: column header */
414 s += scnprintf(s, tmpstr + tmpsiz - s,
415 "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
416 "nid", "refs", "state", "last", "max",
417 "rtr", "min", "tx", "min", "queue");
418 LASSERT(tmpstr + tmpsiz - s > 0);
422 struct lnet_peer_ni *peer;
432 ptable = the_lnet.ln_peer_tables[cpt];
/* capture version on first visit of this CPT's table... */
434 ver = LNET_PROC_VERSION(ptable->pt_version);
/* ...and bail out if it moved under us on a later read */
436 if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
437 lnet_net_unlock(cpt);
438 LIBCFS_FREE(tmpstr, tmpsiz);
/* locate the peer NI at (hash, hoff) within this CPT's table */
442 while (hash < LNET_PEER_HASH_SIZE) {
444 p = ptable->pt_hash[hash].next;
446 while (p != &ptable->pt_hash[hash]) {
447 struct lnet_peer_ni *lp =
448 list_entry(p, struct lnet_peer_ni,
453 /* minor optimization: start from idx+1
454 * on next iteration if we've just
455 * drained lpni_hashlist */
456 if (lp->lpni_hashlist.next ==
457 &ptable->pt_hash[hash]) {
468 p = lp->lpni_hashlist.next;
/* found a peer NI: snapshot its fields before dropping the lock */
480 struct lnet_nid nid = peer->lpni_nid;
481 int nrefs = kref_read(&peer->lpni_kref);
482 time64_t lastalive = -1;
483 char *aliveness = "NA";
484 int maxcr = (peer->lpni_net) ?
485 peer->lpni_net->net_tunables.lct_peer_tx_credits : 0;
486 int txcr = peer->lpni_txcredits;
487 int mintxcr = peer->lpni_mintxcredits;
488 int rtrcr = peer->lpni_rtrcredits;
489 int minrtrcr = peer->lpni_minrtrcredits;
490 int txqnob = peer->lpni_txqnob;
/* aliveness is only meaningful for routers or when liveness checking
 * is enabled; otherwise it stays "NA" */
492 if (lnet_isrouter(peer) ||
493 lnet_peer_aliveness_enabled(peer))
494 aliveness = lnet_is_peer_ni_alive(peer) ?
497 lnet_net_unlock(cpt);
499 s += scnprintf(s, tmpstr + tmpsiz - s,
500 "%-24s %4d %5s %5lld %5d %5d %5d %5d %5d %d\n",
501 libcfs_nidstr(&nid), nrefs, aliveness,
502 lastalive, maxcr, rtrcr, minrtrcr, txcr,
504 LASSERT(tmpstr + tmpsiz - s > 0);
506 } else { /* peer is NULL */
507 lnet_net_unlock(cpt);
/* bucket range exhausted for this CPT: advance to the next CPT */
510 if (hash == LNET_PEER_HASH_SIZE) {
514 if (peer == NULL && cpt < LNET_CPT_NUMBER)
519 len = s - tmpstr; /* how many bytes was written */
521 if (len > *lenp) { /* linux-supplied buffer is too small */
523 } else if (len > 0) { /* wrote something */
524 if (copy_to_user(buffer, tmpstr, len))
/* re-pack all four cursor fields for the next read */
527 *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
530 LIBCFS_FREE(tmpstr, tmpsiz);
/*
 * Handler for the "buffers" file: dumps router-buffer-pool usage
 * (pages/count/credits/min) for each of the LNET_NRBPOOLS pool sizes
 * across every CPT.  Emits only the header when this node is not a
 * router (ln_rtrpools == NULL).
 *
 * NOTE(review): interior lines are elided in this view; pos/nob handling
 * at the end is inferred from the visible cfs_trace_copyout_string call.
 */
538 static int proc_lnet_buffers(struct ctl_table *table, int write,
539 void __user *buffer, size_t *lenp, loff_t *ppos)
553 /* (4 %d) * 4 * LNET_CPT_NUMBER */
554 tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
555 LIBCFS_ALLOC(tmpstr, tmpsiz);
559 s = tmpstr; /* points to current position in tmpstr[] */
561 s += scnprintf(s, tmpstr + tmpsiz - s,
563 "pages", "count", "credits", "min");
564 LASSERT(tmpstr + tmpsiz - s > 0);
566 if (the_lnet.ln_rtrpools == NULL)
567 goto out; /* I'm not a router */
/* one row per (pool size, CPT) pair; lock held only per pool size */
569 for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
570 struct lnet_rtrbufpool *rbp;
572 lnet_net_lock(LNET_LOCK_EX);
573 cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
574 s += scnprintf(s, tmpstr + tmpsiz - s,
577 rbp[idx].rbp_nbuffers,
578 rbp[idx].rbp_credits,
579 rbp[idx].rbp_mincredits);
580 LASSERT(tmpstr + tmpsiz - s > 0);
582 lnet_net_unlock(LNET_LOCK_EX);
/* whole report is rendered at once; *ppos selects the slice to copy */
588 if (pos >= min_t(int, len, strlen(tmpstr)))
591 rc = cfs_trace_copyout_string(buffer, nob,
594 LIBCFS_FREE(tmpstr, tmpsiz);
/*
 * Handler for the "nis" file: dumps one local network interface (NI) per
 * read, with per-CPT TX-queue credit information.  Here *ppos is used as
 * a simple 1-based index (see 'skip = *ppos - 1'), not the packed cursor.
 *
 * A write resets each TX queue's minimum-credit watermark rather than
 * storing data (visible in the first cfs_percpt_for_each loop).
 *
 * NOTE(review): interior lines are elided in this view; locking and the
 * end-of-list handling are inferred from the visible calls.
 */
599 proc_lnet_nis(struct ctl_table *table, int write, void __user *buffer,
600 size_t *lenp, loff_t *ppos)
602 int tmpsiz = 128 * LNET_CPT_NUMBER;
612 /* Just reset the min stat. */
614 struct lnet_net *net;
618 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
619 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
620 struct lnet_tx_queue *tq;
/* only touch TX queues whose CPT this NI actually uses */
624 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
625 for (j = 0; ni->ni_cpts != NULL &&
626 j < ni->ni_ncpts; j++) {
627 if (i == ni->ni_cpts[j])
631 if (j == ni->ni_ncpts)
636 tq->tq_credits_min = tq->tq_credits;
647 LIBCFS_ALLOC(tmpstr, tmpsiz);
651 s = tmpstr; /* points to current position in tmpstr[] */
/* first read: column header */
654 s += scnprintf(s, tmpstr + tmpsiz - s,
655 "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
656 "nid", "status", "alive", "refs", "peer",
657 "rtr", "max", "tx", "min");
658 LASSERT (tmpstr + tmpsiz - s > 0);
660 struct lnet_ni *ni = NULL;
661 int skip = *ppos - 1;
665 ni = lnet_get_ni_idx_locked(skip);
668 struct lnet_tx_queue *tq;
670 time64_t now = ktime_get_seconds();
671 time64_t last_alive = -1;
/* last_alive only reported when this node is routing */
675 if (the_lnet.ln_routing)
676 last_alive = now - ni->ni_net->net_last_alive;
679 LASSERT(ni->ni_status != NULL);
680 stat = (lnet_ni_get_status_locked(ni) ==
681 LNET_NI_STATUS_UP) ? "up" : "down";
684 /* @lo forever alive */
685 if (ni->ni_net->net_lnd->lnd_type == LOLND) {
690 /* we actually output credits information for
691 * TX queue of each partition */
692 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
693 for (j = 0; ni->ni_cpts != NULL &&
694 j < ni->ni_ncpts; j++) {
695 if (i == ni->ni_cpts[j])
699 if (j == ni->ni_ncpts)
705 s += scnprintf(s, tmpstr + tmpsiz - s,
706 "%-24s %6s %5lld %4d %4d %4d %5d %5d %5d\n",
707 libcfs_nidstr(&ni->ni_nid), stat,
708 last_alive, *ni->ni_refs[i],
709 ni->ni_net->net_tunables.lct_peer_tx_credits,
710 ni->ni_net->net_tunables.lct_peer_rtr_credits,
712 tq->tq_credits, tq->tq_credits_min);
716 LASSERT(tmpstr + tmpsiz - s > 0);
722 len = s - tmpstr; /* how many bytes was written */
724 if (len > *lenp) { /* linux-supplied buffer is too small */
726 } else if (len > 0) { /* wrote something */
727 if (copy_to_user(buffer, tmpstr, len))
733 LIBCFS_FREE(tmpstr, tmpsiz);
/* Maps each portal-rotor mode value to its user-visible name and a
 * human-readable description; consumed by proc_lnet_portal_rotor()
 * for both formatting reads and parsing writes. */
741 struct lnet_portal_rotors {
/* NOTE(review): the terminating sentinel entry (pr_value < 0 /
 * pr_name == NULL, relied upon by the lookup loops below) is elided
 * from this view -- confirm against the full source. */
747 static struct lnet_portal_rotors portal_rotors[] = {
749 .pr_value = LNET_PTL_ROTOR_OFF,
751 .pr_desc = "Turn off message rotor for wildcard portals"
754 .pr_value = LNET_PTL_ROTOR_ON,
756 .pr_desc = "round-robin dispatch all PUT messages for "
760 .pr_value = LNET_PTL_ROTOR_RR_RT,
762 .pr_desc = "round-robin dispatch routed PUT message for "
766 .pr_value = LNET_PTL_ROTOR_HASH_RT,
767 .pr_name = "HASH_RT",
768 .pr_desc = "dispatch routed PUT message by hashing source "
769 "NID for wildcard portals"
/*
 * Handler for the "portal_rotor" file.
 *
 * Read: looks up the current portal_rotor mode in portal_rotors[] and
 * formats its name and description as a small brace-delimited report.
 * Write: copies the user string in, matches it case-insensitively
 * against the mode names, and sets portal_rotor accordingly.
 *
 * NOTE(review): interior lines are elided in this view; locking around
 * portal_rotor accesses and the error paths are not visible here.
 */
778 static int proc_lnet_portal_rotor(struct ctl_table *table, int write,
779 void __user *buffer, size_t *lenp,
782 const int buf_len = 128;
791 LIBCFS_ALLOC(buf, buf_len);
/* read path: find the table entry for the active mode */
797 for (i = 0; portal_rotors[i].pr_value >= 0; i++) {
798 if (portal_rotors[i].pr_value == portal_rotor)
/* the active mode must always exist in the table */
802 LASSERT(portal_rotors[i].pr_value == portal_rotor);
805 rc = scnprintf(buf, buf_len,
806 "{\n\tportals: all\n"
807 "\trotor: %s\n\tdescription: %s\n}",
808 portal_rotors[i].pr_name,
809 portal_rotors[i].pr_desc);
/* *ppos past the rendered text means EOF for this read */
811 if (pos >= min_t(int, rc, buf_len)) {
814 rc = cfs_trace_copyout_string(buffer, nob,
817 LIBCFS_FREE(buf, buf_len);
/* write path: pull in the user's NUL-terminated mode name */
822 buf = memdup_user_nul(buffer, nob);
830 for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
831 if (strncasecmp(portal_rotors[i].pr_name, tmp,
832 strlen(portal_rotors[i].pr_name)) == 0) {
833 portal_rotor = portal_rotors[i].pr_value;
/*
 * Control table wiring each LNet proc/debugfs file name to its handler
 * above.  Registered by lnet_router_debugfs_init() below.
 *
 * NOTE(review): several entries' .procname/.maxlen/.mode fields and the
 * terminating empty entry are elided from this view.
 */
844 static struct ctl_table lnet_table[] = {
846 * NB No .strategy entries have been provided since sysctl(8) prefers
847 * to go via /proc for portability.
852 .proc_handler = &proc_lnet_stats,
855 .procname = "routes",
857 .proc_handler = &proc_lnet_routes,
860 .procname = "routers",
862 .proc_handler = &proc_lnet_routers,
867 .proc_handler = &proc_lnet_peers,
870 .procname = "buffers",
872 .proc_handler = &proc_lnet_buffers,
877 .proc_handler = &proc_lnet_nis,
880 .procname = "portal_rotor",
882 .proc_handler = &proc_lnet_portal_rotor,
/* plain integer tunable exposed directly via the generic int handler */
885 .procname = "lnet_lnd_timeout",
886 .data = &lnet_lnd_timeout,
887 .maxlen = sizeof(lnet_lnd_timeout),
889 .proc_handler = &debugfs_doint,
/* Opaque handle for the registered debugfs entries; passed to
 * lnet_debugfs_fini() at module unload. */
894 static void *debugfs_state;
/* Register lnet_table with debugfs (called at module init). */
896 void lnet_router_debugfs_init(void)
898 lnet_insert_debugfs(lnet_table, THIS_MODULE,
/* Unregister the table; counterpart of lnet_router_debugfs_init(). */
902 void lnet_router_debugfs_fini(void)
904 lnet_remove_debugfs(lnet_table)
906 void lnet_router_exit(void)
/* Final teardown of the saved debugfs state. */
908 lnet_debugfs_fini(&debugfs_state);