/*
 * Copyright (C) 2009-2012, 2016 Cray, Inc.
 * Copyright (c) 2013, 2015, Intel Corporation.
 *
 * Author: Nic Henke <nic@cray.com>
 * Author: James Shimek <jshimek@cray.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this code liberated and modified from lnet/lnet/router_proc.c */
#define DEBUG_SUBSYSTEM S_LND

#include <linux/seq_file.h>
#include <lprocfs_status.h>
#include "gnilnd.h"
#define GNILND_PROC_STATS       "stats"
#define GNILND_PROC_MDD         "mdd"
#define GNILND_PROC_SMSG        "smsg"
#define GNILND_PROC_CONN        "conn"
#define GNILND_PROC_PEER_CONNS  "peer_conns"
#define GNILND_PROC_PEER        "peer"
#define GNILND_PROC_CKSUM_TEST  "cksum_test"
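
/*
 * Checksum self-test helper: builds matching src/dest page vectors,
 * copies src into dest, then checksums both sides nloops times and
 * reports the achieved throughput on the console.
 */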
_kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
{
        struct bio_vec *src, *dest;
        struct timespec begin, end, diff;

        CFS_ALLOC_PTR_ARRAY(src, LNET_MAX_IOV);
        CFS_ALLOC_PTR_ARRAY(dest, LNET_MAX_IOV);

        if (src == NULL || dest == NULL) {
                CERROR("couldn't allocate iovs\n");
                GOTO(unwind, rc = -ENOMEM);
        }

        for (i = 0; i < LNET_MAX_IOV; i++) {
                src[i].bv_len = PAGE_SIZE;
                src[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

                if (src[i].bv_page == NULL) {
                        CERROR("couldn't allocate page %d\n", i);
                        GOTO(unwind, rc = -ENOMEM);
                }

                dest[i].bv_offset = 0;
                dest[i].bv_len = PAGE_SIZE;
                dest[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

                if (dest[i].bv_page == NULL) {
                        CERROR("couldn't allocate page %d\n", i);
                        GOTO(unwind, rc = -ENOMEM);
                }
        }
        /* add extra 2 pages - one for offset of src, 2nd to allow dest offset */
        niov = (nob / PAGE_SIZE) + 2;
        if (niov > LNET_MAX_IOV) {
                CERROR("bytes %d too large, requires niov %d > %d\n",
                       nob, niov, LNET_MAX_IOV);
                GOTO(unwind, rc = -E2BIG);
        }

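        /*
         * Seed non-zero, mismatched offsets on the first iov so the copy
         * and checksum code has to handle unaligned buffers; the per-case
         * tweaks below vary how src and dest are misaligned relative to
         * each other.
         */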
        src[0].bv_offset = 317;
        dest[0].bv_offset = 592;

        dest[0].bv_offset -= 1;

        src[0].bv_offset += 1;

        src[0].bv_offset += 1;
        dest[0].bv_offset -= 1;

        src[0].bv_len = PAGE_SIZE - src[0].bv_offset;
        dest[0].bv_len = PAGE_SIZE - dest[0].bv_offset;
        for (i = 0; i < niov; i++) {
                memset(page_address(src[i].bv_page) + src[i].bv_offset,
                       0xf0 + i, src[i].bv_len);
        }
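
        /* copy the patterned src pages into dest so the two checksums
         * computed below should always match */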
        lnet_copy_kiov2kiov(niov, dest, 0, niov, src, 0, nob);

        getnstimeofday(&begin);

        for (n = 0; n < nloops; n++) {
                CDEBUG(D_NET,
                       "case %d loop %d src %d dest %d nob %d niov %d\n",
                       caseno, n, src[0].bv_offset, dest[0].bv_offset, nob,
                       niov);
                cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1);
                cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1);

                if (cksum != cksum2) {
                        CERROR("case %d loop %d different checksums %x expected %x\n",
                               caseno, n, cksum2, cksum);
                        GOTO(unwind, rc = -ENOKEY);
                }
        }

        getnstimeofday(&end);

        mbytes = ((__u64)nloops * nob * 2) / (1024 * 1024);

        diff = kgnilnd_ts_sub(end, begin);

        LCONSOLE_INFO("running %lldMB took %ld.%09ld seconds\n",
                      mbytes, diff.tv_sec, diff.tv_nsec);
unwind:
        CDEBUG(D_NET, "freeing %d pages\n", i);
        for (i -= 1; i >= 0; i--) {
                __free_page(src[i].bv_page);
                __free_page(dest[i].bv_page);
        }

        CFS_FREE_PTR_ARRAY(src, LNET_MAX_IOV);
        CFS_FREE_PTR_ARRAY(dest, LNET_MAX_IOV);

        return rc;
}
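
/*
 * Write handler for the "cksum_test" proc file.  Input is parsed as
 * "testno:nloops:nbytes"; for example (the path shown assumes the proc
 * root created in kgnilnd_proc_init() below):
 *
 *      echo "0:1000:65536" > /proc/kgnilnd/cksum_test
 */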
kgnilnd_proc_cksum_test_write(struct file *file, const char __user *ubuffer,
                              size_t count, loff_t *ppos)
{
        char dummy[256 + 1] = { '\0' };
        int testno, nloops, nbytes;

        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                CERROR("can't run cksum test, kgnilnd is not initialized yet\n");
        }

        if (count >= sizeof(dummy) || count == 0)
                return -EINVAL;

        if (copy_from_user(dummy, ubuffer, count))
                return -EFAULT;

        if (sscanf(dummy, "%d:%d:%d", &testno, &nloops, &nbytes) == 3) {
                rc = _kgnilnd_proc_run_cksum_test(testno, nloops, nbytes);
        }

        /* spurious, but lets us know the parse was ok */
        return count;
}
kgnilnd_cksum_test_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, NULL, PDE_DATA(inode));
}

static const struct file_operations kgn_cksum_test_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_cksum_test_seq_open,
        .write = kgnilnd_proc_cksum_test_write,
        .release = seq_release,
};
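
/*
 * "stats" read handler: dumps the global and device 0 counters in a
 * single seq_printf.  Values are sampled without locking, so a reader
 * can see slightly inconsistent numbers.
 */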
kgnilnd_stats_seq_show(struct seq_file *sf, void *v)
{
        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                seq_printf(sf, "kgnilnd is not initialized yet\n");
                return 0;
        }

        /* only do the first device */
        dev = &kgnilnd_data.kgn_devices[0];

        /* sampling is racy, but so is reading this file! */
        do_gettimeofday(&now);

        seq_printf(sf, "time: %lu.%lu\n"
                   "n_eager_allocs: %d\n"
                   "GART map bytes: %ld\n"
                   "TX queued maps: %d\n"
                   "TX phys nmaps: %d\n"
                   "TX phys bytes: %lu\n"
                   "RDMAQ bytes_auth: %ld\n"
                   "RDMAQ bytes_left: %ld\n"
                   "RDMAQ nstalls: %d\n"
                   "dev mutex delay: %ld\n"
                   "dev n_schedule: %d\n"
                   "SMSG fast_try: %d\n"
                   "SMSG fast_block: %d\n"
                   "SMSG tx_bytes: %lu\n"
                   "SMSG rx_bytes: %lu\n"
                   "RDMA tx_bytes: %lu\n"
                   "RDMA rx_bytes: %lu\n"
                   "RDMA REV length: %d\n"
                   "RDMA REV offset: %d\n"
                   "RDMA REV copy: %d\n",
                   now.tv_sec, now.tv_usec,
                   atomic_read(&kgnilnd_data.kgn_ntx),
                   atomic_read(&kgnilnd_data.kgn_npeers),
                   atomic_read(&kgnilnd_data.kgn_nconns),
                   atomic_read(&dev->gnd_neps),
                   atomic_read(&dev->gnd_ndgrams),
                   atomic_read(&dev->gnd_nfmablk),
                   atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
                   atomic_read(&kgnilnd_data.kgn_neager_allocs),
                   atomic64_read(&dev->gnd_nbytes_map),
                   atomic_read(&dev->gnd_nq_map),
                   dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
                   atomic64_read(&dev->gnd_rdmaq_bytes_out),
                   atomic64_read(&dev->gnd_rdmaq_bytes_ok),
                   atomic_read(&dev->gnd_rdmaq_nstalls),
                   dev->gnd_mutex_delay,
                   atomic_read(&dev->gnd_n_yield),
                   atomic_read(&dev->gnd_n_schedule),
                   atomic_read(&dev->gnd_fast_try),
                   atomic_read(&dev->gnd_fast_ok),
                   atomic_read(&dev->gnd_fast_block),
                   atomic_read(&dev->gnd_short_ntx),
                   atomic64_read(&dev->gnd_short_txbytes),
                   atomic_read(&dev->gnd_short_nrx),
                   atomic64_read(&dev->gnd_short_rxbytes),
                   atomic_read(&dev->gnd_rdma_ntx),
                   atomic64_read(&dev->gnd_rdma_txbytes),
                   atomic_read(&dev->gnd_rdma_nrx),
                   atomic64_read(&dev->gnd_rdma_rxbytes),
                   atomic_read(&kgnilnd_data.kgn_nvmap_short),
                   atomic_read(&kgnilnd_data.kgn_nvmap_cksum),
                   atomic_read(&kgnilnd_data.kgn_nkmap_short),
                   atomic_read(&kgnilnd_data.kgn_rev_length),
                   atomic_read(&kgnilnd_data.kgn_rev_offset),
                   atomic_read(&kgnilnd_data.kgn_rev_copy_buff));

        return 0;
}
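
/*
 * Writing anything to "stats" resets the per-device and global
 * transfer counters sampled above.
 */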
kgnilnd_proc_stats_write(struct file *file, const char __user *ubuffer,
                         size_t count, loff_t *ppos)
{
        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                CERROR("kgnilnd is not initialized for stats write\n");
        }

        /* only do the first device */
        dev = &kgnilnd_data.kgn_devices[0];

        atomic_set(&dev->gnd_short_ntx, 0);
        atomic_set(&dev->gnd_short_nrx, 0);
        atomic64_set(&dev->gnd_short_txbytes, 0);
        atomic64_set(&dev->gnd_short_rxbytes, 0);
        atomic_set(&dev->gnd_rdma_ntx, 0);
        atomic_set(&dev->gnd_rdma_nrx, 0);
        atomic_set(&dev->gnd_fast_ok, 0);
        atomic_set(&dev->gnd_fast_try, 0);
        atomic_set(&dev->gnd_fast_block, 0);
        atomic64_set(&dev->gnd_rdma_txbytes, 0);
        atomic64_set(&dev->gnd_rdma_rxbytes, 0);
        atomic_set(&dev->gnd_rdmaq_nstalls, 0);
        set_mb(dev->gnd_mutex_delay, 0);
        atomic_set(&dev->gnd_n_yield, 0);
        atomic_set(&dev->gnd_n_schedule, 0);
        atomic_set(&kgnilnd_data.kgn_nvmap_short, 0);
        atomic_set(&kgnilnd_data.kgn_nvmap_cksum, 0);
        atomic_set(&kgnilnd_data.kgn_nkmap_short, 0);
        /* sampling is racy, but so is writing this file! */

        return count;
}
kgnilnd_stats_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, kgnilnd_stats_seq_show, PDE_DATA(inode));
}

static const struct file_operations kgn_stats_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_stats_seq_open,
        .write = kgnilnd_proc_stats_write,
        .release = seq_release,
};
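
/*
 * "mdd" table: iterates the TXs on device 0's gnd_map_list (memory
 * descriptors currently mapped for RDMA) while holding gnd_map_lock.
 */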
typedef struct {
        kgn_device_t    *gmdd_dev;
        kgn_tx_t        *gmdd_tx;
        loff_t           gmdd_off;
} kgn_mdd_seq_iter_t;

kgnilnd_mdd_seq_seek(kgn_mdd_seq_iter_t *gseq, loff_t off)
{
        gseq->gmdd_tx = NULL;

        if (tx == NULL || gseq->gmdd_off > off) {
                /* search from start */
                r = gseq->gmdd_dev->gnd_map_list.next;
        } else {
                /* continue current search */
                r = &tx->tx_map_list;
                here = gseq->gmdd_off;
        }

        gseq->gmdd_off = off;

        while (r != &gseq->gmdd_dev->gnd_map_list) {
                t = list_entry(r, kgn_tx_t, tx_map_list);
        }

        gseq->gmdd_tx = NULL;
kgnilnd_mdd_seq_start(struct seq_file *s, loff_t *pos)
{
        kgn_mdd_seq_iter_t *gseq;

        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                return NULL;
        }

        LIBCFS_ALLOC(gseq, sizeof(*gseq));
        if (gseq == NULL) {
                CERROR("could not allocate mdd sequence iterator\n");
                return NULL;
        }

        /* only doing device 0 for now */
        gseq->gmdd_dev = &kgnilnd_data.kgn_devices[0];
        gseq->gmdd_tx = NULL;

        /* need to lock map while we poke - huge disturbance
         * but without it, no way to get the data printed */
        spin_lock(&gseq->gmdd_dev->gnd_map_lock);

        /* set private to gseq for stop */
        s->private = gseq;
        rc = kgnilnd_mdd_seq_seek(gseq, *pos);
kgnilnd_mdd_seq_stop(struct seq_file *s, void *iter)
{
        kgn_mdd_seq_iter_t *gseq = s->private;

        spin_unlock(&gseq->gmdd_dev->gnd_map_lock);
        LIBCFS_FREE(gseq, sizeof(*gseq));
}
kgnilnd_mdd_seq_next(struct seq_file *s, void *iter, loff_t *pos)
{
        kgn_mdd_seq_iter_t *gseq = iter;
        loff_t next = *pos + 1;

        rc = kgnilnd_mdd_seq_seek(gseq, next);
kgnilnd_mdd_seq_show(struct seq_file *s, void *iter)
{
        kgn_mdd_seq_iter_t *gseq = iter;
        gni_mem_handle_t hndl;

        if (gseq->gmdd_off == 0) {
                seq_printf(s, "%s %22s %16s %8s %8s %37s\n",
                           "tx", "tx_id", "nob", "physnop",
                           "buftype", "mem handle");
                return 0;
        }

        id = tx->tx_id.txe_smsg_id;
        physnop = tx->tx_phys_npages;
        buftype = tx->tx_buftype;
        hndl.qword1 = tx->tx_map_key.qword1;
        hndl.qword2 = tx->tx_map_key.qword2;

        seq_printf(s, "%p %x %16llu %8d %#8x %#llx.%#llx\n",
                   tx, id, nob, physnop, buftype,
                   hndl.qword1, hndl.qword2);

        return 0;
}
static struct seq_operations kgn_mdd_sops = {
        .start = kgnilnd_mdd_seq_start,
        .stop  = kgnilnd_mdd_seq_stop,
        .next  = kgnilnd_mdd_seq_next,
        .show  = kgnilnd_mdd_seq_show,
};
kgnilnd_mdd_seq_open(struct inode *inode, struct file *file)
{
        rc = seq_open(file, &kgn_mdd_sops);

        sf = file->private_data;

        /* NULL means we've not yet open() */

        return rc;
}

static struct file_operations kgn_mdd_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_mdd_seq_open,
        .release = seq_release,
};
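
/*
 * "smsg" table: walks device 0's list of FMA memory blocks (mailbox
 * blocks) under gnd_fmablk_lock, revalidating against gnd_fmablk_vers
 * in case the block list changed between reads.
 */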
typedef struct {
        kgn_device_t            *gsmsg_dev;
        kgn_fma_memblock_t      *gsmsg_fmablk;
        loff_t                   gsmsg_off;
        int                      gsmsg_version;
} kgn_smsg_seq_iter_t;

kgnilnd_smsg_seq_seek(kgn_smsg_seq_iter_t *gseq, loff_t off)
{
        kgn_fma_memblock_t *fmablk;

        /* offset 0 is the header, so we start real entries at
         * here == off == 1 */
        gseq->gsmsg_fmablk = NULL;

        fmablk = gseq->gsmsg_fmablk;
        dev = gseq->gsmsg_dev;

        spin_lock(&dev->gnd_fmablk_lock);

        if (fmablk != NULL &&
            gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {
        }

        if (fmablk == NULL || gseq->gsmsg_off > off) {
                /* search from start */
                r = dev->gnd_fma_buffs.next;
        } else {
                /* continue current search */
                r = &fmablk->gnm_bufflist;
                here = gseq->gsmsg_off;
        }

        gseq->gsmsg_version = atomic_read(&dev->gnd_fmablk_vers);
        gseq->gsmsg_off = off;

        while (r != &dev->gnd_fma_buffs) {
                kgn_fma_memblock_t *t;

                t = list_entry(r, kgn_fma_memblock_t, gnm_bufflist);

                gseq->gsmsg_fmablk = t;
        }

        gseq->gsmsg_fmablk = NULL;

        spin_unlock(&dev->gnd_fmablk_lock);
kgnilnd_smsg_seq_start(struct seq_file *s, loff_t *pos)
{
        kgn_smsg_seq_iter_t *gseq;

        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                return NULL;
        }

        LIBCFS_ALLOC(gseq, sizeof(*gseq));
        if (gseq == NULL) {
                CERROR("could not allocate smsg sequence iterator\n");
                return NULL;
        }

        /* only doing device 0 for now */
        gseq->gsmsg_dev = &kgnilnd_data.kgn_devices[0];
        gseq->gsmsg_fmablk = NULL;
        rc = kgnilnd_smsg_seq_seek(gseq, *pos);

        LIBCFS_FREE(gseq, sizeof(*gseq));
kgnilnd_smsg_seq_stop(struct seq_file *s, void *iter)
{
        kgn_smsg_seq_iter_t *gseq = iter;

        LIBCFS_FREE(gseq, sizeof(*gseq));
}
kgnilnd_smsg_seq_next(struct seq_file *s, void *iter, loff_t *pos)
{
        kgn_smsg_seq_iter_t *gseq = iter;
        loff_t next = *pos + 1;

        rc = kgnilnd_smsg_seq_seek(gseq, next);

        LIBCFS_FREE(gseq, sizeof(*gseq));
kgnilnd_smsg_seq_show(struct seq_file *s, void *iter)
{
        kgn_smsg_seq_iter_t *gseq = iter;
        kgn_fma_memblock_t *fmablk;
        int avail_mboxs, held_mboxs, num_mboxs;
        unsigned int blk_size;
        kgn_fmablk_state_t state;
        gni_mem_handle_t hndl;

        if (gseq->gsmsg_off == 0) {
                seq_printf(s, "%5s %4s %6s/%5s/%5s %9s %18s %37s\n",
                           "blk#", "type", "avail", "held", "total", "size",
                           "fmablk", "mem handle");
                return 0;
        }

        fmablk = gseq->gsmsg_fmablk;
        dev = gseq->gsmsg_dev;
        LASSERT(fmablk != NULL);

        spin_lock(&dev->gnd_fmablk_lock);

        if (gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {
                spin_unlock(&dev->gnd_fmablk_lock);
        }

        live = fmablk->gnm_hold_timeout == 0;
        /* none are available if it isn't live... */
        avail_mboxs = live ? fmablk->gnm_avail_mboxs : 0;
        held_mboxs = fmablk->gnm_held_mboxs;
        num_mboxs = fmablk->gnm_num_mboxs;
        blk_size = fmablk->gnm_blk_size;
        state = fmablk->gnm_state;
        hndl.qword1 = fmablk->gnm_hndl.qword1;
        hndl.qword2 = fmablk->gnm_hndl.qword2;

        spin_unlock(&dev->gnd_fmablk_lock);

        if (live) {
                seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p %#llx.%#llx\n",
                           (int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
                           avail_mboxs, held_mboxs, num_mboxs, blk_size,
                           fmablk, hndl.qword1, hndl.qword2);
        } else {
                seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p %37s\n",
                           (int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
                           avail_mboxs, held_mboxs, num_mboxs, blk_size,
                           fmablk, "PURGATORY.HOLD");
        }

        return 0;
}
static struct seq_operations kgn_smsg_sops = {
        .start = kgnilnd_smsg_seq_start,
        .stop  = kgnilnd_smsg_seq_stop,
        .next  = kgnilnd_smsg_seq_next,
        .show  = kgnilnd_smsg_seq_show,
};
kgnilnd_smsg_seq_open(struct inode *inode, struct file *file)
{
        rc = seq_open(file, &kgn_smsg_sops);

        sf = file->private_data;
        sf->private = PDE_DATA(inode);

        return rc;
}

static struct file_operations kgn_smsg_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_smsg_seq_open,
        .release = seq_release,
};
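
/*
 * "conn" table: walks every bucket of the kgn_conns hash under
 * kgn_peer_conn_lock, using kgn_conn_version to detect when the
 * connection list changed between calls.
 */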
typedef struct {
        __u64                    gconn_version;
        struct list_head        *gconn_list;
        kgn_conn_t              *gconn_conn;
        loff_t                   gconn_off;
        int                      gconn_hashidx;
} kgn_conn_seq_iter_t;

kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
{
        struct list_head *list, *tmp;

        gseq->gconn_hashidx = 0;
        gseq->gconn_list = NULL;

        if (off > atomic_read(&kgnilnd_data.kgn_nconns)) {
                gseq->gconn_list = NULL;
        }

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        if (gseq->gconn_list != NULL &&
            gseq->gconn_version != kgnilnd_data.kgn_conn_version) {
        }

        if ((gseq->gconn_list == NULL) ||
            (gseq->gconn_off > off) ||
            (gseq->gconn_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
                /* search from start */
                gseq->gconn_hashidx = 0;
                list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];
        } else {
                /* continue current search */
                list = gseq->gconn_list;
        }

        gseq->gconn_version = kgnilnd_data.kgn_conn_version;
        gseq->gconn_off = off;

        list_for_each(tmp, list) {
                conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);
                gseq->gconn_conn = conn;
        }

        /* if we got through this hash bucket with 'off' still to go, try next */
        gseq->gconn_hashidx++;
        if ((here <= off) &&
            (gseq->gconn_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
                list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];
        }

        gseq->gconn_list = NULL;

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
kgnilnd_conn_seq_start(struct seq_file *s, loff_t *pos)
{
        kgn_conn_seq_iter_t *gseq;

        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                return NULL;
        }

        LIBCFS_ALLOC(gseq, sizeof(*gseq));
        if (gseq == NULL) {
                CERROR("could not allocate conn sequence iterator\n");
                return NULL;
        }

        /* only doing device 0 for now */
        gseq->gconn_list = NULL;
        rc = kgnilnd_conn_seq_seek(gseq, *pos);

        LIBCFS_FREE(gseq, sizeof(*gseq));
kgnilnd_conn_seq_stop(struct seq_file *s, void *iter)
{
        kgn_conn_seq_iter_t *gseq = iter;

        LIBCFS_FREE(gseq, sizeof(*gseq));
}
kgnilnd_conn_seq_next(struct seq_file *s, void *iter, loff_t *pos)
{
        kgn_conn_seq_iter_t *gseq = iter;
        loff_t next = *pos + 1;

        rc = kgnilnd_conn_seq_seek(gseq, next);

        LIBCFS_FREE(gseq, sizeof(*gseq));
kgnilnd_conn_seq_show(struct seq_file *s, void *iter)
{
        kgn_conn_seq_iter_t *gseq = iter;
        kgn_peer_t *peer = NULL;

        /* there is no header data for conns, so offset 0 is the first
         * real entry */

        conn = gseq->gconn_conn;
        LASSERT(conn != NULL);

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        if (gseq->gconn_list != NULL &&
            gseq->gconn_version != kgnilnd_data.kgn_conn_version) {
                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        }

        /* instead of saving off the data, just refcount */
        kgnilnd_conn_addref(conn);
        if (conn->gnc_peer) {
                /* don't use link - after unlock it could get nuked */
                peer = conn->gnc_peer;
                kgnilnd_peer_addref(peer);
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        seq_printf(s, "%p->%s [%d] q %d/%d/%d "
                   "tx sq %u %dms/%dms "
                   "rx sq %u %dms/%dms "
                   "noop r/s %d/%d w/s/cq %lds/%lds/%lds "
                   "sched a/d %lds/%lds "
                   "tx_re %lld TO %ds %s\n",
                   conn, peer ? libcfs_nid2str(peer->gnp_nid) : "<?>",
                   atomic_read(&conn->gnc_refcount),
                   kgnilnd_count_list(&conn->gnc_fmaq),
                   atomic_read(&conn->gnc_nlive_fma),
                   atomic_read(&conn->gnc_nlive_rdma),
                   atomic_read(&conn->gnc_tx_seq),
                   jiffies_to_msecs(jiffies - conn->gnc_last_tx),
                   jiffies_to_msecs(jiffies - conn->gnc_last_tx_cq),
                   atomic_read(&conn->gnc_rx_seq),
                   jiffies_to_msecs(jiffies - conn->gnc_last_rx),
                   jiffies_to_msecs(jiffies - conn->gnc_last_rx_cq),
                   atomic_read(&conn->gnc_reaper_noop),
                   atomic_read(&conn->gnc_sched_noop),
                   cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
                   cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
                   cfs_duration_sec(jiffies - conn->gnc_last_noop_cq),
                   cfs_duration_sec(jiffies - conn->gnc_last_sched_ask),
                   cfs_duration_sec(jiffies - conn->gnc_last_sched_do),
                   conn->gnc_tx_retrans, conn->gnc_timeout,
                   kgnilnd_conn_state2str(conn));

        kgnilnd_peer_decref(peer);
        kgnilnd_conn_decref(conn);

        return 0;
}
static struct seq_operations kgn_conn_sops = {
        .start = kgnilnd_conn_seq_start,
        .stop  = kgnilnd_conn_seq_stop,
        .next  = kgnilnd_conn_seq_next,
        .show  = kgnilnd_conn_seq_show,
};
#define KGN_DEBUG_PEER_NID_DEFAULT -1
static int kgnilnd_debug_peer_nid = KGN_DEBUG_PEER_NID_DEFAULT;
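
/*
 * Select which peer's connections the "peer_conns" file dumps by
 * writing that peer's NID as a plain integer, e.g. (the path shown
 * assumes the proc root created in kgnilnd_proc_init() below):
 *
 *      echo 123 > /proc/kgnilnd/peer_conns
 */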
kgnilnd_proc_peer_conns_write(struct file *file, const char __user *ubuffer,
                              size_t count, loff_t *ppos)
{
        if (count >= sizeof(dummy) || count == 0)
                return -EINVAL;

        if (copy_from_user(dummy, ubuffer, count))
                return -EFAULT;

        rc = sscanf(dummy, "%d", &kgnilnd_debug_peer_nid);
/* debug data to print from conns associated with peer nid
 *  - mbox_addr (msg_buffer + mbox_offset)
 */
kgnilnd_proc_peer_conns_seq_show(struct seq_file *sf, void *v)
{
        if (kgnilnd_debug_peer_nid == KGN_DEBUG_PEER_NID_DEFAULT) {
                seq_printf(sf, "peer_conns not initialized\n");
                return 0;
        }

        /* sample date/time stamp - print time in UTC
         * 2012-12-11T16:06:16.966751 123@gni ...
         */
        getnstimeofday(&now);
        time_to_tm(now.tv_sec, 0, &ctm);

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        peer = kgnilnd_find_peer_locked(kgnilnd_debug_peer_nid);
        if (peer == NULL) {
                seq_printf(sf, "peer not found for this nid %d\n",
                           kgnilnd_debug_peer_nid);
                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                return 0;
        }

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                seq_printf(sf,
                           "%04ld-%02d-%02dT%02d:%02d:%02d.%06ld %s "
                           "err %d peer err %d "
                           "tx sq %u %dms/%dms "
                           "rx sq %u %dms/%dms/%dms "
                           ctm.tm_year + 1900, ctm.tm_mon + 1, ctm.tm_mday,
                           ctm.tm_hour, ctm.tm_min, ctm.tm_sec, now.tv_nsec,
                           libcfs_nid2str(peer->gnp_nid),
                           conn->remote_mbox_addr,
                           kgnilnd_conn_dgram_type2str(conn->gnc_dgram_type),
                           kgnilnd_conn_state2str(conn),
                           conn->gnc_in_purgatory,
                           conn->gnc_close_sent,
                           conn->gnc_close_recvd,
                           conn->gnc_peer_error,
                           atomic_read(&conn->gnc_tx_seq),
                           jiffies_to_msecs(jifs - conn->gnc_last_tx),
                           jiffies_to_msecs(jifs - conn->gnc_last_tx_cq),
                           atomic_read(&conn->gnc_rx_seq),
                           jiffies_to_msecs(jifs - conn->gnc_first_rx),
                           jiffies_to_msecs(jifs - conn->gnc_last_rx),
                           jiffies_to_msecs(jifs - conn->gnc_last_rx_cq),
                           conn->gnc_tx_retrans);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        return 0;
}
kgnilnd_peer_conns_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, kgnilnd_proc_peer_conns_seq_show,
                           PDE_DATA(inode));
}

static const struct file_operations kgn_peer_conns_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_peer_conns_seq_open,
        .write = kgnilnd_proc_peer_conns_write,
        .llseek = seq_lseek,
        .release = seq_release,
};
kgnilnd_conn_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *sf;

        rc = seq_open(file, &kgn_conn_sops);

        sf = file->private_data;
        sf->private = PDE_DATA(inode);

        return rc;
}

static struct file_operations kgn_conn_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_conn_seq_open,
        .llseek = seq_lseek,
        .release = seq_release,
};
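
/*
 * "peer" table: walks the kgn_peers hash under kgn_peer_conn_lock,
 * using kgn_peer_version to notice when the peer list has changed
 * between reads.
 */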
typedef struct {
        __u64                    gpeer_version;
        struct list_head        *gpeer_list;
        kgn_peer_t              *gpeer_peer;
        loff_t                   gpeer_off;
        int                      gpeer_hashidx;
} kgn_peer_seq_iter_t;

kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
{
        struct list_head *list, *tmp;

        gseq->gpeer_hashidx = 0;
        gseq->gpeer_list = NULL;

        if (off > atomic_read(&kgnilnd_data.kgn_npeers)) {
                gseq->gpeer_list = NULL;
        }

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        if (gseq->gpeer_list != NULL &&
            gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {
        }

        if ((gseq->gpeer_list == NULL) ||
            (gseq->gpeer_off > off) ||
            (gseq->gpeer_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
                /* search from start */
                gseq->gpeer_hashidx = 0;
                list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];
        } else {
                /* continue current search */
                list = gseq->gpeer_list;
        }

        gseq->gpeer_version = kgnilnd_data.kgn_peer_version;
        gseq->gpeer_off = off;

        list_for_each(tmp, list) {
                peer = list_entry(tmp, kgn_peer_t, gnp_list);
                gseq->gpeer_peer = peer;
        }

        /* if we got through this hash bucket with 'off' still to go, try next */
        gseq->gpeer_hashidx++;
        if ((here <= off) &&
            (gseq->gpeer_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
                list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];
        }

        gseq->gpeer_list = NULL;

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
kgnilnd_peer_seq_start(struct seq_file *s, loff_t *pos)
{
        kgn_peer_seq_iter_t *gseq;

        if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                return NULL;
        }

        LIBCFS_ALLOC(gseq, sizeof(*gseq));
        if (gseq == NULL) {
                CERROR("could not allocate peer sequence iterator\n");
                return NULL;
        }

        /* only doing device 0 for now */
        gseq->gpeer_list = NULL;
        rc = kgnilnd_peer_seq_seek(gseq, *pos);

        LIBCFS_FREE(gseq, sizeof(*gseq));
kgnilnd_peer_seq_stop(struct seq_file *s, void *iter)
{
        kgn_peer_seq_iter_t *gseq = iter;

        LIBCFS_FREE(gseq, sizeof(*gseq));
}
kgnilnd_peer_seq_next(struct seq_file *s, void *iter, loff_t *pos)
{
        kgn_peer_seq_iter_t *gseq = iter;
        loff_t next = *pos + 1;

        rc = kgnilnd_peer_seq_seek(gseq, next);

        LIBCFS_FREE(gseq, sizeof(*gseq));
kgnilnd_peer_seq_show(struct seq_file *s, void *iter)
{
        kgn_peer_seq_iter_t *gseq = iter;

        /* there is no header data for peers, so offset 0 is the first
         * real entry */

        peer = gseq->gpeer_peer;
        LASSERT(peer != NULL);

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        if (gseq->gpeer_list != NULL &&
            gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {
                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        }

        /* instead of saving off the data, just refcount */
        kgnilnd_peer_addref(peer);
        conn = kgnilnd_find_conn_locked(peer);

        if (peer->gnp_connecting) {
        } else if (conn != NULL) {
        }

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory) {
                }
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        seq_printf(s, "%p->%s [%d] %s NIC 0x%x q %d conn %c purg %d last %d@%lldms dgram %d@%dms reconn %dms to %lus \n",
                   peer, libcfs_nid2str(peer->gnp_nid),
                   atomic_read(&peer->gnp_refcount),
                   (peer->gnp_state == GNILND_PEER_DOWN) ? "down" :
                   peer->gnp_state == GNILND_PEER_TIMED_OUT ? "timedout" : "up",
                   kgnilnd_count_list(&peer->gnp_tx_queue),
                   peer->gnp_last_errno,
                   (ktime_get_seconds() - peer->gnp_last_alive) * MSEC_PER_SEC,
                   peer->gnp_last_dgram_errno,
                   jiffies_to_msecs(jiffies - peer->gnp_last_dgram_time),
                   peer->gnp_reconnect_interval != 0
                        ? jiffies_to_msecs(jiffies - peer->gnp_reconnect_time)
                   peer->gnp_reconnect_interval);

        kgnilnd_peer_decref(peer);

        return 0;
}
static struct seq_operations kgn_peer_sops = {
        .start = kgnilnd_peer_seq_start,
        .stop  = kgnilnd_peer_seq_stop,
        .next  = kgnilnd_peer_seq_next,
        .show  = kgnilnd_peer_seq_show,
};
kgnilnd_peer_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *sf;

        rc = seq_open(file, &kgn_peer_sops);

        sf = file->private_data;
        sf->private = PDE_DATA(inode);

        return rc;
}

static struct file_operations kgn_peer_fops = {
        .owner = THIS_MODULE,
        .open = kgnilnd_peer_seq_open,
        .llseek = seq_lseek,
        .release = seq_release,
};
static struct proc_dir_entry *kgn_proc_root;
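
/*
 * Create the proc tree for the LND: a directory named after the GNI
 * LND module holding cksum_test, stats, mdd, smsg, conn, peer_conns
 * and peer, torn down in reverse order if any entry fails to create.
 */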
kgnilnd_proc_init(void)
{
        struct proc_dir_entry *pde;
        int rc = 0;

        kgn_proc_root = proc_mkdir(libcfs_lnd2modname(GNILND), NULL);
        if (kgn_proc_root == NULL) {
                CERROR("couldn't create proc dir %s\n",
                       libcfs_lnd2modname(GNILND));
                return -ENOENT;
        }

        /* Initialize CKSUM_TEST */
        pde = proc_create(GNILND_PROC_CKSUM_TEST, 0200, kgn_proc_root,
                          &kgn_cksum_test_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_CKSUM_TEST);
                GOTO(remove_dir, rc = -ENOENT);
        }

        /* Initialize STATS */
        pde = proc_create(GNILND_PROC_STATS, 0644, kgn_proc_root,
                          &kgn_stats_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_STATS);
                GOTO(remove_test, rc = -ENOENT);
        }

        /* Initialize MDD */
        pde = proc_create(GNILND_PROC_MDD, 0444, kgn_proc_root, &kgn_mdd_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_MDD);
                GOTO(remove_stats, rc = -ENOENT);
        }

        /* Initialize SMSG */
        pde = proc_create(GNILND_PROC_SMSG, 0444, kgn_proc_root,
                          &kgn_smsg_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_SMSG);
                GOTO(remove_mdd, rc = -ENOENT);
        }

        /* Initialize CONN */
        pde = proc_create(GNILND_PROC_CONN, 0444, kgn_proc_root,
                          &kgn_conn_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_CONN);
                GOTO(remove_smsg, rc = -ENOENT);
        }

        /* Initialize peer conns debug */
        pde = proc_create(GNILND_PROC_PEER_CONNS, 0644, kgn_proc_root,
                          &kgn_peer_conns_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER_CONNS);
                GOTO(remove_conn, rc = -ENOENT);
        }

        /* Initialize PEER */
        pde = proc_create(GNILND_PROC_PEER, 0444, kgn_proc_root,
                          &kgn_peer_fops);
        if (pde == NULL) {
                CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER);
                GOTO(remove_pc, rc = -ENOENT);
        }

        return 0;

remove_pc:
        remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
remove_conn:
        remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
remove_smsg:
        remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
remove_mdd:
        remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
remove_stats:
        remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
remove_test:
        remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
remove_dir:
        remove_proc_entry(libcfs_lnd2modname(GNILND), NULL);
        return rc;
}
kgnilnd_proc_fini(void)
{
        remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
        remove_proc_entry(GNILND_PROC_PEER, kgn_proc_root);
        remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
        remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
        remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
        remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
        remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
        remove_proc_entry(libcfs_lnd2modname(GNILND), NULL);
}