2 * Copyright (C) 2009-2012, 2016 Cray, Inc.
4 * Copyright (c) 2013, 2015, Intel Corporation.
6 * Author: Nic Henke <nic@cray.com>
7 * Author: James Shimek <jshimek@cray.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 /* this code liberated and modified from lnet/lnet/router_proc.c */
27 #define DEBUG_SUBSYSTEM S_LND
29 #include <linux/seq_file.h>
30 #include <lprocfs_status.h>
32 #define GNILND_PROC_STATS "stats"
33 #define GNILND_PROC_MDD "mdd"
34 #define GNILND_PROC_SMSG "smsg"
35 #define GNILND_PROC_CONN "conn"
36 #define GNILND_PROC_PEER_CONNS "peer_conns"
37 #define GNILND_PROC_PEER "peer"
38 #define GNILND_PROC_CKSUM_TEST "cksum_test"
/*
 * _kgnilnd_proc_run_cksum_test - in-kernel checksum self-test, driven by
 * writes to the "cksum_test" proc file.
 *
 * Allocates two LNET_MAX_IOV bio_vec arrays (src/dest) backed by zeroed
 * pages, seeds deliberately misaligned starting offsets (tweaked per test
 * case), copies src -> dest with lnet_copy_kiov2kiov(), then checksums
 * both sides @nloops times and reports the elapsed time and MB processed.
 *
 * @caseno: test-case selector controlling the offset adjustments
 * @nloops: number of timed checksum iterations
 * @nob:    bytes covered per checksum pass
 *
 * Returns 0 on success or a negative errno; all exits go through the
 * unwind path which frees any pages allocated so far plus both arrays.
 */
41 _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
43 struct bio_vec *src, *dest;
44 struct timespec begin, end, diff;
51 CFS_ALLOC_PTR_ARRAY(src, LNET_MAX_IOV);
52 CFS_ALLOC_PTR_ARRAY(dest, LNET_MAX_IOV);
54 if (src == NULL || dest == NULL) {
55 CERROR("couldn't allocate iovs\n");
56 GOTO(unwind, rc = -ENOMEM);
/* back every iov entry with one zeroed page */
59 for (i = 0; i < LNET_MAX_IOV; i++) {
61 src[i].bv_len = PAGE_SIZE;
62 src[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
64 if (src[i].bv_page == NULL) {
65 CERROR("couldn't allocate page %d\n", i);
66 GOTO(unwind, rc = -ENOMEM);
69 dest[i].bv_offset = 0;
70 dest[i].bv_len = PAGE_SIZE;
71 dest[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
73 if (dest[i].bv_page == NULL) {
74 CERROR("couldn't allocate page %d\n", i);
75 GOTO(unwind, rc = -ENOMEM);
79 /* add 2 extra pages - one to absorb the src offset, one for the dest offset */
80 niov = (nob / PAGE_SIZE) + 2;
81 if (niov > LNET_MAX_IOV) {
82 CERROR("bytes %d too large, requires niov %d > %d\n",
83 nob, niov, LNET_MAX_IOV);
84 GOTO(unwind, rc = -E2BIG);
/* misaligned base offsets; the per-case tweaks below shift them further */
88 src[0].bv_offset = 317;
89 dest[0].bv_offset = 592;
96 dest[0].bv_offset -= 1;
100 src[0].bv_offset += 1;
104 src[0].bv_offset += 1;
105 dest[0].bv_offset -= 1;
/* first entries shrink to stay within their page after the offset shift */
107 src[0].bv_len = PAGE_SIZE - src[0].bv_offset;
108 dest[0].bv_len = PAGE_SIZE - dest[0].bv_offset;
/* fill src with a recognizable per-page pattern (0xf0, 0xf1, ...) */
110 for (i = 0; i < niov; i++) {
111 memset(page_address(src[i].bv_page) + src[i].bv_offset,
112 0xf0 + i, src[i].bv_len);
115 lnet_copy_kiov2kiov(niov, dest, 0, niov, src, 0, nob);
117 getnstimeofday(&begin);
/* timed loop: checksum a shrinking span of both sides and compare */
119 for (n = 0; n < nloops; n++) {
121 "case %d loop %d src %d dest %d nob %d niov %d\n",
122 caseno, n, src[0].bv_offset, dest[0].bv_offset, nob,
124 cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1);
125 cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1);
127 if (cksum != cksum2) {
/* NOTE(review): prints 'j' where 'caseno' seems intended - confirm 'j' is meaningful here */
128 CERROR("case %d loop %d different checksums %x expected %x\n",
129 j, n, cksum2, cksum);
130 GOTO(unwind, rc = -ENOKEY);
134 getnstimeofday(&end);
/* x2 because each loop checksums both src and dest */
136 mbytes = ((__u64)nloops * nob * 2) / (1024*1024);
138 diff = kgnilnd_ts_sub(end, begin);
/* NOTE(review): tv_nsec is printed raw after the '.', not scaled to a decimal fraction */
140 LCONSOLE_INFO("running %lldMB took %ld.%ld seconds\n",
141 mbytes, diff.tv_sec, diff.tv_nsec);
/* unwind: 'i' holds the count of successfully allocated page pairs */
144 CDEBUG(D_NET, "freeing %d pages\n", i);
145 for (i -= 1; i >= 0; i--) {
147 __free_page(src[i].bv_page);
150 __free_page(dest[i].bv_page);
154 CFS_FREE_PTR_ARRAY(src, LNET_MAX_IOV);
156 CFS_FREE_PTR_ARRAY(dest, LNET_MAX_IOV);
/*
 * Proc write handler for "cksum_test": copies the user buffer into a
 * bounded local buffer, parses "testno:nloops:nbytes" and runs
 * _kgnilnd_proc_run_cksum_test() with those values.  Writes are
 * rejected before the LND is fully initialized, and input that is
 * empty or would overflow the local buffer is refused.
 */
161 kgnilnd_proc_cksum_test_write(struct file *file, const char __user *ubuffer,
162 size_t count, loff_t *ppos)
164 char dummy[256 + 1] = { '\0' };
165 int testno, nloops, nbytes;
169 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
170 CERROR("can't run cksum test, kgnilnd is not initialized yet\n");
174 if (count >= sizeof(dummy) || count == 0)
177 if (copy_from_user(dummy, ubuffer, count))
180 if (sscanf(dummy, "%d:%d:%d", &testno, &nloops, &nbytes) == 3) {
181 rc = _kgnilnd_proc_run_cksum_test(testno, nloops, nbytes);
185 /* spurious, but lets us know the parse was ok */
/* open() for "cksum_test": write-only entry, so no show callback is given */
193 kgnilnd_cksum_test_seq_open(struct inode *inode, struct file *file)
195 return single_open(file, NULL, PDE_DATA(inode));
/* file operations for the write-only "cksum_test" proc entry */
198 static const struct file_operations kgn_cksum_test_fops = {
199 .owner = THIS_MODULE,
200 .open = kgnilnd_cksum_test_seq_open,
201 .write = kgnilnd_proc_cksum_test_write,
203 .release = seq_release,
/*
 * seq_file show for "stats": prints one snapshot of global and
 * device-0 counters - tx/peer/conn counts, MDD and GART mapping
 * statistics, RDMAQ state, SMSG/RDMA byte counters and reverse-RDMA
 * stats.  Counters are read without locking; see the "sampling is
 * racy" note below.
 */
207 kgnilnd_stats_seq_show(struct seq_file *sf, void *v)
212 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
213 seq_printf(sf, "kgnilnd is not initialized yet\n");
217 /* only do the first device */
218 dev = &kgnilnd_data.kgn_devices[0];
220 /* sampling is racy, but so is reading this file! */
222 do_gettimeofday(&now);
224 seq_printf(sf, "time: %lu.%lu\n"
233 "n_eager_allocs: %d\n"
234 "GART map bytes: %ld\n"
235 "TX queued maps: %d\n"
236 "TX phys nmaps: %d\n"
237 "TX phys bytes: %lu\n"
238 "TX virt nmaps: %d\n"
239 "TX virt bytes: %llu\n"
240 "RDMAQ bytes_auth: %ld\n"
241 "RDMAQ bytes_left: %ld\n"
242 "RDMAQ nstalls: %d\n"
243 "dev mutex delay: %ld\n"
245 "dev n_schedule: %d\n"
246 "SMSG fast_try: %d\n"
248 "SMSG fast_block: %d\n"
250 "SMSG tx_bytes: %lu\n"
252 "SMSG rx_bytes: %lu\n"
254 "RDMA tx_bytes: %lu\n"
256 "RDMA rx_bytes: %lu\n"
260 "RDMA REV length: %d\n"
261 "RDMA REV offset: %d\n"
262 "RDMA REV copy: %d\n",
263 now.tv_sec, now.tv_usec,
264 atomic_read(&kgnilnd_data.kgn_ntx),
265 atomic_read(&kgnilnd_data.kgn_npeers),
266 atomic_read(&kgnilnd_data.kgn_nconns),
267 atomic_read(&dev->gnd_neps),
268 atomic_read(&dev->gnd_ndgrams),
269 atomic_read(&dev->gnd_nfmablk),
270 atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
271 atomic_read(&kgnilnd_data.kgn_neager_allocs),
272 atomic64_read(&dev->gnd_nbytes_map),
273 atomic_read(&dev->gnd_nq_map),
274 dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
275 dev->gnd_map_nvirt, dev->gnd_map_virtnob,
276 atomic64_read(&dev->gnd_rdmaq_bytes_out),
277 atomic64_read(&dev->gnd_rdmaq_bytes_ok),
278 atomic_read(&dev->gnd_rdmaq_nstalls),
279 dev->gnd_mutex_delay,
280 atomic_read(&dev->gnd_n_yield),
281 atomic_read(&dev->gnd_n_schedule),
282 atomic_read(&dev->gnd_fast_try),
283 atomic_read(&dev->gnd_fast_ok),
284 atomic_read(&dev->gnd_fast_block),
285 atomic_read(&dev->gnd_short_ntx),
286 atomic64_read(&dev->gnd_short_txbytes),
287 atomic_read(&dev->gnd_short_nrx),
288 atomic64_read(&dev->gnd_short_rxbytes),
289 atomic_read(&dev->gnd_rdma_ntx),
290 atomic64_read(&dev->gnd_rdma_txbytes),
291 atomic_read(&dev->gnd_rdma_nrx),
292 atomic64_read(&dev->gnd_rdma_rxbytes),
293 atomic_read(&kgnilnd_data.kgn_nvmap_short),
294 atomic_read(&kgnilnd_data.kgn_nvmap_cksum),
295 atomic_read(&kgnilnd_data.kgn_nkmap_short),
296 atomic_read(&kgnilnd_data.kgn_rev_length),
297 atomic_read(&kgnilnd_data.kgn_rev_offset),
298 atomic_read(&kgnilnd_data.kgn_rev_copy_buff));
/*
 * Proc write handler for "stats": any write resets the device-0 and
 * global statistic counters to zero.  The written bytes themselves
 * are not parsed.
 */
304 kgnilnd_proc_stats_write(struct file *file, const char __user *ubuffer,
305 size_t count, loff_t *ppos)
309 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
310 CERROR("kgnilnd is not initialized for stats write\n");
314 /* only do the first device */
315 dev = &kgnilnd_data.kgn_devices[0];
317 atomic_set(&dev->gnd_short_ntx, 0);
318 atomic_set(&dev->gnd_short_nrx, 0);
319 atomic64_set(&dev->gnd_short_txbytes, 0);
320 atomic64_set(&dev->gnd_short_rxbytes, 0);
321 atomic_set(&dev->gnd_rdma_ntx, 0);
322 atomic_set(&dev->gnd_rdma_nrx, 0);
323 atomic_set(&dev->gnd_fast_ok, 0);
324 atomic_set(&dev->gnd_fast_try, 0);
325 atomic_set(&dev->gnd_fast_block, 0);
326 atomic64_set(&dev->gnd_rdma_txbytes, 0);
327 atomic64_set(&dev->gnd_rdma_rxbytes, 0);
328 atomic_set(&dev->gnd_rdmaq_nstalls, 0);
/* non-atomic counter: reset with a memory barrier */
329 set_mb(dev->gnd_mutex_delay, 0);
330 atomic_set(&dev->gnd_n_yield, 0);
331 atomic_set(&dev->gnd_n_schedule, 0);
332 atomic_set(&kgnilnd_data.kgn_nvmap_short, 0);
333 atomic_set(&kgnilnd_data.kgn_nvmap_cksum, 0);
334 atomic_set(&kgnilnd_data.kgn_nkmap_short, 0);
335 /* sampling is racy, but so is writing this file! */
/* open() for "stats": single-record seq_file backed by the show above */
341 kgnilnd_stats_seq_open(struct inode *inode, struct file *file)
343 return single_open(file, kgnilnd_stats_seq_show, PDE_DATA(inode));
/* file operations for "stats": readable snapshot, write resets counters */
346 static const struct file_operations kgn_stats_fops = {
347 .owner = THIS_MODULE,
348 .open = kgnilnd_stats_seq_open,
350 .write = kgnilnd_proc_stats_write,
352 .release = seq_release,
/* iterator state for the "mdd" seq_file walk over the device map list
 * (struct opening and remaining members not visible in this view) */
356 kgn_device_t *gmdd_dev;
359 } kgn_mdd_seq_iter_t;
/*
 * Position the mdd iterator at logical offset @off within the device's
 * gnd_map_list.  Restarts from the list head when there is no cached tx
 * or when seeking backwards; otherwise continues forward from the cached
 * position.  The map list is stable here because seq_start holds
 * gnd_map_lock for the whole traversal.
 */
362 kgnilnd_mdd_seq_seek(kgn_mdd_seq_iter_t *gseq, loff_t off)
370 gseq->gmdd_tx = NULL;
377 if (tx == NULL || gseq->gmdd_off > off) {
378 /* search from start */
379 r = gseq->gmdd_dev->gnd_map_list.next;
382 /* continue current search */
383 r = &tx->tx_map_list;
384 here = gseq->gmdd_off;
387 gseq->gmdd_off = off;
389 while (r != &gseq->gmdd_dev->gnd_map_list) {
392 t = list_entry(r, kgn_tx_t, tx_map_list);
/* walked off the end of the list: no entry at this offset */
403 gseq->gmdd_tx = NULL;
/*
 * seq_file start for "mdd": allocates the iterator, pins device 0,
 * takes gnd_map_lock for the entire traversal (released in seq_stop),
 * and seeks to *pos.
 */
410 kgnilnd_mdd_seq_start(struct seq_file *s, loff_t *pos)
413 kgn_mdd_seq_iter_t *gseq;
416 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
420 LIBCFS_ALLOC(gseq, sizeof(*gseq));
422 CERROR("could not allocate mdd sequence iterator\n");
426 /* only doing device 0 for now */
427 gseq->gmdd_dev = &kgnilnd_data.kgn_devices[0];
428 gseq->gmdd_tx = NULL;
430 /* need to lock map while we poke - huge disturbance
431 * but without it, no way to get the data printed */
432 spin_lock(&gseq->gmdd_dev->gnd_map_lock);
434 /* set private to gseq for stop */
437 rc = kgnilnd_mdd_seq_seek(gseq, *pos);
/* seq_file stop for "mdd": drop the map lock taken in start and free the
 * iterator.  Uses s->private rather than 'iter' so cleanup also works
 * when iteration ended early. */
445 kgnilnd_mdd_seq_stop(struct seq_file *s, void *iter)
447 kgn_mdd_seq_iter_t *gseq = s->private;
450 spin_unlock(&gseq->gmdd_dev->gnd_map_lock);
451 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next for "mdd": advance the iterator one logical offset */
456 kgnilnd_mdd_seq_next(struct seq_file *s, void *iter, loff_t *pos)
458 kgn_mdd_seq_iter_t *gseq = iter;
460 loff_t next = *pos + 1;
462 rc = kgnilnd_mdd_seq_seek(gseq, next);
/*
 * seq_file show for "mdd": offset 0 prints the column header; other
 * offsets print one mapped tx - its smsg id, byte count, physical page
 * count, buffer type and GNI memory handle.
 */
471 kgnilnd_mdd_seq_show(struct seq_file *s, void *iter)
473 kgn_mdd_seq_iter_t *gseq = iter;
479 gni_mem_handle_t hndl;
481 if (gseq->gmdd_off == 0) {
482 seq_printf(s, "%s %22s %16s %8s %8s %37s\n",
483 "tx", "tx_id", "nob", "physnop",
484 "buftype", "mem handle");
491 id = tx->tx_id.txe_smsg_id;
493 physnop = tx->tx_phys_npages;
494 buftype = tx->tx_buftype;
495 hndl.qword1 = tx->tx_map_key.qword1;
496 hndl.qword2 = tx->tx_map_key.qword2;
/* NOTE(review): trailing "llxx" in the format looks like a typo for "%#llx" - confirm */
498 seq_printf(s, "%p %x %16llu %8d %#8x %#llx.%#llxx\n",
499 tx, id, nob, physnop, buftype,
500 hndl.qword1, hndl.qword2);
/* seq_file operations for the "mdd" proc entry */
505 static struct seq_operations kgn_mdd_sops = {
506 .start = kgnilnd_mdd_seq_start,
507 .stop = kgnilnd_mdd_seq_stop,
508 .next = kgnilnd_mdd_seq_next,
509 .show = kgnilnd_mdd_seq_show,
/* open() for "mdd": plain seq_open over kgn_mdd_sops */
514 kgnilnd_mdd_seq_open(struct inode *inode, struct file *file)
519 rc = seq_open(file, &kgn_mdd_sops);
521 sf = file->private_data;
523 /* NULL means we've not yet open() */
/* file operations for the read-only "mdd" proc entry */
529 static struct file_operations kgn_mdd_fops = {
530 .owner = THIS_MODULE,
531 .open = kgnilnd_mdd_seq_open,
534 .release = seq_release,
/* iterator state for the "smsg" seq_file walk over fmablks
 * (struct opening and remaining members not visible in this view) */
539 kgn_device_t *gsmsg_dev;
540 kgn_fma_memblock_t *gsmsg_fmablk;
542 } kgn_smsg_seq_iter_t;
/*
 * Position the smsg iterator at offset @off in the device's fmablk list.
 * Offset 0 is reserved for the header row, so real entries start at 1.
 * If gnd_fmablk_vers changed since the last seek the cached position is
 * stale and the search restarts from the list head.  Takes and releases
 * gnd_fmablk_lock internally (unlike the mdd walk, the lock is not held
 * across calls).
 */
545 kgnilnd_smsg_seq_seek(kgn_smsg_seq_iter_t *gseq, loff_t off)
547 kgn_fma_memblock_t *fmablk;
553 /* offset 0 is the header, so we start real entries at
554 * here == off == 1 */
556 gseq->gsmsg_fmablk = NULL;
561 fmablk = gseq->gsmsg_fmablk;
562 dev = gseq->gsmsg_dev;
564 spin_lock(&dev->gnd_fmablk_lock);
/* list changed under us since last time: invalidate the cached block */
566 if (fmablk != NULL &&
567 gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {
573 if (fmablk == NULL || gseq->gsmsg_off > off) {
574 /* search from start */
575 r = dev->gnd_fma_buffs.next;
578 /* continue current search */
579 r = &fmablk->gnm_bufflist;
580 here = gseq->gsmsg_off;
583 gseq->gsmsg_version = atomic_read(&dev->gnd_fmablk_vers);
584 gseq->gsmsg_off = off;
586 while (r != &dev->gnd_fma_buffs) {
587 kgn_fma_memblock_t *t;
589 t = list_entry(r, kgn_fma_memblock_t, gnm_bufflist);
592 gseq->gsmsg_fmablk = t;
/* walked off the end: nothing at this offset */
600 gseq->gsmsg_fmablk = NULL;
603 spin_unlock(&dev->gnd_fmablk_lock);
/*
 * seq_file start for "smsg": allocates the iterator, pins device 0 and
 * seeks to *pos.  The iterator is freed on seek failure.
 */
608 kgnilnd_smsg_seq_start(struct seq_file *s, loff_t *pos)
611 kgn_smsg_seq_iter_t *gseq;
614 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
618 LIBCFS_ALLOC(gseq, sizeof(*gseq));
620 CERROR("could not allocate smsg sequence iterator\n");
624 /* only doing device 0 for now */
625 gseq->gsmsg_dev = &kgnilnd_data.kgn_devices[0];
626 gseq->gsmsg_fmablk = NULL;
627 rc = kgnilnd_smsg_seq_seek(gseq, *pos);
631 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file stop for "smsg": free the iterator (no lock is held here) */
636 kgnilnd_smsg_seq_stop(struct seq_file *s, void *iter)
638 kgn_smsg_seq_iter_t *gseq = iter;
641 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next for "smsg": advance one offset; frees the iterator when
 * the seek ends the walk */
645 kgnilnd_smsg_seq_next(struct seq_file *s, void *iter, loff_t *pos)
647 kgn_smsg_seq_iter_t *gseq = iter;
649 loff_t next = *pos + 1;
651 rc = kgnilnd_smsg_seq_seek(gseq, next);
653 LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file show for "smsg": offset 0 prints the column header; other
 * offsets snapshot one fmablk under gnd_fmablk_lock (re-checking the
 * list version first) and print its mailbox availability, size, state
 * and GNI memory handle.  Blocks held in purgatory print
 * "PURGATORY.HOLD" in place of the handle.
 */
661 kgnilnd_smsg_seq_show(struct seq_file *s, void *iter)
663 kgn_smsg_seq_iter_t *gseq = iter;
664 kgn_fma_memblock_t *fmablk;
666 int avail_mboxs, held_mboxs, num_mboxs;
667 unsigned int blk_size;
669 kgn_fmablk_state_t state;
670 gni_mem_handle_t hndl;
672 if (gseq->gsmsg_off == 0) {
673 seq_printf(s, "%5s %4s %6s/%5s/%5s %9s %18s %37s\n",
674 "blk#", "type", "avail", "held", "total", "size",
675 "fmablk", "mem handle");
679 fmablk = gseq->gsmsg_fmablk;
680 dev = gseq->gsmsg_dev;
681 LASSERT(fmablk != NULL);
683 spin_lock(&dev->gnd_fmablk_lock);
/* list changed since seek: bail rather than print stale data */
685 if (gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {
687 spin_unlock(&dev->gnd_fmablk_lock);
/* a zero hold timeout means the block is live (not in purgatory) */
691 live = fmablk->gnm_hold_timeout == 0;
692 /* none are available if it isn't live... */
693 avail_mboxs = live ? fmablk->gnm_avail_mboxs : 0;
694 held_mboxs = fmablk->gnm_held_mboxs;
695 num_mboxs = fmablk->gnm_num_mboxs;
696 blk_size = fmablk->gnm_blk_size;
697 state = fmablk->gnm_state;
698 hndl.qword1 = fmablk->gnm_hndl.qword1;
699 hndl.qword2 = fmablk->gnm_hndl.qword2;
701 spin_unlock(&dev->gnd_fmablk_lock);
704 seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p %#llx.%#llx\n",
705 (int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
706 avail_mboxs, held_mboxs, num_mboxs, blk_size,
707 fmablk, hndl.qword1, hndl.qword2);
709 seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p %37s\n",
710 (int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
711 avail_mboxs, held_mboxs, num_mboxs, blk_size,
712 fmablk, "PURGATORY.HOLD");
/* seq_file operations for the "smsg" proc entry */
718 static struct seq_operations kgn_smsg_sops = {
719 .start = kgnilnd_smsg_seq_start,
720 .stop = kgnilnd_smsg_seq_stop,
721 .next = kgnilnd_smsg_seq_next,
722 .show = kgnilnd_smsg_seq_show,
/* open() for "smsg": seq_open over kgn_smsg_sops, stashing proc data */
727 kgnilnd_smsg_seq_open(struct inode *inode, struct file *file)
732 rc = seq_open(file, &kgn_smsg_sops);
734 sf = file->private_data;
735 sf->private = PDE_DATA(inode);
/* file operations for the read-only "smsg" proc entry */
741 static struct file_operations kgn_smsg_fops = {
742 .owner = THIS_MODULE,
743 .open = kgnilnd_smsg_seq_open,
746 .release = seq_release,
/* iterator state for the "conn" seq_file walk over the conn hash table
 * (struct opening and remaining members not visible in this view) */
751 struct list_head *gconn_list;
752 kgn_conn_t *gconn_conn;
755 } kgn_conn_seq_iter_t;
/*
 * Position the conn iterator at logical offset @off across the conn
 * hash buckets.  There is no header row, so offset 0 is the first conn.
 * Restarts from bucket 0 when the cached list pointer is gone, when
 * seeking backwards, when the cached bucket index is out of range, or
 * when kgn_conn_version changed under us.  Holds kgn_peer_conn_lock
 * (read) for the duration of the seek.
 */
758 kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
760 struct list_head *list, *tmp;
765 gseq->gconn_hashidx = 0;
766 gseq->gconn_list = NULL;
/* quick reject: offset past the total conn count can't match anything */
769 if (off > atomic_read(&kgnilnd_data.kgn_nconns)) {
770 gseq->gconn_list = NULL;
774 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
775 if (gseq->gconn_list != NULL &&
776 gseq->gconn_version != kgnilnd_data.kgn_conn_version) {
782 if ((gseq->gconn_list == NULL) ||
783 (gseq->gconn_off > off) ||
784 (gseq->gconn_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
785 /* search from start */
786 gseq->gconn_hashidx = 0;
787 list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];
790 /* continue current search */
791 list = gseq->gconn_list;
794 gseq->gconn_version = kgnilnd_data.kgn_conn_version;
795 gseq->gconn_off = off;
799 list_for_each(tmp, list) {
802 conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);
803 gseq->gconn_conn = conn;
809 /* if we got through this hash bucket with 'off' still to go, try next*/
810 gseq->gconn_hashidx++;
812 (gseq->gconn_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
813 list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];
/* exhausted every bucket without reaching 'off' */
817 gseq->gconn_list = NULL;
820 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/*
 * seq_file start for "conn": allocates the iterator and seeks to *pos;
 * frees the iterator if the seek fails.
 */
825 kgnilnd_conn_seq_start(struct seq_file *s, loff_t *pos)
828 kgn_conn_seq_iter_t *gseq;
831 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
835 LIBCFS_ALLOC(gseq, sizeof(*gseq));
837 CERROR("could not allocate conn sequence iterator\n");
841 /* only doing device 0 for now */
842 gseq->gconn_list = NULL;
843 rc = kgnilnd_conn_seq_seek(gseq, *pos);
847 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file stop for "conn": free the iterator */
852 kgnilnd_conn_seq_stop(struct seq_file *s, void *iter)
854 kgn_conn_seq_iter_t *gseq = iter;
857 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next for "conn": advance one offset; frees the iterator when
 * the seek ends the walk */
861 kgnilnd_conn_seq_next(struct seq_file *s, void *iter, loff_t *pos)
863 kgn_conn_seq_iter_t *gseq = iter;
865 loff_t next = *pos + 1;
867 rc = kgnilnd_conn_seq_seek(gseq, next);
869 LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file show for "conn": re-validates the conn list version under
 * kgn_peer_conn_lock, then takes conn (and peer, if any) references so
 * the lock can be dropped while formatting one line of queue depths,
 * tx/rx sequence numbers and timing data for the connection.
 * References are dropped after printing.
 */
877 kgnilnd_conn_seq_show(struct seq_file *s, void *iter)
879 kgn_conn_seq_iter_t *gseq = iter;
880 kgn_peer_t *peer = NULL;
883 /* there is no header data for conns, so offset 0 is the first
886 conn = gseq->gconn_conn;
887 LASSERT(conn != NULL);
889 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
/* list changed since seek: bail rather than touch a possibly-freed conn */
890 if (gseq->gconn_list != NULL &&
891 gseq->gconn_version != kgnilnd_data.kgn_conn_version) {
893 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
897 /* instead of saving off the data, just refcount */
898 kgnilnd_conn_addref(conn);
899 if (conn->gnc_peer) {
900 /* don't use link - after unlock it could get nuked */
901 peer = conn->gnc_peer;
902 kgnilnd_peer_addref(peer);
905 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
907 seq_printf(s, "%p->%s [%d] q %d/%d/%d "
908 "tx sq %u %dms/%dms "
909 "rx sq %u %dms/%dms "
910 "noop r/s %d/%d w/s/cq %lds/%lds/%lds "
911 "sched a/d %lds/%lds "
912 "tx_re %lld TO %ds %s\n",
913 conn, peer ? libcfs_nid2str(peer->gnp_nid) : "<?>",
914 atomic_read(&conn->gnc_refcount),
915 kgnilnd_count_list(&conn->gnc_fmaq),
916 atomic_read(&conn->gnc_nlive_fma),
917 atomic_read(&conn->gnc_nlive_rdma),
918 atomic_read(&conn->gnc_tx_seq),
919 jiffies_to_msecs(jiffies - conn->gnc_last_tx),
920 jiffies_to_msecs(jiffies - conn->gnc_last_tx_cq),
921 atomic_read(&conn->gnc_rx_seq),
922 jiffies_to_msecs(jiffies - conn->gnc_last_rx),
923 jiffies_to_msecs(jiffies - conn->gnc_last_rx_cq),
924 atomic_read(&conn->gnc_reaper_noop),
925 atomic_read(&conn->gnc_sched_noop),
926 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
927 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
928 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq),
929 cfs_duration_sec(jiffies - conn->gnc_last_sched_ask),
930 cfs_duration_sec(jiffies - conn->gnc_last_sched_do),
931 conn->gnc_tx_retrans, conn->gnc_timeout,
932 kgnilnd_conn_state2str(conn));
/* release the references taken above */
935 kgnilnd_peer_decref(peer);
936 kgnilnd_conn_decref(conn);
/* seq_file operations for the "conn" proc entry */
941 static struct seq_operations kgn_conn_sops = {
942 .start = kgnilnd_conn_seq_start,
943 .stop = kgnilnd_conn_seq_stop,
944 .next = kgnilnd_conn_seq_next,
945 .show = kgnilnd_conn_seq_show,
949 #define KGN_DEBUG_PEER_NID_DEFAULT -1
/* NID selected by writing to "peer_conns"; the default (-1) means unset */
950 static int kgnilnd_debug_peer_nid = KGN_DEBUG_PEER_NID_DEFAULT;
/*
 * Proc write handler for "peer_conns": parses a decimal NID from the
 * user buffer into kgnilnd_debug_peer_nid, which selects the peer the
 * show side dumps.
 */
953 kgnilnd_proc_peer_conns_write(struct file *file, const char __user *ubuffer,
954 size_t count, loff_t *ppos)
959 if (count >= sizeof(dummy) || count == 0)
962 if (copy_from_user(dummy, ubuffer, count))
965 rc = sscanf(dummy, "%d", &kgnilnd_debug_peer_nid);
974 /* debug data to print from conns associated with peer nid
977 - mbox_addr (msg_buffer + mbox_offset)
/*
 * seq_file show for "peer_conns": for the peer selected via
 * kgnilnd_debug_peer_nid, prints one UTC-timestamped line per
 * connection with mailbox address, dgram type, state, purgatory/close
 * flags, error codes, tx/rx sequence numbers and timing deltas.
 * Holds kgn_peer_conn_lock (write) while walking the peer's conn list.
 */
996 kgnilnd_proc_peer_conns_seq_show(struct seq_file *sf, void *v)
1001 struct timespec now;
1004 if (kgnilnd_debug_peer_nid == KGN_DEBUG_PEER_NID_DEFAULT) {
1005 seq_printf(sf, "peer_conns not initialized\n");
1009 /* sample date/time stamp - print time in UTC
1010 * 2012-12-11T16:06:16.966751 123@gni ...
1012 getnstimeofday(&now);
1013 time_to_tm(now.tv_sec, 0, &ctm);
1016 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1017 peer = kgnilnd_find_peer_locked(kgnilnd_debug_peer_nid);
1020 seq_printf(sf, "peer not found for this nid %d\n",
1021 kgnilnd_debug_peer_nid);
1022 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1026 list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1028 "%04ld-%02d-%02dT%02d:%02d:%02d.%06ld %s "
1034 "err %d peer err %d "
1035 "tx sq %u %dms/%dms "
1036 "rx sq %u %dms/%dms/%dms "
1038 ctm.tm_year+1900, ctm.tm_mon+1, ctm.tm_mday,
1039 ctm.tm_hour, ctm.tm_min, ctm.tm_sec, now.tv_nsec,
1040 libcfs_nid2str(peer->gnp_nid),
1041 conn->remote_mbox_addr,
1042 kgnilnd_conn_dgram_type2str(conn->gnc_dgram_type),
1043 kgnilnd_conn_state2str(conn),
1044 conn->gnc_in_purgatory,
1045 conn->gnc_close_sent,
1046 conn->gnc_close_recvd,
1048 conn->gnc_peer_error,
1049 atomic_read(&conn->gnc_tx_seq),
1050 jiffies_to_msecs(jifs - conn->gnc_last_tx),
1051 jiffies_to_msecs(jifs - conn->gnc_last_tx_cq),
1052 atomic_read(&conn->gnc_rx_seq),
1053 jiffies_to_msecs(jifs - conn->gnc_first_rx),
1054 jiffies_to_msecs(jifs - conn->gnc_last_rx),
1055 jiffies_to_msecs(jifs - conn->gnc_last_rx_cq),
1056 conn->gnc_tx_retrans);
1059 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/* open() for "peer_conns": single-record seq_file over the show above */
1064 kgnilnd_peer_conns_seq_open(struct inode *inode, struct file *file)
1066 return single_open(file, kgnilnd_proc_peer_conns_seq_show,
/* file operations for "peer_conns": write selects the NID, read dumps it */
1070 static const struct file_operations kgn_peer_conns_fops = {
1071 .owner = THIS_MODULE,
1072 .open = kgnilnd_peer_conns_seq_open,
1074 .write = kgnilnd_proc_peer_conns_write,
1075 .llseek = seq_lseek,
1076 .release = seq_release,
/* open() for "conn": seq_open over kgn_conn_sops, stashing proc data */
1080 kgnilnd_conn_seq_open(struct inode *inode, struct file *file)
1082 struct seq_file *sf;
1085 rc = seq_open(file, &kgn_conn_sops);
1087 sf = file->private_data;
1088 sf->private = PDE_DATA(inode);
/* file operations for the read-only "conn" proc entry */
1094 static struct file_operations kgn_conn_fops = {
1095 .owner = THIS_MODULE,
1096 .open = kgnilnd_conn_seq_open,
1098 .llseek = seq_lseek,
1099 .release = seq_release,
/* iterator state for the "peer" seq_file walk over the peer hash table
 * (struct opening and remaining members not visible in this view) */
1103 __u64 gpeer_version;
1104 struct list_head *gpeer_list;
1105 kgn_peer_t *gpeer_peer;
1108 } kgn_peer_seq_iter_t;
/*
 * Position the peer iterator at logical offset @off across the peer
 * hash buckets - the mirror of kgnilnd_conn_seq_seek() for peers.
 * There is no header row, so offset 0 is the first peer.  Restarts
 * from bucket 0 when the cached position is gone, stale
 * (kgn_peer_version changed), backwards, or out of range.  Holds
 * kgn_peer_conn_lock (read) for the duration of the seek.
 */
1111 kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
1113 struct list_head *list, *tmp;
1118 gseq->gpeer_hashidx = 0;
1119 gseq->gpeer_list = NULL;
/* quick reject: offset past the total peer count can't match anything */
1122 if (off > atomic_read(&kgnilnd_data.kgn_npeers)) {
1123 gseq->gpeer_list = NULL;
1127 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1128 if (gseq->gpeer_list != NULL &&
1129 gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {
1135 if ((gseq->gpeer_list == NULL) ||
1136 (gseq->gpeer_off > off) ||
1137 (gseq->gpeer_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
1138 /* search from start */
1139 gseq->gpeer_hashidx = 0;
1140 list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];
1143 /* continue current search */
1144 list = gseq->gpeer_list;
1147 gseq->gpeer_version = kgnilnd_data.kgn_peer_version;
1148 gseq->gpeer_off = off;
1152 list_for_each(tmp, list) {
1155 peer = list_entry(tmp, kgn_peer_t, gnp_list);
1156 gseq->gpeer_peer = peer;
1162 /* if we got through this hash bucket with 'off' still to go, try next*/
1163 gseq->gpeer_hashidx++;
1164 if ((here <= off) &&
1165 (gseq->gpeer_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
1166 list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];
/* exhausted every bucket without reaching 'off' */
1170 gseq->gpeer_list = NULL;
1173 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/*
 * seq_file start for "peer": allocates the iterator and seeks to *pos;
 * frees the iterator if the seek fails.
 */
1178 kgnilnd_peer_seq_start(struct seq_file *s, loff_t *pos)
1181 kgn_peer_seq_iter_t *gseq;
1184 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
1188 LIBCFS_ALLOC(gseq, sizeof(*gseq));
1190 CERROR("could not allocate peer sequence iterator\n");
1194 /* only doing device 0 for now */
1195 gseq->gpeer_list = NULL;
1196 rc = kgnilnd_peer_seq_seek(gseq, *pos);
1200 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file stop for "peer": free the iterator */
1205 kgnilnd_peer_seq_stop(struct seq_file *s, void *iter)
1207 kgn_peer_seq_iter_t *gseq = iter;
1210 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next for "peer": advance one offset; frees the iterator when
 * the seek ends the walk */
1214 kgnilnd_peer_seq_next(struct seq_file *s, void *iter, loff_t *pos)
1216 kgn_peer_seq_iter_t *gseq = iter;
1218 loff_t next = *pos + 1;
1220 rc = kgnilnd_peer_seq_seek(gseq, next);
1222 LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file show for "peer": re-validates the peer list version under
 * kgn_peer_conn_lock, refcounts the peer, derives a one-character
 * connection status (connecting / connected / purgatory), drops the
 * lock and prints one line of peer state, queue depth, error and
 * timing data.  The reference is dropped after printing.
 */
1230 kgnilnd_peer_seq_show(struct seq_file *s, void *iter)
1232 kgn_peer_seq_iter_t *gseq = iter;
1237 /* there is no header data for peers, so offset 0 is the first
1240 peer = gseq->gpeer_peer;
1241 LASSERT(peer != NULL);
1243 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
/* list changed since seek: bail rather than touch a possibly-freed peer */
1244 if (gseq->gpeer_list != NULL &&
1245 gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {
1247 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1251 /* instead of saving off the data, just refcount */
1252 kgnilnd_peer_addref(peer);
1253 conn = kgnilnd_find_conn_locked(peer);
1255 if (peer->gnp_connecting) {
1257 } else if (conn != NULL) {
/* no live conn: check whether any conn is held in purgatory */
1263 list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1264 if (conn->gnc_in_purgatory) {
1269 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1271 seq_printf(s, "%p->%s [%d] %s NIC 0x%x q %d conn %c purg %d last %d@%lldms dgram %d@%dms reconn %dms to %lus \n",
1272 peer, libcfs_nid2str(peer->gnp_nid),
1273 atomic_read(&peer->gnp_refcount),
1274 (peer->gnp_state == GNILND_PEER_DOWN) ? "down" :
1275 peer->gnp_state == GNILND_PEER_TIMED_OUT ? "timedout" : "up",
1277 kgnilnd_count_list(&peer->gnp_tx_queue),
1280 peer->gnp_last_errno,
1281 (ktime_get_seconds() - peer->gnp_last_alive) * MSEC_PER_SEC,
1282 peer->gnp_last_dgram_errno,
1283 jiffies_to_msecs(jiffies - peer->gnp_last_dgram_time),
1284 peer->gnp_reconnect_interval != 0
1285 ? jiffies_to_msecs(jiffies - peer->gnp_reconnect_time)
1287 peer->gnp_reconnect_interval);
1289 kgnilnd_peer_decref(peer);
/* seq_file operations for the "peer" proc entry */
1294 static struct seq_operations kgn_peer_sops = {
1295 .start = kgnilnd_peer_seq_start,
1296 .stop = kgnilnd_peer_seq_stop,
1297 .next = kgnilnd_peer_seq_next,
1298 .show = kgnilnd_peer_seq_show,
/* open() for "peer": seq_open over kgn_peer_sops, stashing proc data */
1302 kgnilnd_peer_seq_open(struct inode *inode, struct file *file)
1304 struct seq_file *sf;
1307 rc = seq_open(file, &kgn_peer_sops);
1309 sf = file->private_data;
1310 sf->private = PDE_DATA(inode);
/* file operations for the read-only "peer" proc entry */
1316 static struct file_operations kgn_peer_fops = {
1317 .owner = THIS_MODULE,
1318 .open = kgnilnd_peer_seq_open,
1320 .llseek = seq_lseek,
1321 .release = seq_release,
/* root of the gnilnd proc tree, created in kgnilnd_proc_init() */
1324 static struct proc_dir_entry *kgn_proc_root;
/*
 * Create the gnilnd /proc tree: the per-module directory plus the
 * cksum_test, stats, mdd, smsg, conn, peer_conns and peer entries.
 * On failure, entries already created are torn down in reverse order
 * through the chained GOTO labels at the bottom.
 */
1327 kgnilnd_proc_init(void)
1329 struct proc_dir_entry *pde;
1334 kgn_proc_root = proc_mkdir(libcfs_lnd2modname(GNILND), NULL);
1335 if (kgn_proc_root == NULL) {
1336 CERROR("couldn't create proc dir %s\n",
1337 libcfs_lnd2modname(GNILND));
1341 /* Initialize CKSUM_TEST */
1342 pde = proc_create(GNILND_PROC_CKSUM_TEST, 0200, kgn_proc_root,
1343 &kgn_cksum_test_fops);
1345 CERROR("couldn't create proc entry %s\n", GNILND_PROC_CKSUM_TEST);
1346 GOTO(remove_dir, rc = -ENOENT);
1349 /* Initialize STATS */
1350 pde = proc_create(GNILND_PROC_STATS, 0644, kgn_proc_root,
1353 CERROR("couldn't create proc entry %s\n", GNILND_PROC_STATS);
1354 GOTO(remove_test, rc = -ENOENT);
1357 /* Initialize MDD */
1358 pde = proc_create(GNILND_PROC_MDD, 0444, kgn_proc_root, &kgn_mdd_fops);
1360 CERROR("couldn't create proc entry %s\n", GNILND_PROC_MDD);
1361 GOTO(remove_stats, rc = -ENOENT);
1364 /* Initialize SMSG */
1365 pde = proc_create(GNILND_PROC_SMSG, 0444, kgn_proc_root,
1368 CERROR("couldn't create proc entry %s\n", GNILND_PROC_SMSG);
1369 GOTO(remove_mdd, rc = -ENOENT);
1372 /* Initialize CONN */
1373 pde = proc_create(GNILND_PROC_CONN, 0444, kgn_proc_root,
1376 CERROR("couldn't create proc entry %s\n", GNILND_PROC_CONN);
1377 GOTO(remove_smsg, rc = -ENOENT);
1380 /* Initialize peer conns debug */
1381 pde = proc_create(GNILND_PROC_PEER_CONNS, 0644, kgn_proc_root,
1382 &kgn_peer_conns_fops);
1384 CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER_CONNS);
1385 GOTO(remove_conn, rc = -ENOENT);
1388 /* Initialize PEER */
1389 pde = proc_create(GNILND_PROC_PEER, 0444, kgn_proc_root,
1392 CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER);
1393 GOTO(remove_pc, rc = -ENOENT);
/* error unwind: remove entries in reverse order of creation */
1398 remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
1400 remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
1402 remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
1404 remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
1406 remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
1408 remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
1410 remove_proc_entry(libcfs_lnd2modname(GNILND), NULL);
/* Tear down every proc entry created by kgnilnd_proc_init(), then the
 * module directory itself. */
1416 kgnilnd_proc_fini(void)
1418 remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
1419 remove_proc_entry(GNILND_PROC_PEER, kgn_proc_root);
1420 remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
1421 remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
1422 remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
1423 remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
1424 remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
1425 remove_proc_entry(libcfs_lnd2modname(GNILND), NULL);