2 * Copyright (C) 2009-2012 Cray, Inc.
4 * Copyright (c) 2013, Intel Corporation.
6 * Author: Nic Henke <nic@cray.com>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 /* this code liberated and modified from lnet/lnet/router_proc.c */
26 #define DEBUG_SUBSYSTEM S_LND
28 #include <linux/seq_file.h>
30 #define GNILND_PROC_STATS "stats"
31 #define GNILND_PROC_MDD "mdd"
32 #define GNILND_PROC_SMSG "smsg"
33 #define GNILND_PROC_CONN "conn"
34 #define GNILND_PROC_PEER_CONNS "peer_conns"
35 #define GNILND_PROC_PEER "peer"
36 #define GNILND_PROC_CKSUM_TEST "cksum_test"
39 _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
41 lnet_kiov_t *src, *dest;
42 struct timespec begin, end, diff;
49 LIBCFS_ALLOC(src, LNET_MAX_IOV * sizeof(lnet_kiov_t));
50 LIBCFS_ALLOC(dest, LNET_MAX_IOV * sizeof(lnet_kiov_t));
52 if (src == NULL || dest == NULL) {
53 CERROR("couldn't allocate iovs\n");
54 GOTO(unwind, rc = -ENOMEM);
57 for (i = 0; i < LNET_MAX_IOV; i++) {
58 src[i].kiov_offset = 0;
59 src[i].kiov_len = PAGE_SIZE;
60 src[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
62 if (src[i].kiov_page == NULL) {
63 CERROR("couldn't allocate page %d\n", i);
64 GOTO(unwind, rc = -ENOMEM);
67 dest[i].kiov_offset = 0;
68 dest[i].kiov_len = PAGE_SIZE;
69 dest[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
71 if (dest[i].kiov_page == NULL) {
72 CERROR("couldn't allocate page %d\n", i);
73 GOTO(unwind, rc = -ENOMEM);
77 /* add extra 2 pages - one for offset of src, 2nd to allow dest offset */
78 niov = (nob / PAGE_SIZE) + 2;
79 if (niov > LNET_MAX_IOV) {
80 CERROR("bytes %d too large, requires niov %d > %d\n",
81 nob, niov, LNET_MAX_IOV);
82 GOTO(unwind, rc = -E2BIG);
86 src[0].kiov_offset = 317;
87 dest[0].kiov_offset = 592;
94 dest[0].kiov_offset -= 1;
98 src[0].kiov_offset += 1;
102 src[0].kiov_offset += 1;
103 dest[0].kiov_offset -= 1;
105 src[0].kiov_len = PAGE_SIZE - src[0].kiov_offset;
106 dest[0].kiov_len = PAGE_SIZE - dest[0].kiov_offset;
108 for (i = 0; i < niov; i++) {
109 memset(page_address(src[i].kiov_page) + src[i].kiov_offset,
110 0xf0 + i, src[i].kiov_len);
113 lnet_copy_kiov2kiov(niov, dest, 0, niov, src, 0, nob);
115 getnstimeofday(&begin);
117 for (n = 0; n < nloops; n++) {
118 CDEBUG(D_BUFFS, "case %d loop %d src %d dest %d nob %d niov %d\n",
119 caseno, n, src[0].kiov_offset, dest[0].kiov_offset, nob, niov);
120 cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1);
121 cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1);
123 if (cksum != cksum2) {
124 CERROR("case %d loop %d different checksums %x expected %x\n",
125 j, n, cksum2, cksum);
126 GOTO(unwind, rc = -ENOKEY);
130 getnstimeofday(&end);
132 mbytes = ((__u64)nloops * nob * 2) / (1024*1024);
134 diff = kgnilnd_ts_sub(end, begin);
136 LCONSOLE_INFO("running "LPD64"MB took %ld.%ld seconds\n",
137 mbytes, diff.tv_sec, diff.tv_nsec);
140 CDEBUG(D_NET, "freeing %d pages\n", i);
141 for (i -= 1; i >= 0; i--) {
142 if (src[i].kiov_page != NULL) {
143 __free_page(src[i].kiov_page);
145 if (dest[i].kiov_page != NULL) {
146 __free_page(dest[i].kiov_page);
151 LIBCFS_FREE(src, LNET_MAX_IOV * sizeof(lnet_kiov_t));
153 LIBCFS_FREE(dest, LNET_MAX_IOV * sizeof(lnet_kiov_t));
/*
 * proc write handler for "cksum_test": parses "case:loops:bytes" from
 * userspace and kicks off the checksum self-test.
 */
kgnilnd_proc_cksum_test_write(struct file *file, const char *ubuffer,
			      unsigned long count, void *data)
	char dummy[256 + 1] = { '\0' };
	int testno, nloops, nbytes;

	/* the LND must be fully initialized before touching device state */
	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
		CERROR("can't run cksum test, kgnilnd is not initialized yet\n");

	/* reject empty writes and anything that wouldn't fit (keeps the
	 * trailing NUL from the initializer intact) */
	if (count >= sizeof(dummy) || count == 0)

	if (copy_from_user(dummy, ubuffer, count))

	/* expect exactly three colon-separated integers */
	if (sscanf(dummy, "%d:%d:%d", &testno, &nloops, &nbytes) == 3) {
		rc = _kgnilnd_proc_run_cksum_test(testno, nloops, nbytes);

	/* spurious, but lets us know the parse was ok */
/*
 * proc read handler for "stats": dump a snapshot of global and device-0
 * counters as one formatted page.  The atomic reads are not taken under a
 * single lock, so individual fields may be mutually inconsistent (noted
 * below as racy-by-design).
 */
kgnilnd_proc_stats_read(char *page, char **start, off_t off,
			int count, int *eof, void *data)
	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
		"kgnilnd is not initialized yet\n");

	/* only do the first device */
	dev = &kgnilnd_data.kgn_devices[0];

	/* sampling is racy, but so is reading this file! */

	do_gettimeofday(&now);

	/* NOTE: the argument list below must stay in exact correspondence
	 * with the format string - review both together when editing */
	rc = sprintf(page, "time: %lu.%lu\n"
		"n_eager_allocs: %d\n"
		"GART map bytes: %ld\n"
		"TX queued maps: %d\n"
		"TX phys nmaps: %d\n"
		"TX phys bytes: %lu\n"
		"TX virt nmaps: %d\n"
		"TX virt bytes: "LPU64"\n"
		"RDMAQ bytes_auth: %ld\n"
		"RDMAQ bytes_left: %ld\n"
		"RDMAQ nstalls: %d\n"
		"dev mutex delay: %ld\n"
		"dev n_schedule: %d\n"
		"SMSG fast_try: %d\n"
		"SMSG fast_block: %d\n"
		"SMSG tx_bytes: %ld\n"
		"SMSG rx_bytes: %ld\n"
		"RDMA tx_bytes: %ld\n"
		"RDMA rx_bytes: %ld\n"
		"RDMA REV length: %d\n"
		"RDMA REV offset: %d\n"
		"RDMA REV copy: %d\n",
		now.tv_sec, now.tv_usec,
		atomic_read(&kgnilnd_data.kgn_ntx),
		atomic_read(&kgnilnd_data.kgn_npeers),
		atomic_read(&kgnilnd_data.kgn_nconns),
		atomic_read(&dev->gnd_neps),
		atomic_read(&dev->gnd_ndgrams),
		atomic_read(&dev->gnd_nfmablk),
		atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
		atomic_read(&kgnilnd_data.kgn_neager_allocs),
		atomic64_read(&dev->gnd_nbytes_map),
		atomic_read(&dev->gnd_nq_map),
		dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
		dev->gnd_map_nvirt, dev->gnd_map_virtnob,
		atomic64_read(&dev->gnd_rdmaq_bytes_out),
		atomic64_read(&dev->gnd_rdmaq_bytes_ok),
		atomic_read(&dev->gnd_rdmaq_nstalls),
		dev->gnd_mutex_delay,
		atomic_read(&dev->gnd_n_yield), atomic_read(&dev->gnd_n_schedule),
		atomic_read(&dev->gnd_fast_try), atomic_read(&dev->gnd_fast_ok),
		atomic_read(&dev->gnd_fast_block),
		atomic_read(&dev->gnd_short_ntx), atomic64_read(&dev->gnd_short_txbytes),
		atomic_read(&dev->gnd_short_nrx), atomic64_read(&dev->gnd_short_rxbytes),
		atomic_read(&dev->gnd_rdma_ntx), atomic64_read(&dev->gnd_rdma_txbytes),
		atomic_read(&dev->gnd_rdma_nrx), atomic64_read(&dev->gnd_rdma_rxbytes),
		atomic_read(&kgnilnd_data.kgn_nvmap_short),
		atomic_read(&kgnilnd_data.kgn_nvmap_cksum),
		atomic_read(&kgnilnd_data.kgn_nkmap_short),
		atomic_read(&kgnilnd_data.kgn_rev_length),
		atomic_read(&kgnilnd_data.kgn_rev_offset),
		atomic_read(&kgnilnd_data.kgn_rev_copy_buff));
/*
 * proc write handler for "stats": any write zeroes the resettable
 * per-device and global counters (only device 0 is covered, matching the
 * read side).
 */
kgnilnd_proc_stats_write(struct file *file, const char *ubuffer,
			 unsigned long count, void *data)
	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
		CERROR("kgnilnd is not initialized for stats write\n");

	/* only do the first device */
	dev = &kgnilnd_data.kgn_devices[0];

	atomic_set(&dev->gnd_short_ntx, 0);
	atomic_set(&dev->gnd_short_nrx, 0);
	atomic64_set(&dev->gnd_short_txbytes, 0);
	atomic64_set(&dev->gnd_short_rxbytes, 0);
	atomic_set(&dev->gnd_rdma_ntx, 0);
	atomic_set(&dev->gnd_rdma_nrx, 0);
	atomic_set(&dev->gnd_fast_ok, 0);
	atomic_set(&dev->gnd_fast_try, 0);
	atomic_set(&dev->gnd_fast_block, 0);
	atomic64_set(&dev->gnd_rdma_txbytes, 0);
	atomic64_set(&dev->gnd_rdma_rxbytes, 0);
	atomic_set(&dev->gnd_rdmaq_nstalls, 0);
	/* non-atomic field: set_mb gives a store with a memory barrier */
	set_mb(dev->gnd_mutex_delay, 0);
	atomic_set(&dev->gnd_n_yield, 0);
	atomic_set(&dev->gnd_n_schedule, 0);
	atomic_set(&kgnilnd_data.kgn_nvmap_short, 0);
	atomic_set(&kgnilnd_data.kgn_nvmap_cksum, 0);
	atomic_set(&kgnilnd_data.kgn_nkmap_short, 0);
	/* sampling is racy, but so is writing this file! */
	/* iterator state for the "mdd" seq_file: the device whose map list
	 * is being walked (plus cursor fields declared nearby) */
	kgn_device_t *gmdd_dev;
} kgn_mdd_seq_iter_t;
/*
 * Position the mdd iterator at entry 'off' of the device's gnd_map_list.
 * Restarts from the list head when there is no current tx or when 'off'
 * moved backwards; otherwise continues from the cached position.  Leaves
 * gmdd_tx pointing at the entry, or NULL when 'off' is past the end.
 */
kgnilnd_mdd_seq_seek(kgn_mdd_seq_iter_t *gseq, loff_t off)
	gseq->gmdd_tx = NULL;

	if (tx == NULL || gseq->gmdd_off > off) {
		/* search from start */
		r = gseq->gmdd_dev->gnd_map_list.next;

	/* continue current search */
	r = &tx->tx_map_list;
	here = gseq->gmdd_off;

	gseq->gmdd_off = off;

	while (r != &gseq->gmdd_dev->gnd_map_list) {
		t = list_entry(r, kgn_tx_t, tx_map_list);

	/* walked off the end of the map list */
	gseq->gmdd_tx = NULL;
/*
 * seq_file .start for "mdd": allocate the iterator, take the device map
 * lock (held until .stop so the list stays stable while printing), and
 * seek to *pos.
 */
kgnilnd_mdd_seq_start(struct seq_file *s, loff_t *pos)
	kgn_mdd_seq_iter_t *gseq;

	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {

	LIBCFS_ALLOC(gseq, sizeof(*gseq));
		CERROR("could not allocate mdd sequence iterator\n");

	/* only doing device 0 for now */
	gseq->gmdd_dev = &kgnilnd_data.kgn_devices[0];
	gseq->gmdd_tx = NULL;

	/* need to lock map while we poke - huge disturbance
	 * but without it, no way to get the data printed */
	spin_lock(&gseq->gmdd_dev->gnd_map_lock);

	/* set private to gseq for stop */

	rc = kgnilnd_mdd_seq_seek(gseq, *pos);
/*
 * seq_file .stop for "mdd": drop the map lock taken in .start and free
 * the iterator stashed in s->private.
 */
kgnilnd_mdd_seq_stop(struct seq_file *s, void *iter)
	kgn_mdd_seq_iter_t *gseq = s->private;

	spin_unlock(&gseq->gmdd_dev->gnd_map_lock);
	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .next for "mdd": advance the iterator to *pos + 1.
 */
kgnilnd_mdd_seq_next(struct seq_file *s, void *iter, loff_t *pos)
	kgn_mdd_seq_iter_t *gseq = iter;

	loff_t next = *pos + 1;

	rc = kgnilnd_mdd_seq_seek(gseq, next);
436 kgnilnd_mdd_seq_show(struct seq_file *s, void *iter)
438 kgn_mdd_seq_iter_t *gseq = iter;
444 gni_mem_handle_t hndl;
446 if (gseq->gmdd_off == 0) {
447 seq_printf(s, "%s %22s %16s %8s %8s %37s\n",
448 "tx", "tx_id", "nob", "physnop",
449 "buftype", "mem handle");
456 id = tx->tx_id.txe_smsg_id;
458 physnop = tx->tx_phys_npages;
459 buftype = tx->tx_buftype;
460 hndl.qword1 = tx->tx_map_key.qword1;
461 hndl.qword2 = tx->tx_map_key.qword2;
463 seq_printf(s, "%p %x %16"LPF64"u %8d %#8x "LPX64"."LPX64"x\n",
464 tx, id, nob, physnop, buftype,
465 hndl.qword1, hndl.qword2);
/* seq_file operations backing /proc .../mdd */
static struct seq_operations kgn_mdd_sops = {
	.start = kgnilnd_mdd_seq_start,
	.stop  = kgnilnd_mdd_seq_stop,
	.next  = kgnilnd_mdd_seq_next,
	.show  = kgnilnd_mdd_seq_show,
/*
 * file .open for "mdd": attach the mdd seq_operations to this file.
 */
kgnilnd_mdd_seq_open(struct inode *inode, struct file *file)
	rc = seq_open(file, &kgn_mdd_sops);

	sf = file->private_data;

	/* NULL means we've not yet open() */
/* file operations for /proc .../mdd (seq_file based) */
static struct file_operations kgn_mdd_fops = {
	.owner   = THIS_MODULE,
	.open    = kgnilnd_mdd_seq_open,
	.release = seq_release,
	/* iterator state for the "smsg" seq_file: device plus the current
	 * fma memory block (cursor/version fields declared nearby) */
	kgn_device_t *gsmsg_dev;
	kgn_fma_memblock_t *gsmsg_fmablk;
} kgn_smsg_seq_iter_t;
/*
 * Position the smsg iterator at entry 'off' of the device's fma block
 * list, under gnd_fmablk_lock.  A cached position is only reused if the
 * list version (gnd_fmablk_vers) is unchanged; otherwise, or when 'off'
 * moved backwards, the walk restarts from the head.  Leaves
 * gsmsg_fmablk set to the entry or NULL at end of list.
 */
kgnilnd_smsg_seq_seek(kgn_smsg_seq_iter_t *gseq, loff_t off)
	kgn_fma_memblock_t *fmablk;

	/* offset 0 is the header, so we start real entries at
	 * here == off == 1 */

	gseq->gsmsg_fmablk = NULL;

	fmablk = gseq->gsmsg_fmablk;
	dev = gseq->gsmsg_dev;

	spin_lock(&dev->gnd_fmablk_lock);

	/* cached block is stale if the block list changed under us */
	if (fmablk != NULL &&
	    gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {

	if (fmablk == NULL || gseq->gsmsg_off > off) {
		/* search from start */
		r = dev->gnd_fma_buffs.next;

	/* continue current search */
	r = &fmablk->gnm_bufflist;
	here = gseq->gsmsg_off;

	gseq->gsmsg_version = atomic_read(&dev->gnd_fmablk_vers);
	gseq->gsmsg_off = off;

	while (r != &dev->gnd_fma_buffs) {
		kgn_fma_memblock_t *t;

		t = list_entry(r, kgn_fma_memblock_t, gnm_bufflist);

		gseq->gsmsg_fmablk = t;

	/* walked off the end of the block list */
	gseq->gsmsg_fmablk = NULL;

	spin_unlock(&dev->gnd_fmablk_lock);
/*
 * seq_file .start for "smsg": allocate an iterator and seek to *pos.
 * Unlike the mdd variant, no lock is held between calls - the seek
 * re-validates against the fmablk version each time.
 */
kgnilnd_smsg_seq_start(struct seq_file *s, loff_t *pos)
	kgn_smsg_seq_iter_t *gseq;

	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {

	LIBCFS_ALLOC(gseq, sizeof(*gseq));
		CERROR("could not allocate smsg sequence iterator\n");

	/* only doing device 0 for now */
	gseq->gsmsg_dev = &kgnilnd_data.kgn_devices[0];
	gseq->gsmsg_fmablk = NULL;
	rc = kgnilnd_smsg_seq_seek(gseq, *pos);

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .stop for "smsg": free the iterator.
 */
kgnilnd_smsg_seq_stop(struct seq_file *s, void *iter)
	kgn_smsg_seq_iter_t *gseq = iter;

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .next for "smsg": advance the iterator to *pos + 1, freeing
 * it on the end-of-sequence path.
 */
kgnilnd_smsg_seq_next(struct seq_file *s, void *iter, loff_t *pos)
	kgn_smsg_seq_iter_t *gseq = iter;

	loff_t next = *pos + 1;

	rc = kgnilnd_smsg_seq_seek(gseq, next);

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .show for "smsg": print one row per fma memory block -
 * availability counts, size, block pointer and GNI mem handle.  Offset 0
 * is the column header.  Fields are snapshotted under gnd_fmablk_lock
 * and the row is skipped if the list version changed since the seek.
 */
kgnilnd_smsg_seq_show(struct seq_file *s, void *iter)
	kgn_smsg_seq_iter_t *gseq = iter;
	kgn_fma_memblock_t *fmablk;

	int avail_mboxs, held_mboxs, num_mboxs;
	unsigned int blk_size;

	kgn_fmablk_state_t state;
	gni_mem_handle_t hndl;

	if (gseq->gsmsg_off == 0) {
		seq_printf(s, "%5s %4s %6s/%5s/%5s %9s %18s %37s\n",
			"blk#", "type", "avail", "held", "total", "size",
			"fmablk", "mem handle");

	fmablk = gseq->gsmsg_fmablk;
	dev = gseq->gsmsg_dev;
	LASSERT(fmablk != NULL);

	spin_lock(&dev->gnd_fmablk_lock);

	/* list changed since we seeked - our fmablk may be gone */
	if (gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {

		spin_unlock(&dev->gnd_fmablk_lock);

	/* a block with a hold timeout is headed for purgatory */
	live = fmablk->gnm_hold_timeout == 0;
	/* none are available if it isn't live... */
	avail_mboxs = live ? fmablk->gnm_avail_mboxs : 0;
	held_mboxs = fmablk->gnm_held_mboxs;
	num_mboxs = fmablk->gnm_num_mboxs;
	blk_size = fmablk->gnm_blk_size;
	state = fmablk->gnm_state;
	hndl.qword1 = fmablk->gnm_hndl.qword1;
	hndl.qword2 = fmablk->gnm_hndl.qword2;

	spin_unlock(&dev->gnd_fmablk_lock);

	/* live block: print the real mem handle ... */
	seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p "LPX64"."LPX64"\n",
		(int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
		avail_mboxs, held_mboxs, num_mboxs, blk_size,
		fmablk, hndl.qword1, hndl.qword2);

	/* ... otherwise flag it as held in purgatory */
	seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p %37s\n",
		(int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
		avail_mboxs, held_mboxs, num_mboxs, blk_size,
		fmablk, "PURGATORY.HOLD");
/* seq_file operations backing /proc .../smsg */
static struct seq_operations kgn_smsg_sops = {
	.start = kgnilnd_smsg_seq_start,
	.stop  = kgnilnd_smsg_seq_stop,
	.next  = kgnilnd_smsg_seq_next,
	.show  = kgnilnd_smsg_seq_show,
/*
 * file .open for "smsg": attach the smsg seq_operations and propagate
 * the proc entry's data pointer into the seq_file private field.
 */
kgnilnd_smsg_seq_open(struct inode *inode, struct file *file)
	struct proc_dir_entry *dp = PDE(inode);

	rc = seq_open(file, &kgn_smsg_sops);

	sf = file->private_data;
	sf->private = dp->data;
/* file operations for /proc .../smsg (seq_file based) */
static struct file_operations kgn_smsg_fops = {
	.owner   = THIS_MODULE,
	.open    = kgnilnd_smsg_seq_open,
	.release = seq_release,
	/* iterator state for the "conn" seq_file: current hash-bucket list
	 * position and conn (hash index/version fields declared nearby) */
	struct list_head *gconn_list;
	kgn_conn_t *gconn_conn;
} kgn_conn_seq_iter_t;
/*
 * Position the conn iterator at entry 'off' of the global connection
 * hash, under the peer_conn read lock.  A cached position is only
 * reused while kgn_conn_version is unchanged; otherwise, or when 'off'
 * moved backwards or the bucket index is exhausted, the walk restarts
 * from bucket 0.  Leaves gconn_conn/gconn_list set, or gconn_list NULL
 * when 'off' is past the last connection.
 */
kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
	struct list_head *list, *tmp;

	gseq->gconn_hashidx = 0;
	gseq->gconn_list = NULL;

	/* cheap bail-out: off is beyond the total conn count */
	if (off > atomic_read(&kgnilnd_data.kgn_nconns)) {
		gseq->gconn_list = NULL;

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* cached position is stale if the conn table changed */
	if (gseq->gconn_list != NULL &&
	    gseq->gconn_version != kgnilnd_data.kgn_conn_version) {

	if ((gseq->gconn_list == NULL) ||
	    (gseq->gconn_off > off) ||
	    (gseq->gconn_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
		/* search from start */
		gseq->gconn_hashidx = 0;
		list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];

	/* continue current search */
	list = gseq->gconn_list;

	gseq->gconn_version = kgnilnd_data.kgn_conn_version;
	gseq->gconn_off = off;

	list_for_each(tmp, list) {
		conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);
		gseq->gconn_conn = conn;

	/* if we got through this hash bucket with 'off' still to go, try next*/
	gseq->gconn_hashidx++;
	    (gseq->gconn_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
		list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];

	/* ran out of buckets */
	gseq->gconn_list = NULL;

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/*
 * seq_file .start for "conn": allocate an iterator and seek to *pos.
 */
kgnilnd_conn_seq_start(struct seq_file *s, loff_t *pos)
	kgn_conn_seq_iter_t *gseq;

	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {

	LIBCFS_ALLOC(gseq, sizeof(*gseq));
		CERROR("could not allocate conn sequence iterator\n");

	/* only doing device 0 for now */
	gseq->gconn_list = NULL;
	rc = kgnilnd_conn_seq_seek(gseq, *pos);

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .stop for "conn": free the iterator.
 */
kgnilnd_conn_seq_stop(struct seq_file *s, void *iter)
	kgn_conn_seq_iter_t *gseq = iter;

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .next for "conn": advance the iterator to *pos + 1, freeing
 * it on the end-of-sequence path.
 */
kgnilnd_conn_seq_next(struct seq_file *s, void *iter, loff_t *pos)
	kgn_conn_seq_iter_t *gseq = iter;

	loff_t next = *pos + 1;

	rc = kgnilnd_conn_seq_seek(gseq, next);

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .show for "conn": print one row per connection - peer nid,
 * refcount, queue depths, tx/rx activity ages, noop and scheduling
 * stats, retransmits, timeout and state.  The conn and its peer are
 * refcounted across the unlocked seq_printf instead of copying fields.
 */
kgnilnd_conn_seq_show(struct seq_file *s, void *iter)
	kgn_conn_seq_iter_t *gseq = iter;
	kgn_peer_t *peer = NULL;

	/* there is no header data for conns, so offset 0 is the first

	conn = gseq->gconn_conn;
	LASSERT(conn != NULL);

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* bail if the conn table changed since we seeked */
	if (gseq->gconn_list != NULL &&
	    gseq->gconn_version != kgnilnd_data.kgn_conn_version) {

		read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* instead of saving off the data, just refcount */
	kgnilnd_conn_addref(conn);
	if (conn->gnc_peer) {
		/* don't use link - after unlock it could get nuked */
		peer = conn->gnc_peer;
		kgnilnd_peer_addref(peer);

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	seq_printf(s, "%p->%s [%d] q %d/%d/%d "
		"tx sq %u %dms/%dms "
		"rx sq %u %dms/%dms "
		"noop r/s %d/%d w/s/cq %lds/%lds/%lds "
		"sched a/d %lds/%lds "
		"tx_re "LPD64" TO %ds %s\n",
		conn, peer ? libcfs_nid2str(peer->gnp_nid) : "<?>",
		atomic_read(&conn->gnc_refcount),
		kgnilnd_count_list(&conn->gnc_fmaq),
		atomic_read(&conn->gnc_nlive_fma),
		atomic_read(&conn->gnc_nlive_rdma),
		jiffies_to_msecs(jiffies - conn->gnc_last_tx),
		jiffies_to_msecs(jiffies - conn->gnc_last_tx_cq),
		jiffies_to_msecs(jiffies - conn->gnc_last_rx),
		jiffies_to_msecs(jiffies - conn->gnc_last_rx_cq),
		atomic_read(&conn->gnc_reaper_noop),
		atomic_read(&conn->gnc_sched_noop),
		cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
		cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
		cfs_duration_sec(jiffies - conn->gnc_last_noop_cq),
		cfs_duration_sec(jiffies - conn->gnc_last_sched_ask),
		cfs_duration_sec(jiffies - conn->gnc_last_sched_do),
		conn->gnc_tx_retrans, conn->gnc_timeout,
		kgnilnd_conn_state2str(conn));

	/* drop the refs taken above */
	kgnilnd_peer_decref(peer);
	kgnilnd_conn_decref(conn);
/* seq_file operations backing /proc .../conn */
static struct seq_operations kgn_conn_sops = {
	.start = kgnilnd_conn_seq_start,
	.stop  = kgnilnd_conn_seq_stop,
	.next  = kgnilnd_conn_seq_next,
	.show  = kgnilnd_conn_seq_show,
/* nid selected via the peer_conns write handler; the default sentinel
 * means "not configured yet" and makes reads fail gracefully */
#define KGN_DEBUG_PEER_NID_DEFAULT -1
static int kgnilnd_debug_peer_nid = KGN_DEBUG_PEER_NID_DEFAULT;
/*
 * proc write handler for "peer_conns": parse a decimal nid from
 * userspace and remember it for subsequent reads.
 */
kgnilnd_proc_peer_conns_write(struct file *file, const char *ubuffer,
			      unsigned long count, void *data)
	/* reject empty writes and anything that wouldn't fit the buffer */
	if (count >= sizeof(dummy) || count == 0)

	if (copy_from_user(dummy, ubuffer, count))

	rc = sscanf(dummy, "%d", &kgnilnd_debug_peer_nid);
940 /* debug data to print from conns associated with peer nid
943 - mbox_addr (msg_buffer + mbox_offset)
962 kgnilnd_proc_peer_conns_read(char *page, char **start, off_t off,
963 int count, int *eof, void *data)
973 if (kgnilnd_debug_peer_nid == KGN_DEBUG_PEER_NID_DEFAULT) {
974 rc = sprintf(page, "peer_conns not initialized\n");
978 /* sample date/time stamp - print time in UTC
979 * 2012-12-11T16:06:16.966751 123@gni ...
981 getnstimeofday(&now);
982 time_to_tm(now.tv_sec, 0, &ctm);
985 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
986 peer = kgnilnd_find_peer_locked(kgnilnd_debug_peer_nid);
989 rc = sprintf(page, "peer not found for this nid %d\n",
990 kgnilnd_debug_peer_nid);
991 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
995 list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
996 len += scnprintf(page, count - len,
997 "%04ld-%02d-%02dT%02d:%02d:%02d.%06ld %s "
1003 "err %d peer err %d "
1004 "tx sq %u %dms/%dms "
1005 "rx sq %u %dms/%dms/%dms "
1007 ctm.tm_year+1900, ctm.tm_mon+1, ctm.tm_mday,
1008 ctm.tm_hour, ctm.tm_min, ctm.tm_sec, now.tv_nsec,
1009 libcfs_nid2str(peer->gnp_nid),
1010 conn->remote_mbox_addr,
1011 kgnilnd_conn_dgram_type2str(conn->gnc_dgram_type),
1012 kgnilnd_conn_state2str(conn),
1013 conn->gnc_in_purgatory,
1014 conn->gnc_close_sent,
1015 conn->gnc_close_recvd,
1017 conn->gnc_peer_error,
1019 jiffies_to_msecs(jifs - conn->gnc_last_tx),
1020 jiffies_to_msecs(jifs - conn->gnc_last_tx_cq),
1022 jiffies_to_msecs(jifs - conn->gnc_first_rx),
1023 jiffies_to_msecs(jifs - conn->gnc_last_rx),
1024 jiffies_to_msecs(jifs - conn->gnc_last_rx_cq),
1025 conn->gnc_tx_retrans);
1028 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/*
 * file .open for "conn": attach the conn seq_operations and propagate
 * the proc entry's data pointer into the seq_file private field.
 */
kgnilnd_conn_seq_open(struct inode *inode, struct file *file)
	struct proc_dir_entry *dp = PDE(inode);
	struct seq_file *sf;

	rc = seq_open(file, &kgn_conn_sops);

	sf = file->private_data;
	sf->private = dp->data;
/* file operations for /proc .../conn (seq_file based) */
static struct file_operations kgn_conn_fops = {
	.owner   = THIS_MODULE,
	.open    = kgnilnd_conn_seq_open,
	.llseek  = seq_lseek,
	.release = seq_release,
	/* iterator state for the "peer" seq_file: table version snapshot,
	 * current hash-bucket list position and peer */
	__u64 gpeer_version;
	struct list_head *gpeer_list;
	kgn_peer_t *gpeer_peer;
} kgn_peer_seq_iter_t;
/*
 * Position the peer iterator at entry 'off' of the global peer hash,
 * under the peer_conn read lock.  Mirrors kgnilnd_conn_seq_seek: a
 * cached position is only reused while kgn_peer_version is unchanged;
 * otherwise, or when 'off' moved backwards or the bucket index is
 * exhausted, the walk restarts from bucket 0.  Leaves gpeer_peer and
 * gpeer_list set, or gpeer_list NULL when 'off' is past the last peer.
 */
kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
	struct list_head *list, *tmp;

	gseq->gpeer_hashidx = 0;
	gseq->gpeer_list = NULL;

	/* cheap bail-out: off is beyond the total peer count */
	if (off > atomic_read(&kgnilnd_data.kgn_npeers)) {
		gseq->gpeer_list = NULL;

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* cached position is stale if the peer table changed */
	if (gseq->gpeer_list != NULL &&
	    gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {

	if ((gseq->gpeer_list == NULL) ||
	    (gseq->gpeer_off > off) ||
	    (gseq->gpeer_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
		/* search from start */
		gseq->gpeer_hashidx = 0;
		list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];

	/* continue current search */
	list = gseq->gpeer_list;

	gseq->gpeer_version = kgnilnd_data.kgn_peer_version;
	gseq->gpeer_off = off;

	list_for_each(tmp, list) {
		peer = list_entry(tmp, kgn_peer_t, gnp_list);
		gseq->gpeer_peer = peer;

	/* if we got through this hash bucket with 'off' still to go, try next*/
	gseq->gpeer_hashidx++;
	if ((here <= off) &&
	    (gseq->gpeer_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
		list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];

	/* ran out of buckets */
	gseq->gpeer_list = NULL;

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/*
 * seq_file .start for "peer": allocate an iterator and seek to *pos.
 */
kgnilnd_peer_seq_start(struct seq_file *s, loff_t *pos)
	kgn_peer_seq_iter_t *gseq;

	if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {

	LIBCFS_ALLOC(gseq, sizeof(*gseq));
		CERROR("could not allocate peer sequence iterator\n");

	/* only doing device 0 for now */
	gseq->gpeer_list = NULL;
	rc = kgnilnd_peer_seq_seek(gseq, *pos);

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .stop for "peer": free the iterator.
 */
kgnilnd_peer_seq_stop(struct seq_file *s, void *iter)
	kgn_peer_seq_iter_t *gseq = iter;

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .next for "peer": advance the iterator to *pos + 1, freeing
 * it on the end-of-sequence path.
 */
kgnilnd_peer_seq_next(struct seq_file *s, void *iter, loff_t *pos)
	kgn_peer_seq_iter_t *gseq = iter;

	loff_t next = *pos + 1;

	rc = kgnilnd_peer_seq_seek(gseq, next);

	LIBCFS_FREE(gseq, sizeof(*gseq));
/*
 * seq_file .show for "peer": print one row per peer - nid, refcount,
 * up/down state, tx queue depth, connection/purgatory status, last
 * error and dgram ages, and reconnect interval.  The peer is refcounted
 * across the unlocked seq_printf instead of copying fields.
 */
kgnilnd_peer_seq_show(struct seq_file *s, void *iter)
	kgn_peer_seq_iter_t *gseq = iter;

	/* there is no header data for peers, so offset 0 is the first

	peer = gseq->gpeer_peer;
	LASSERT(peer != NULL);

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* bail if the peer table changed since we seeked */
	if (gseq->gpeer_list != NULL &&
	    gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {

		read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* instead of saving off the data, just refcount */
	kgnilnd_peer_addref(peer);
	conn = kgnilnd_find_conn_locked(peer);

	if (peer->gnp_connecting) {

	} else if (conn != NULL) {

	/* scan for any conn parked in purgatory */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_in_purgatory) {

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	seq_printf(s, "%p->%s [%d] %s NIC 0x%x q %d conn %c purg %d "
		"last %d@%dms dgram %d@%dms "
		"reconn %dms to %lus \n",
		peer, libcfs_nid2str(peer->gnp_nid),
		atomic_read(&peer->gnp_refcount),
		(peer->gnp_down == GNILND_RCA_NODE_DOWN) ? "down" : "up",
		kgnilnd_count_list(&peer->gnp_tx_queue),
		peer->gnp_last_errno,
		jiffies_to_msecs(jiffies - peer->gnp_last_alive),
		peer->gnp_last_dgram_errno,
		jiffies_to_msecs(jiffies - peer->gnp_last_dgram_time),
		peer->gnp_reconnect_interval != 0
		? jiffies_to_msecs(jiffies - peer->gnp_reconnect_time)
		peer->gnp_reconnect_interval);

	kgnilnd_peer_decref(peer);
/* seq_file operations backing /proc .../peer */
static struct seq_operations kgn_peer_sops = {
	.start = kgnilnd_peer_seq_start,
	.stop  = kgnilnd_peer_seq_stop,
	.next  = kgnilnd_peer_seq_next,
	.show  = kgnilnd_peer_seq_show,
/*
 * file .open for "peer": attach the peer seq_operations and propagate
 * the proc entry's data pointer into the seq_file private field.
 */
kgnilnd_peer_seq_open(struct inode *inode, struct file *file)
	struct proc_dir_entry *dp = PDE(inode);
	struct seq_file *sf;

	rc = seq_open(file, &kgn_peer_sops);

	sf = file->private_data;
	sf->private = dp->data;
/* file operations for /proc .../peer (seq_file based) */
static struct file_operations kgn_peer_fops = {
	.owner   = THIS_MODULE,
	.open    = kgnilnd_peer_seq_open,
	.llseek  = seq_lseek,
	.release = seq_release,
1280 static struct proc_dir_entry *kgn_proc_root;
/*
 * Create the module's /proc subtree and all entries, with goto-chain
 * cleanup: each failure label removes every entry created before it,
 * in reverse order, ending with the directory itself.
 */
kgnilnd_proc_init(void)
	struct proc_dir_entry *pde;

	kgn_proc_root = proc_mkdir(libcfs_lnd2modname(GNILND), NULL);
	if (kgn_proc_root == NULL) {
		CERROR("couldn't create proc dir %s\n",
			libcfs_lnd2modname(GNILND));

	/* Initialize CKSUM_TEST */
	pde = create_proc_entry(GNILND_PROC_CKSUM_TEST, 0200, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_CKSUM_TEST);
		GOTO(remove_dir, rc = -ENOENT);

	pde->write_proc = kgnilnd_proc_cksum_test_write;

	/* Initialize STATS */
	pde = create_proc_entry(GNILND_PROC_STATS, 0644, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_STATS);
		GOTO(remove_test, rc = -ENOENT);

	pde->read_proc = kgnilnd_proc_stats_read;
	pde->write_proc = kgnilnd_proc_stats_write;

	/* Initialize MDD */
	pde = create_proc_entry(GNILND_PROC_MDD, 0444, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_MDD);
		GOTO(remove_stats, rc = -ENOENT);

	pde->proc_fops = &kgn_mdd_fops;

	/* Initialize SMSG */
	pde = create_proc_entry(GNILND_PROC_SMSG, 0444, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_SMSG);
		GOTO(remove_mdd, rc = -ENOENT);

	pde->proc_fops = &kgn_smsg_fops;

	/* Initialize CONN */
	pde = create_proc_entry(GNILND_PROC_CONN, 0444, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_CONN);
		GOTO(remove_smsg, rc = -ENOENT);

	pde->proc_fops = &kgn_conn_fops;

	/* Initialize peer conns debug */
	pde = create_proc_entry(GNILND_PROC_PEER_CONNS, 0644, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER_CONNS);
		GOTO(remove_conn, rc = -ENOENT);

	pde->read_proc = kgnilnd_proc_peer_conns_read;
	pde->write_proc = kgnilnd_proc_peer_conns_write;

	/* Initialize PEER */
	pde = create_proc_entry(GNILND_PROC_PEER, 0444, kgn_proc_root);
		CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER);
		GOTO(remove_pc, rc = -ENOENT);

	pde->proc_fops = &kgn_peer_fops;

	/* error unwind: remove entries in reverse creation order */
	remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);

	remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);

	remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);

	remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);

	remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);

	remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);

	remove_proc_entry(kgn_proc_root->name, NULL);
/*
 * Tear down every /proc entry created by kgnilnd_proc_init(), then the
 * directory itself.
 */
kgnilnd_proc_fini(void)
	remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
	remove_proc_entry(GNILND_PROC_PEER, kgn_proc_root);
	remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
	remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
	remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
	remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
	remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
	remove_proc_entry(kgn_proc_root->name, NULL);