2 * Copyright (C) 2009-2012 Cray, Inc.
4 * Author: Nic Henke <nic@cray.com>
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 /* this code liberated and modified from lnet/lnet/router_proc.c */
24 #define DEBUG_SUBSYSTEM S_LND
26 #include <linux/seq_file.h>
28 #define GNILND_PROC_STATS "stats"
29 #define GNILND_PROC_MDD "mdd"
30 #define GNILND_PROC_SMSG "smsg"
31 #define GNILND_PROC_CONN "conn"
32 #define GNILND_PROC_PEER_CONNS "peer_conns"
33 #define GNILND_PROC_PEER "peer"
34 #define GNILND_PROC_CKSUM_TEST "cksum_test"
/* Checksum self-test driven from the "cksum_test" proc file.
 * Allocates LNET_MAX_IOV-page src/dest kiov arrays, fills src with a
 * byte pattern, copies src->dest, then checksums both sides nloops
 * times and compares; prints throughput on success.
 * caseno selects offset/alignment tweaks, nloops is iteration count,
 * nob is bytes per checksum pass.  Returns 0 or -errno via GOTO(unwind).
 * NOTE(review): this extraction is missing lines (locals, braces, the
 * per-case switch); comments describe only the visible statements. */
37 _kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
39 lnet_kiov_t *src, *dest;
40 struct timespec begin, end, diff;
/* iov arrays themselves are heap-allocated; pages come later */
47 LIBCFS_ALLOC(src, LNET_MAX_IOV * sizeof(lnet_kiov_t));
48 LIBCFS_ALLOC(dest, LNET_MAX_IOV * sizeof(lnet_kiov_t));
50 if (src == NULL || dest == NULL) {
51 CERROR("couldn't allocate iovs\n");
52 GOTO(unwind, rc = -ENOMEM);
/* back every iov entry with a zeroed page, full-page length for now */
55 for (i = 0; i < LNET_MAX_IOV; i++) {
56 src[i].kiov_offset = 0;
57 src[i].kiov_len = PAGE_SIZE;
58 src[i].kiov_page = alloc_page(__GFP_IO | __GFP_FS | __GFP_ZERO);
60 if (src[i].kiov_page == NULL) {
61 CERROR("couldn't allocate page %d\n", i);
62 GOTO(unwind, rc = -ENOMEM);
65 dest[i].kiov_offset = 0;
66 dest[i].kiov_len = PAGE_SIZE;
67 dest[i].kiov_page = alloc_page(__GFP_IO | __GFP_FS | __GFP_ZERO);
69 if (dest[i].kiov_page == NULL) {
70 CERROR("couldn't allocate page %d\n", i);
71 GOTO(unwind, rc = -ENOMEM);
75 /* add extra 2 pages - one for offset of src, 2nd to allow dest offset */
76 niov = (nob / PAGE_SIZE) + 2;
77 if (niov > LNET_MAX_IOV) {
78 CERROR("bytes %d too large, requires niov %d > %d\n",
79 nob, niov, LNET_MAX_IOV);
80 GOTO(unwind, rc = -E2BIG);
/* deliberately unaligned starting offsets to exercise the
 * unaligned-checksum paths; presumably tweaked per caseno below */
84 src[0].kiov_offset = 317;
85 dest[0].kiov_offset = 592;
92 dest[0].kiov_offset -= 1;
96 src[0].kiov_offset += 1;
100 src[0].kiov_offset += 1;
101 dest[0].kiov_offset -= 1;
/* first iov entry covers the remainder of its page after the offset */
103 src[0].kiov_len = PAGE_SIZE - src[0].kiov_offset;
104 dest[0].kiov_len = PAGE_SIZE - dest[0].kiov_offset;
/* pattern-fill src so src and dest checksums are meaningful */
106 for (i = 0; i < niov; i++) {
107 memset(page_address(src[i].kiov_page) + src[i].kiov_offset,
108 0xf0 + i, src[i].kiov_len);
111 lnet_copy_kiov2kiov(niov, dest, 0, niov, src, 0, nob);
113 getnstimeofday(&begin);
115 for (n = 0; n < nloops; n++) {
116 CDEBUG(D_BUFFS, "case %d loop %d src %d dest %d nob %d niov %d\n",
117 caseno, n, src[0].kiov_offset, dest[0].kiov_offset, nob, niov);
/* shrink the checksummed region each loop to vary the tail handling */
118 cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1);
119 cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1);
121 if (cksum != cksum2) {
/* NOTE(review): 'j' here does not match the surrounding code --
 * 'caseno' is what the parallel CDEBUG above prints; confirm
 * against the full source whether this is a typo */
122 CERROR("case %d loop %d different checksums %x expected %x\n",
123 j, n, cksum2, cksum);
124 GOTO(unwind, rc = -ENOKEY);
128 getnstimeofday(&end);
/* x2: each loop checksums both src and dest */
130 mbytes = (nloops * nob * 2) / (1024*1024);
132 diff = kgnilnd_ts_sub(end, begin);
134 LCONSOLE_INFO("running "LPD64"MB took %ld.%ld seconds\n",
135 mbytes, diff.tv_sec, diff.tv_nsec);
/* unwind: free only the pages actually allocated (i counted them) */
138 CDEBUG(D_NET, "freeing %d pages\n", i);
139 for (i -= 1; i >= 0; i--) {
140 if (src[i].kiov_page != NULL) {
141 __free_page(src[i].kiov_page);
143 if (dest[i].kiov_page != NULL) {
144 __free_page(dest[i].kiov_page);
149 LIBCFS_FREE(src, LNET_MAX_IOV * sizeof(lnet_kiov_t));
151 LIBCFS_FREE(dest, LNET_MAX_IOV * sizeof(lnet_kiov_t));
/* proc write handler for "cksum_test": parses "testno:nloops:nbytes"
 * from userspace and runs the checksum self-test.  Rejects writes when
 * the LND is not fully initialized, or when count is 0 or too large
 * for the local buffer. */
156 kgnilnd_proc_cksum_test_write(struct file *file, const char *ubuffer,
157 unsigned long count, void *data)
/* +1 keeps the buffer NUL-terminated for sscanf */
159 char dummy[256 + 1] = { '\0' };
160 int testno, nloops, nbytes;
164 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
165 CERROR("can't run cksum test, kgnilnd is not initialized yet\n");
169 if (count >= sizeof(dummy) || count == 0)
172 if (copy_from_user(dummy, ubuffer, count))
175 if (sscanf(dummy, "%d:%d:%d", &testno, &nloops, &nbytes) == 3) {
176 rc = _kgnilnd_proc_run_cksum_test(testno, nloops, nbytes);
180 /* spurious, but lets us know the parse was ok */
/* proc read handler for "stats": formats a one-shot snapshot of the
 * global and device-0 counters into 'page'.  Counters are read without
 * locking -- sampling is intentionally racy (see comment below).
 * Only kgn_devices[0] is reported. */
188 kgnilnd_proc_stats_read(char *page, char **start, off_t off,
189 int count, int *eof, void *data)
195 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
197 "kgnilnd is not initialized yet\n");
201 /* only do the first device */
202 dev = &kgnilnd_data.kgn_devices[0];
204 /* sampling is racy, but so is reading this file! */
206 do_gettimeofday(&now);
/* single sprintf: the format string below and the argument list after
 * it must stay in lock-step -- any edit must change both together */
208 rc = sprintf(page, "time: %lu.%lu\n"
217 "GART map bytes: %ld\n"
218 "TX queued maps: %d\n"
219 "TX phys nmaps: %d\n"
220 "TX phys bytes: %lu\n"
221 "TX virt nmaps: %d\n"
222 "TX virt bytes: "LPU64"\n"
223 "RDMAQ bytes_auth: %ld\n"
224 "RDMAQ bytes_left: %ld\n"
225 "RDMAQ nstalls: %d\n"
226 "dev mutex delay: %ld\n"
228 "dev n_schedule: %d\n"
229 "SMSG fast_try: %d\n"
231 "SMSG fast_block: %d\n"
233 "SMSG tx_bytes: %ld\n"
235 "SMSG rx_bytes: %ld\n"
237 "RDMA tx_bytes: %ld\n"
239 "RDMA rx_bytes: %ld\n"
243 "RDMA REV length: %d\n"
244 "RDMA REV offset: %d\n"
245 "RDMA REV copy: %d\n",
246 now.tv_sec, now.tv_usec,
247 atomic_read(&kgnilnd_data.kgn_ntx),
248 atomic_read(&kgnilnd_data.kgn_npeers),
249 atomic_read(&kgnilnd_data.kgn_nconns),
250 atomic_read(&dev->gnd_neps),
251 atomic_read(&dev->gnd_ndgrams),
252 atomic_read(&dev->gnd_nfmablk),
253 atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
254 atomic64_read(&dev->gnd_nbytes_map),
255 atomic_read(&dev->gnd_nq_map),
256 dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
257 dev->gnd_map_nvirt, dev->gnd_map_virtnob,
258 atomic64_read(&dev->gnd_rdmaq_bytes_out),
259 atomic64_read(&dev->gnd_rdmaq_bytes_ok),
260 atomic_read(&dev->gnd_rdmaq_nstalls),
261 dev->gnd_mutex_delay,
262 atomic_read(&dev->gnd_n_yield), atomic_read(&dev->gnd_n_schedule),
263 atomic_read(&dev->gnd_fast_try), atomic_read(&dev->gnd_fast_ok),
264 atomic_read(&dev->gnd_fast_block),
265 atomic_read(&dev->gnd_short_ntx), atomic64_read(&dev->gnd_short_txbytes),
266 atomic_read(&dev->gnd_short_nrx), atomic64_read(&dev->gnd_short_rxbytes),
267 atomic_read(&dev->gnd_rdma_ntx), atomic64_read(&dev->gnd_rdma_txbytes),
268 atomic_read(&dev->gnd_rdma_nrx), atomic64_read(&dev->gnd_rdma_rxbytes),
269 atomic_read(&kgnilnd_data.kgn_nvmap_short),
270 atomic_read(&kgnilnd_data.kgn_nvmap_cksum),
271 atomic_read(&kgnilnd_data.kgn_nkmap_short),
272 atomic_read(&kgnilnd_data.kgn_rev_length),
273 atomic_read(&kgnilnd_data.kgn_rev_offset),
274 atomic_read(&kgnilnd_data.kgn_rev_copy_buff));
/* proc write handler for "stats": any write resets the resettable
 * counters on device 0 plus the global vmap/kmap counters.  The
 * written bytes themselves are ignored.  Resets are individually
 * atomic but not collectively -- see the "racy" comment below. */
280 kgnilnd_proc_stats_write(struct file *file, const char *ubuffer,
281 unsigned long count, void *data)
285 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
286 CERROR("kgnilnd is not initialized for stats write\n");
290 /* only do the first device */
291 dev = &kgnilnd_data.kgn_devices[0];
293 atomic_set(&dev->gnd_short_ntx, 0);
294 atomic_set(&dev->gnd_short_nrx, 0);
295 atomic64_set(&dev->gnd_short_txbytes, 0);
296 atomic64_set(&dev->gnd_short_rxbytes, 0);
297 atomic_set(&dev->gnd_rdma_ntx, 0);
298 atomic_set(&dev->gnd_rdma_nrx, 0);
299 atomic_set(&dev->gnd_fast_ok, 0);
300 atomic_set(&dev->gnd_fast_try, 0);
301 atomic_set(&dev->gnd_fast_block, 0);
302 atomic64_set(&dev->gnd_rdma_txbytes, 0);
303 atomic64_set(&dev->gnd_rdma_rxbytes, 0);
304 atomic_set(&dev->gnd_rdmaq_nstalls, 0);
/* non-atomic field: set_mb gives a store + memory barrier */
305 set_mb(dev->gnd_mutex_delay, 0);
306 atomic_set(&dev->gnd_n_yield, 0);
307 atomic_set(&dev->gnd_n_schedule, 0);
308 atomic_set(&kgnilnd_data.kgn_nvmap_short, 0);
309 atomic_set(&kgnilnd_data.kgn_nvmap_cksum, 0);
310 atomic_set(&kgnilnd_data.kgn_nkmap_short, 0);
311 /* sampling is racy, but so is writing this file! */
/* iterator state for the "mdd" seq_file: device being walked and the
 * current tx on that device's gnd_map_list (opening of the typedef is
 * not visible in this extraction) */
317 kgn_device_t *gmdd_dev;
320 } kgn_mdd_seq_iter_t;
/* position the mdd iterator at logical offset 'off' in the device's
 * map list.  Resumes from the cached tx when the walk is moving
 * forward; otherwise restarts from the list head.  Caller holds
 * gnd_map_lock (taken in seq_start). */
323 kgnilnd_mdd_seq_seek(kgn_mdd_seq_iter_t *gseq, loff_t off)
331 gseq->gmdd_tx = NULL;
/* can't resume if we have no cached tx or the target is behind us */
338 if (tx == NULL || gseq->gmdd_off > off) {
339 /* search from start */
340 r = gseq->gmdd_dev->gnd_map_list.next;
343 /* continue current search */
344 r = &tx->tx_map_list;
345 here = gseq->gmdd_off;
348 gseq->gmdd_off = off;
350 while (r != &gseq->gmdd_dev->gnd_map_list) {
353 t = list_entry(r, kgn_tx_t, tx_map_list);
/* fell off the end: nothing at this offset */
364 gseq->gmdd_tx = NULL;
/* seq_file start: allocate the iterator, pin device 0, take the map
 * lock for the duration of the read (released in ..._stop), and seek
 * to *pos.  Returns the iterator or NULL/error on failure. */
371 kgnilnd_mdd_seq_start(struct seq_file *s, loff_t *pos)
374 kgn_mdd_seq_iter_t *gseq;
377 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
381 LIBCFS_ALLOC(gseq, sizeof(*gseq));
383 CERROR("could not allocate mdd sequence iterator\n");
387 /* only doing device 0 for now */
388 gseq->gmdd_dev = &kgnilnd_data.kgn_devices[0];
389 gseq->gmdd_tx = NULL;
391 /* need to lock map while we poke - huge disturbance
392 * but without it, no way to get the data printed */
393 spin_lock(&gseq->gmdd_dev->gnd_map_lock);
395 /* set private to gseq for stop */
398 rc = kgnilnd_mdd_seq_seek(gseq, *pos);
/* seq_file stop: drop the map lock taken in start and free the
 * iterator stashed in s->private */
406 kgnilnd_mdd_seq_stop(struct seq_file *s, void *iter)
408 kgn_mdd_seq_iter_t *gseq = s->private;
411 spin_unlock(&gseq->gmdd_dev->gnd_map_lock);
412 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next: advance the iterator one entry via seek(pos + 1) */
417 kgnilnd_mdd_seq_next(struct seq_file *s, void *iter, loff_t *pos)
419 kgn_mdd_seq_iter_t *gseq = iter;
421 loff_t next = *pos + 1;
423 rc = kgnilnd_mdd_seq_seek(gseq, next);
/* seq_file show: print the column header at offset 0, then one line
 * per mapped tx: pointer, smsg id, byte count, physical page count,
 * buffer type, and the GNI memory handle. */
432 kgnilnd_mdd_seq_show(struct seq_file *s, void *iter)
434 kgn_mdd_seq_iter_t *gseq = iter;
440 gni_mem_handle_t hndl;
442 if (gseq->gmdd_off == 0) {
443 seq_printf(s, "%s %22s %16s %8s %8s %37s\n",
444 "tx", "tx_id", "nob", "physnop",
445 "buftype", "mem handle");
/* snapshot the tx fields (tx lookup not visible in this extraction) */
452 id = tx->tx_id.txe_smsg_id;
454 physnop = tx->tx_phys_npages;
455 buftype = tx->tx_buftype;
456 hndl.qword1 = tx->tx_map_key.qword1;
457 hndl.qword2 = tx->tx_map_key.qword2;
459 seq_printf(s, "%p %x %16"LPF64"u %8d %#8x "LPX64"."LPX64"x\n",
460 tx, id, nob, physnop, buftype,
461 hndl.qword1, hndl.qword2);
/* seq_file operations for the "mdd" proc entry */
466 static struct seq_operations kgn_mdd_sops = {
467 .start = kgnilnd_mdd_seq_start,
468 .stop = kgnilnd_mdd_seq_stop,
469 .next = kgnilnd_mdd_seq_next,
470 .show = kgnilnd_mdd_seq_show,
/* open: standard seq_open wiring for the mdd table */
475 kgnilnd_mdd_seq_open(struct inode *inode, struct file *file)
480 rc = seq_open(file, &kgn_mdd_sops);
482 sf = file->private_data;
484 /* NULL means we've not yet open() */
/* file operations for the "mdd" proc entry (read/llseek lines are not
 * visible in this extraction) */
490 static struct file_operations kgn_mdd_fops = {
491 .owner = THIS_MODULE,
492 .open = kgnilnd_mdd_seq_open,
495 .release = seq_release,
/* iterator state for the "smsg" seq_file: device, current fma memory
 * block, and a version stamp used to detect concurrent changes to the
 * fmablk list between seeks */
500 kgn_device_t *gsmsg_dev;
501 kgn_fma_memblock_t *gsmsg_fmablk;
503 } kgn_smsg_seq_iter_t;
/* position the smsg iterator at offset 'off' in the device's fma
 * buffer list.  Takes gnd_fmablk_lock for the walk; invalidates the
 * cached block if gnd_fmablk_vers changed since the last seek. */
506 kgnilnd_smsg_seq_seek(kgn_smsg_seq_iter_t *gseq, loff_t off)
508 kgn_fma_memblock_t *fmablk;
514 /* offset 0 is the header, so we start real entries at
515 * here == off == 1 */
517 gseq->gsmsg_fmablk = NULL;
522 fmablk = gseq->gsmsg_fmablk;
523 dev = gseq->gsmsg_dev;
525 spin_lock(&dev->gnd_fmablk_lock);
/* list changed under us since last seek: cached position is stale */
527 if (fmablk != NULL &&
528 gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {
534 if (fmablk == NULL || gseq->gsmsg_off > off) {
535 /* search from start */
536 r = dev->gnd_fma_buffs.next;
539 /* continue current search */
540 r = &fmablk->gnm_bufflist;
541 here = gseq->gsmsg_off;
544 gseq->gsmsg_version = atomic_read(&dev->gnd_fmablk_vers);
545 gseq->gsmsg_off = off;
547 while (r != &dev->gnd_fma_buffs) {
548 kgn_fma_memblock_t *t;
550 t = list_entry(r, kgn_fma_memblock_t, gnm_bufflist);
553 gseq->gsmsg_fmablk = t;
/* fell off the end: nothing at this offset */
561 gseq->gsmsg_fmablk = NULL;
564 spin_unlock(&dev->gnd_fmablk_lock);
/* seq_file start for "smsg": allocate the iterator, pin device 0,
 * seek to *pos; frees the iterator on seek failure */
569 kgnilnd_smsg_seq_start(struct seq_file *s, loff_t *pos)
572 kgn_smsg_seq_iter_t *gseq;
575 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
579 LIBCFS_ALLOC(gseq, sizeof(*gseq));
581 CERROR("could not allocate smsg sequence iterator\n");
585 /* only doing device 0 for now */
586 gseq->gsmsg_dev = &kgnilnd_data.kgn_devices[0];
587 gseq->gsmsg_fmablk = NULL;
588 rc = kgnilnd_smsg_seq_seek(gseq, *pos);
592 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file stop: free the iterator (lock is per-seek here, unlike mdd
 * which holds its lock across the whole read) */
597 kgnilnd_smsg_seq_stop(struct seq_file *s, void *iter)
599 kgn_smsg_seq_iter_t *gseq = iter;
602 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next: advance via seek(pos + 1); frees iterator on failure */
606 kgnilnd_smsg_seq_next(struct seq_file *s, void *iter, loff_t *pos)
608 kgn_smsg_seq_iter_t *gseq = iter;
610 loff_t next = *pos + 1;
612 rc = kgnilnd_smsg_seq_seek(gseq, next);
614 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file show for "smsg": header at offset 0, then one line per fma
 * memory block.  Re-checks the fmablk version under the lock and
 * bails if the list changed; snapshots the block's fields under the
 * lock and prints after dropping it. */
622 kgnilnd_smsg_seq_show(struct seq_file *s, void *iter)
624 kgn_smsg_seq_iter_t *gseq = iter;
625 kgn_fma_memblock_t *fmablk;
627 int avail_mboxs, held_mboxs, num_mboxs;
628 unsigned int blk_size;
630 kgn_fmablk_state_t state;
631 gni_mem_handle_t hndl;
633 if (gseq->gsmsg_off == 0) {
634 seq_printf(s, "%5s %4s %6s/%5s/%5s %9s %18s %37s\n",
635 "blk#", "type", "avail", "held", "total", "size",
636 "fmablk", "mem handle");
640 fmablk = gseq->gsmsg_fmablk;
641 dev = gseq->gsmsg_dev;
642 LASSERT(fmablk != NULL);
644 spin_lock(&dev->gnd_fmablk_lock);
/* list changed since seek: cached block may be gone, give up */
646 if (gseq->gsmsg_version != atomic_read(&dev->gnd_fmablk_vers)) {
648 spin_unlock(&dev->gnd_fmablk_lock);
/* hold_timeout == 0 means the block is live (not in purgatory hold) */
652 live = fmablk->gnm_hold_timeout == 0;
653 /* none are available if it isn't live... */
654 avail_mboxs = live ? fmablk->gnm_avail_mboxs : 0;
655 held_mboxs = fmablk->gnm_held_mboxs;
656 num_mboxs = fmablk->gnm_num_mboxs;
657 blk_size = fmablk->gnm_blk_size;
658 state = fmablk->gnm_state;
659 hndl.qword1 = fmablk->gnm_hndl.qword1;
660 hndl.qword2 = fmablk->gnm_hndl.qword2;
662 spin_unlock(&dev->gnd_fmablk_lock);
/* live blocks print the real handle; held blocks print a marker */
665 seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p "LPX64"."LPX64"\n",
666 (int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
667 avail_mboxs, held_mboxs, num_mboxs, blk_size,
668 fmablk, hndl.qword1, hndl.qword2);
670 seq_printf(s, "%5d %4s %6d/%5d/%5d %9d %18p %37s\n",
671 (int) gseq->gsmsg_off, kgnilnd_fmablk_state2str(state),
672 avail_mboxs, held_mboxs, num_mboxs, blk_size,
673 fmablk, "PURGATORY.HOLD");
/* seq_file operations for the "smsg" proc entry */
679 static struct seq_operations kgn_smsg_sops = {
680 .start = kgnilnd_smsg_seq_start,
681 .stop = kgnilnd_smsg_seq_stop,
682 .next = kgnilnd_smsg_seq_next,
683 .show = kgnilnd_smsg_seq_show,
/* open: seq_open plus stashing the proc entry's data pointer */
688 kgnilnd_smsg_seq_open(struct inode *inode, struct file *file)
690 struct proc_dir_entry *dp = PDE(inode);
694 rc = seq_open(file, &kgn_smsg_sops);
696 sf = file->private_data;
697 sf->private = dp->data;
/* file operations for the "smsg" proc entry (read/llseek lines are
 * not visible in this extraction) */
703 static struct file_operations kgn_smsg_fops = {
704 .owner = THIS_MODULE,
705 .open = kgnilnd_smsg_seq_open,
708 .release = seq_release,
/* iterator state for the "conn" seq_file: current hash-bucket list,
 * current conn, hash index and a version stamp to detect list churn */
713 struct list_head *gconn_list;
714 kgn_conn_t *gconn_conn;
717 } kgn_conn_seq_iter_t;
/* position the conn iterator at offset 'off' across the conn hash
 * table.  Takes kgn_peer_conn_lock (read side) for the walk; restarts
 * from bucket 0 when there is no cached position, the walk moved
 * backwards, or the version changed. */
720 kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
722 struct list_head *list, *tmp;
727 gseq->gconn_hashidx = 0;
728 gseq->gconn_list = NULL;
/* cheap early-out: offset beyond the total conn count */
731 if (off > atomic_read(&kgnilnd_data.kgn_nconns)) {
732 gseq->gconn_list = NULL;
736 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
737 if (gseq->gconn_list != NULL &&
738 gseq->gconn_version != kgnilnd_data.kgn_conn_version) {
744 if ((gseq->gconn_list == NULL) ||
745 (gseq->gconn_off > off) ||
746 (gseq->gconn_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
747 /* search from start */
748 gseq->gconn_hashidx = 0;
749 list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];
752 /* continue current search */
753 list = gseq->gconn_list;
756 gseq->gconn_version = kgnilnd_data.kgn_conn_version;
757 gseq->gconn_off = off;
761 list_for_each(tmp, list) {
764 conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);
765 gseq->gconn_conn = conn;
771 /* if we got through this hash bucket with 'off' still to go, try next*/
772 gseq->gconn_hashidx++;
774 (gseq->gconn_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
775 list = &kgnilnd_data.kgn_conns[gseq->gconn_hashidx];
/* exhausted all buckets: nothing at this offset */
779 gseq->gconn_list = NULL;
782 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/* seq_file start for "conn": allocate iterator, seek to *pos;
 * iterator is freed on seek failure */
787 kgnilnd_conn_seq_start(struct seq_file *s, loff_t *pos)
790 kgn_conn_seq_iter_t *gseq;
793 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
797 LIBCFS_ALLOC(gseq, sizeof(*gseq));
799 CERROR("could not allocate conn sequence iterator\n");
803 /* only doing device 0 for now */
804 gseq->gconn_list = NULL;
805 rc = kgnilnd_conn_seq_seek(gseq, *pos);
809 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file stop: free the iterator */
814 kgnilnd_conn_seq_stop(struct seq_file *s, void *iter)
816 kgn_conn_seq_iter_t *gseq = iter;
819 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next: advance via seek(pos + 1); frees iterator on failure */
823 kgnilnd_conn_seq_next(struct seq_file *s, void *iter, loff_t *pos)
825 kgn_conn_seq_iter_t *gseq = iter;
827 loff_t next = *pos + 1;
829 rc = kgnilnd_conn_seq_seek(gseq, next);
831 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file show for "conn": print one line of queue depths, send/recv
 * timing, noop and scheduler stats for the current conn.  Re-validates
 * the list version under the read lock, then takes refs on the conn
 * (and its peer) so they survive after the lock is dropped. */
839 kgnilnd_conn_seq_show(struct seq_file *s, void *iter)
841 kgn_conn_seq_iter_t *gseq = iter;
842 kgn_peer_t *peer = NULL;
845 /* there is no header data for conns, so offset 0 is the first
848 conn = gseq->gconn_conn;
849 LASSERT(conn != NULL);
851 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
/* conn list changed since seek: cached conn may be stale, bail */
852 if (gseq->gconn_list != NULL &&
853 gseq->gconn_version != kgnilnd_data.kgn_conn_version) {
855 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
859 /* instead of saving off the data, just refcount */
860 kgnilnd_conn_addref(conn);
861 if (conn->gnc_peer) {
862 /* don't use link - after unlock it could get nuked */
863 peer = conn->gnc_peer;
864 kgnilnd_peer_addref(peer);
867 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
869 seq_printf(s, "%p->%s [%d] q %d/%d/%d "
870 "tx sq %u %dms/%dms "
871 "rx sq %u %dms/%dms "
872 "noop r/s %d/%d w/s/cq %lds/%lds/%lds "
873 "sched a/d %lds/%lds "
874 "tx_re "LPD64" TO %ds %s\n",
875 conn, peer ? libcfs_nid2str(peer->gnp_nid) : "<?>",
876 atomic_read(&conn->gnc_refcount),
877 kgnilnd_count_list(&conn->gnc_fmaq),
878 atomic_read(&conn->gnc_nlive_fma),
879 atomic_read(&conn->gnc_nlive_rdma),
/* "age" values: milliseconds/seconds since each last-event stamp */
881 jiffies_to_msecs(jiffies - conn->gnc_last_tx),
882 jiffies_to_msecs(jiffies - conn->gnc_last_tx_cq),
884 jiffies_to_msecs(jiffies - conn->gnc_last_rx),
885 jiffies_to_msecs(jiffies - conn->gnc_last_rx_cq),
886 atomic_read(&conn->gnc_reaper_noop),
887 atomic_read(&conn->gnc_sched_noop),
888 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
889 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
890 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq),
891 cfs_duration_sec(jiffies - conn->gnc_last_sched_ask),
892 cfs_duration_sec(jiffies - conn->gnc_last_sched_do),
893 conn->gnc_tx_retrans, conn->gnc_timeout,
894 kgnilnd_conn_state2str(conn));
/* drop the refs taken above */
897 kgnilnd_peer_decref(peer);
898 kgnilnd_conn_decref(conn);
/* seq_file operations for the "conn" proc entry */
903 static struct seq_operations kgn_conn_sops = {
904 .start = kgnilnd_conn_seq_start,
905 .stop = kgnilnd_conn_seq_stop,
906 .next = kgnilnd_conn_seq_next,
907 .show = kgnilnd_conn_seq_show,
/* -1 == "no peer selected"; readers of peer_conns check this first */
911 #define KGN_DEBUG_PEER_NID_DEFAULT -1
912 static int kgnilnd_debug_peer_nid = KGN_DEBUG_PEER_NID_DEFAULT;
/* proc write handler for "peer_conns": parse a decimal nid from
 * userspace and remember it for subsequent peer_conns reads */
915 kgnilnd_proc_peer_conns_write(struct file *file, const char *ubuffer,
916 unsigned long count, void *data)
921 if (count >= sizeof(dummy) || count == 0)
924 if (copy_from_user(dummy, ubuffer, count))
927 rc = sscanf(dummy, "%d", &kgnilnd_debug_peer_nid);
936 /* debug data to print from conns associated with peer nid
939 - mbox_addr (msg_buffer + mbox_offset)
/* proc read handler for "peer_conns": for the nid selected via the
 * write handler, print one UTC-timestamped line per conn on that
 * peer's gnp_conns list (state, purgatory/close flags, errors, tx/rx
 * timing, retransmits).  Holds kgn_peer_conn_lock (write side) across
 * the walk. */
958 kgnilnd_proc_peer_conns_read(char *page, char **start, off_t off,
959 int count, int *eof, void *data)
969 if (kgnilnd_debug_peer_nid == KGN_DEBUG_PEER_NID_DEFAULT) {
970 rc = sprintf(page, "peer_conns not initialized\n");
974 /* sample date/time stamp - print time in UTC
975 * 2012-12-11T16:06:16.966751 123@gni ...
977 getnstimeofday(&now);
978 time_to_tm(now.tv_sec, 0, &ctm);
981 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
982 peer = kgnilnd_find_peer_locked(kgnilnd_debug_peer_nid);
985 rc = sprintf(page, "peer not found for this nid %d\n",
986 kgnilnd_debug_peer_nid);
987 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
991 list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
/* NOTE(review): scnprintf writes to 'page' (not page + len) each
 * iteration even though 'len' accumulates -- confirm against the
 * full source whether later conns overwrite earlier output */
992 len += scnprintf(page, count - len,
993 "%04ld-%02d-%02dT%02d:%02d:%02d.%06ld %s "
999 "err %d peer err %d "
1000 "tx sq %u %dms/%dms "
1001 "rx sq %u %dms/%dms/%dms "
1003 ctm.tm_year+1900, ctm.tm_mon+1, ctm.tm_mday,
1004 ctm.tm_hour, ctm.tm_min, ctm.tm_sec, now.tv_nsec,
1005 libcfs_nid2str(peer->gnp_nid),
1006 conn->remote_mbox_addr,
1007 kgnilnd_conn_dgram_type2str(conn->gnc_dgram_type),
1008 kgnilnd_conn_state2str(conn),
1009 conn->gnc_in_purgatory,
1010 conn->gnc_close_sent,
1011 conn->gnc_close_recvd,
1013 conn->gnc_peer_error,
1015 jiffies_to_msecs(jifs - conn->gnc_last_tx),
1016 jiffies_to_msecs(jifs - conn->gnc_last_tx_cq),
1018 jiffies_to_msecs(jifs - conn->gnc_first_rx),
1019 jiffies_to_msecs(jifs - conn->gnc_last_rx),
1020 jiffies_to_msecs(jifs - conn->gnc_last_rx_cq),
1021 conn->gnc_tx_retrans);
1024 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/* open for the "conn" seq file: seq_open plus stashing the proc
 * entry's data pointer into the seq_file private field */
1029 kgnilnd_conn_seq_open(struct inode *inode, struct file *file)
1031 struct proc_dir_entry *dp = PDE(inode);
1032 struct seq_file *sf;
1035 rc = seq_open(file, &kgn_conn_sops);
1037 sf = file->private_data;
1038 sf->private = dp->data;
/* file operations for the "conn" proc entry (read line not visible
 * in this extraction) */
1044 static struct file_operations kgn_conn_fops = {
1045 .owner = THIS_MODULE,
1046 .open = kgnilnd_conn_seq_open,
1048 .llseek = seq_lseek,
1049 .release = seq_release,
/* iterator state for the "peer" seq_file: list version stamp, current
 * hash-bucket list, current peer (hash index field not visible here) */
1053 __u64 gpeer_version;
1054 struct list_head *gpeer_list;
1055 kgn_peer_t *gpeer_peer;
1058 } kgn_peer_seq_iter_t;
/* position the peer iterator at offset 'off' across the peer hash
 * table; mirrors kgnilnd_conn_seq_seek but over kgn_peers /
 * kgn_peer_version.  Read lock held for the walk. */
1061 kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
1063 struct list_head *list, *tmp;
1068 gseq->gpeer_hashidx = 0;
1069 gseq->gpeer_list = NULL;
/* cheap early-out: offset beyond the total peer count */
1072 if (off > atomic_read(&kgnilnd_data.kgn_npeers)) {
1073 gseq->gpeer_list = NULL;
1077 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1078 if (gseq->gpeer_list != NULL &&
1079 gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {
1085 if ((gseq->gpeer_list == NULL) ||
1086 (gseq->gpeer_off > off) ||
1087 (gseq->gpeer_hashidx >= *kgnilnd_tunables.kgn_peer_hash_size)) {
1088 /* search from start */
1089 gseq->gpeer_hashidx = 0;
1090 list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];
1093 /* continue current search */
1094 list = gseq->gpeer_list;
1097 gseq->gpeer_version = kgnilnd_data.kgn_peer_version;
1098 gseq->gpeer_off = off;
1102 list_for_each(tmp, list) {
1105 peer = list_entry(tmp, kgn_peer_t, gnp_list);
1106 gseq->gpeer_peer = peer;
1112 /* if we got through this hash bucket with 'off' still to go, try next*/
1113 gseq->gpeer_hashidx++;
1114 if ((here <= off) &&
1115 (gseq->gpeer_hashidx < *kgnilnd_tunables.kgn_peer_hash_size)) {
1116 list = &kgnilnd_data.kgn_peers[gseq->gpeer_hashidx];
/* exhausted all buckets: nothing at this offset */
1120 gseq->gpeer_list = NULL;
1123 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/* seq_file start for "peer": allocate iterator, seek to *pos;
 * iterator is freed on seek failure */
1128 kgnilnd_peer_seq_start(struct seq_file *s, loff_t *pos)
1131 kgn_peer_seq_iter_t *gseq;
1134 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
1138 LIBCFS_ALLOC(gseq, sizeof(*gseq));
1140 CERROR("could not allocate peer sequence iterator\n");
1144 /* only doing device 0 for now */
1145 gseq->gpeer_list = NULL;
1146 rc = kgnilnd_peer_seq_seek(gseq, *pos);
1150 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file stop: free the iterator */
1155 kgnilnd_peer_seq_stop(struct seq_file *s, void *iter)
1157 kgn_peer_seq_iter_t *gseq = iter;
1160 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file next: advance via seek(pos + 1); frees iterator on failure */
1164 kgnilnd_peer_seq_next(struct seq_file *s, void *iter, loff_t *pos)
1166 kgn_peer_seq_iter_t *gseq = iter;
1168 loff_t next = *pos + 1;
1170 rc = kgnilnd_peer_seq_seek(gseq, next);
1172 LIBCFS_FREE(gseq, sizeof(*gseq));
/* seq_file show for "peer": print one line per peer -- up/down state,
 * queue depth, connection/purgatory status, last-error and reconnect
 * timing.  Re-validates the peer list version under the read lock and
 * takes a peer ref before dropping it. */
1180 kgnilnd_peer_seq_show(struct seq_file *s, void *iter)
1182 kgn_peer_seq_iter_t *gseq = iter;
1187 /* there is no header data for peers, so offset 0 is the first
1190 peer = gseq->gpeer_peer;
1191 LASSERT(peer != NULL);
1193 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
/* peer list changed since seek: cached peer may be stale, bail */
1194 if (gseq->gpeer_list != NULL &&
1195 gseq->gpeer_version != kgnilnd_data.kgn_peer_version) {
1197 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1201 /* instead of saving off the data, just refcount */
1202 kgnilnd_peer_addref(peer);
1203 conn = kgnilnd_find_conn_locked(peer);
/* classify the peer: connecting / connected / (else check purgatory) */
1205 if (peer->gnp_connecting) {
1207 } else if (conn != NULL) {
1213 list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1214 if (conn->gnc_in_purgatory) {
1219 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1221 seq_printf(s, "%p->%s [%d] %s NIC 0x%x q %d conn %c purg %d "
1222 "last %d@%dms dgram %d@%dms "
1223 "reconn %dms to %lus \n",
1224 peer, libcfs_nid2str(peer->gnp_nid),
1225 atomic_read(&peer->gnp_refcount),
1226 (peer->gnp_down == GNILND_RCA_NODE_DOWN) ? "down" : "up",
1228 kgnilnd_count_list(&peer->gnp_tx_queue),
1231 peer->gnp_last_errno,
1232 jiffies_to_msecs(jiffies - peer->gnp_last_alive),
1233 peer->gnp_last_dgram_errno,
1234 jiffies_to_msecs(jiffies - peer->gnp_last_dgram_time),
/* only report reconnect age when a reconnect interval is active */
1235 peer->gnp_reconnect_interval != 0
1236 ? jiffies_to_msecs(jiffies - peer->gnp_reconnect_time)
1238 peer->gnp_reconnect_interval);
1240 kgnilnd_peer_decref(peer);
/* seq_file operations for the "peer" proc entry */
1245 static struct seq_operations kgn_peer_sops = {
1246 .start = kgnilnd_peer_seq_start,
1247 .stop = kgnilnd_peer_seq_stop,
1248 .next = kgnilnd_peer_seq_next,
1249 .show = kgnilnd_peer_seq_show,
/* open: seq_open plus stashing the proc entry's data pointer */
1253 kgnilnd_peer_seq_open(struct inode *inode, struct file *file)
1255 struct proc_dir_entry *dp = PDE(inode);
1256 struct seq_file *sf;
1259 rc = seq_open(file, &kgn_peer_sops);
1261 sf = file->private_data;
1262 sf->private = dp->data;
/* file operations for the "peer" proc entry (read line not visible
 * in this extraction) */
1268 static struct file_operations kgn_peer_fops = {
1269 .owner = THIS_MODULE,
1270 .open = kgnilnd_peer_seq_open,
1272 .llseek = seq_lseek,
1273 .release = seq_release,
/* root of this module's proc tree (/proc/<lnd-module-name>) */
1276 static struct proc_dir_entry *kgn_proc_root;
/* create the proc directory and all entries (cksum_test, stats, mdd,
 * smsg, conn, peer_conns, peer).  On any failure, the labels at the
 * bottom unwind every entry created so far, newest-first. */
1279 kgnilnd_proc_init(void)
1281 struct proc_dir_entry *pde;
1286 kgn_proc_root = proc_mkdir(libcfs_lnd2modname(GNILND), NULL);
1287 if (kgn_proc_root == NULL) {
1288 CERROR("couldn't create proc dir %s\n",
1289 libcfs_lnd2modname(GNILND));
1293 /* Initialize CKSUM_TEST */
/* 0200: write-only -- the test is triggered by writing, never read */
1294 pde = create_proc_entry(GNILND_PROC_CKSUM_TEST, 0200, kgn_proc_root);
1296 CERROR("couldn't create proc entry %s\n", GNILND_PROC_CKSUM_TEST);
1297 GOTO(remove_dir, rc = -ENOENT);
1301 pde->write_proc = kgnilnd_proc_cksum_test_write;
1303 /* Initialize STATS */
/* 0644: readable snapshot, writable to reset counters */
1304 pde = create_proc_entry(GNILND_PROC_STATS, 0644, kgn_proc_root);
1306 CERROR("couldn't create proc entry %s\n", GNILND_PROC_STATS);
1307 GOTO(remove_test, rc = -ENOENT);
1311 pde->read_proc = kgnilnd_proc_stats_read;
1312 pde->write_proc = kgnilnd_proc_stats_write;
1314 /* Initialize MDD */
1315 pde = create_proc_entry(GNILND_PROC_MDD, 0444, kgn_proc_root);
1317 CERROR("couldn't create proc entry %s\n", GNILND_PROC_MDD);
1318 GOTO(remove_stats, rc = -ENOENT);
1322 pde->proc_fops = &kgn_mdd_fops;
1324 /* Initialize SMSG */
1325 pde = create_proc_entry(GNILND_PROC_SMSG, 0444, kgn_proc_root);
1327 CERROR("couldn't create proc entry %s\n", GNILND_PROC_SMSG);
1328 GOTO(remove_mdd, rc = -ENOENT);
1332 pde->proc_fops = &kgn_smsg_fops;
1334 /* Initialize CONN */
1335 pde = create_proc_entry(GNILND_PROC_CONN, 0444, kgn_proc_root);
1337 CERROR("couldn't create proc entry %s\n", GNILND_PROC_CONN);
1338 GOTO(remove_smsg, rc = -ENOENT);
1342 pde->proc_fops = &kgn_conn_fops;
1344 /* Initialize peer conns debug */
1345 pde = create_proc_entry(GNILND_PROC_PEER_CONNS, 0644, kgn_proc_root);
1347 CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER_CONNS);
1348 GOTO(remove_conn, rc = -ENOENT);
1352 pde->read_proc = kgnilnd_proc_peer_conns_read;
1353 pde->write_proc = kgnilnd_proc_peer_conns_write;
1355 /* Initialize PEER */
1356 pde = create_proc_entry(GNILND_PROC_PEER, 0444, kgn_proc_root);
1358 CERROR("couldn't create proc entry %s\n", GNILND_PROC_PEER);
1359 GOTO(remove_pc, rc = -ENOENT);
1363 pde->proc_fops = &kgn_peer_fops;
/* error unwind: fall-through labels remove entries in reverse
 * creation order (labels themselves not visible in this extraction) */
1367 remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
1369 remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
1371 remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
1373 remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
1375 remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
1377 remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
1379 remove_proc_entry(kgn_proc_root->name, NULL);
/* tear down every proc entry created by kgnilnd_proc_init, then the
 * root directory itself */
1385 kgnilnd_proc_fini(void)
1387 remove_proc_entry(GNILND_PROC_PEER_CONNS, kgn_proc_root);
1388 remove_proc_entry(GNILND_PROC_PEER, kgn_proc_root);
1389 remove_proc_entry(GNILND_PROC_CONN, kgn_proc_root);
1390 remove_proc_entry(GNILND_PROC_MDD, kgn_proc_root);
1391 remove_proc_entry(GNILND_PROC_SMSG, kgn_proc_root);
1392 remove_proc_entry(GNILND_PROC_STATS, kgn_proc_root);
1393 remove_proc_entry(GNILND_PROC_CKSUM_TEST, kgn_proc_root);
1394 remove_proc_entry(kgn_proc_root->name, NULL);