4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 #define DEBUG_SUBSYSTEM S_CLASS
33 #include <linux/version.h>
34 #include <asm/statfs.h>
35 #include <obd_cksum.h>
36 #include <obd_class.h>
37 #include <lprocfs_status.h>
38 #include <linux/seq_file.h>
39 #include <lustre_osc.h>
41 #include "osc_internal.h"
/*
 * "active" sysfs attribute: report (show) and toggle (store) whether the
 * OSC import is active.  show prints 1 when the import is not deactivated;
 * store parses a boolean and calls ptlrpc_set_import_active() only when the
 * requested state differs from the current one.
 * NOTE(review): this extract is lossy -- intermediate source lines (braces,
 * declarations, returns) are missing throughout this file.
 */
43 static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
46 struct obd_device *obd = container_of(kobj, struct obd_device,
48 struct obd_import *imp;
/* with_imp_locked() runs the body with the import referenced/locked */
51 with_imp_locked(obd, imp, rc)
52 rc = sprintf(buf, "%d\n", !imp->imp_deactive);
57 static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
58 const char *buffer, size_t count)
60 struct obd_device *obd = container_of(kobj, struct obd_device,
62 struct obd_import *imp, *imp0;
66 rc = kstrtobool(buffer, &val);
70 with_imp_locked(obd, imp0, rc)
/* take an extra import ref so it survives outside the locked section */
71 imp = class_import_get(imp0);
/* only act when the requested state actually changes */
75 if (imp->imp_deactive == val)
76 rc = ptlrpc_set_import_active(imp, val);
78 CDEBUG(D_CONFIG, "activate %u: ignoring repeat request\n",
80 class_import_put(imp);
84 LUSTRE_RW_ATTR(active);
/*
 * "max_rpcs_in_flight" sysfs attribute: cap on concurrent RPCs to this OST.
 * store validates the value (1..OSC_MAX_RIF_MAX), grows the shared request
 * pool if needed, then updates cl_max_rpcs_in_flight under cl_loi_list_lock
 * and readjusts the max-dirty accounting.
 */
86 static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
87 struct attribute *attr,
90 struct obd_device *obd = container_of(kobj, struct obd_device,
92 struct client_obd *cli = &obd->u.cli;
94 return scnprintf(buf, PAGE_SIZE, "%u\n", cli->cl_max_rpcs_in_flight);
97 static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
98 struct attribute *attr,
102 struct obd_device *obd = container_of(kobj, struct obd_device,
104 struct client_obd *cli = &obd->u.cli;
105 int adding, added, req_count;
109 rc = kstrtouint(buffer, 0, &val);
/* reject zero and values above the compile-time maximum */
113 if (val == 0 || val > OSC_MAX_RIF_MAX)
116 adding = (int)val - cli->cl_max_rpcs_in_flight;
117 req_count = atomic_read(&osc_pool_req_count);
/* grow the global request pool to match the new in-flight limit */
118 if (adding > 0 && req_count < osc_reqpool_maxreqcount) {
/* unlocked read-then-add: a racing writer may overshoot slightly,
 * which the original comment says is acceptable */
120 * There might be some race which will cause over-limit
121 * allocation, but it is fine.
123 if (req_count + adding > osc_reqpool_maxreqcount)
124 adding = osc_reqpool_maxreqcount - req_count;
126 added = osc_rq_pool->prp_populate(osc_rq_pool, adding);
127 atomic_add(added, &osc_pool_req_count);
130 spin_lock(&cli->cl_loi_list_lock);
131 cli->cl_max_rpcs_in_flight = val;
/* dirty-page limit scales with RPCs in flight; recompute it */
132 client_adjust_max_dirty(cli);
133 spin_unlock(&cli->cl_loi_list_lock);
137 LUSTRE_RW_ATTR(max_rpcs_in_flight);
/*
 * "max_dirty_mb" sysfs attribute: per-OSC cap on dirty page cache, in MiB.
 * store converts MiB to pages, rejects values >= OSC_MAX_DIRTY_MB_MAX or
 * above 1/4 of total RAM, then updates cl_dirty_max_pages and wakes any
 * writers waiting for cache space.
 */
139 static ssize_t max_dirty_mb_show(struct kobject *kobj,
140 struct attribute *attr,
143 struct obd_device *obd = container_of(kobj, struct obd_device,
145 struct client_obd *cli = &obd->u.cli;
147 return scnprintf(buf, PAGE_SIZE, "%lu\n",
148 PAGES_TO_MiB(cli->cl_dirty_max_pages));
151 static ssize_t max_dirty_mb_store(struct kobject *kobj,
152 struct attribute *attr,
156 struct obd_device *obd = container_of(kobj, struct obd_device,
158 struct client_obd *cli = &obd->u.cli;
159 unsigned long pages_number, max_dirty_mb;
162 rc = kstrtoul(buffer, 10, &max_dirty_mb);
166 pages_number = MiB_TO_PAGES(max_dirty_mb);
/* sanity bounds: compile-time maximum and 1/4 of system memory */
168 if (pages_number >= MiB_TO_PAGES(OSC_MAX_DIRTY_MB_MAX) ||
169 pages_number > cfs_totalram_pages() / 4) /* 1/4 of RAM */
172 spin_lock(&cli->cl_loi_list_lock);
173 cli->cl_dirty_max_pages = pages_number;
/* a larger limit may unblock writers throttled on dirty-cache space */
174 osc_wake_cache_waiters(cli);
175 spin_unlock(&cli->cl_loi_list_lock);
179 LUSTRE_RW_ATTR(max_dirty_mb);
/* read-only UUID attributes (ost_conn_uuid reuses the generic
 * conn_uuid_show handler) plus the generic read-write "ping" attribute */
181 LUSTRE_ATTR(ost_conn_uuid, 0444, conn_uuid_show, NULL);
182 LUSTRE_RO_ATTR(conn_uuid);
184 LUSTRE_RW_ATTR(ping);
/*
 * "osc_cached_mb" proc file: show LRU-cached page statistics in MiB and
 * allow the user to shrink the cache by writing "used_mb: <N>".
 */
186 static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
188 struct obd_device *obd = m->private;
189 struct client_obd *cli = &obd->u.cli;
/* shift converts a page count to MiB (20 = log2 of 1 MiB) */
190 int shift = 20 - PAGE_SHIFT;
192 seq_printf(m, "used_mb: %ld\n"
195 (atomic_long_read(&cli->cl_lru_in_list) +
196 atomic_long_read(&cli->cl_lru_busy)) >> shift,
197 atomic_long_read(&cli->cl_lru_busy),
198 cli->cl_lru_reclaim);
203 /* shrink the number of caching pages to a specific number */
204 static ssize_t osc_cached_mb_seq_write(struct file *file,
205 const char __user *buffer,
206 size_t count, loff_t *off)
208 struct seq_file *m = file->private_data;
209 struct obd_device *obd = m->private;
210 struct client_obd *cli = &obd->u.cli;
/* reject input that would not fit (with NUL) in the local buffer */
216 if (count >= sizeof(kernbuf))
219 if (copy_from_user(kernbuf, buffer, count))
/* accept either a bare number or the "used_mb: N" form shown by read */
223 tmp = lprocfs_find_named_value(kernbuf, "used_mb:", &count);
224 rc = sysfs_memparse(tmp, count, &pages_number, "MiB");
/* sysfs_memparse yields bytes; convert to pages */
228 pages_number >>= PAGE_SHIFT;
/* rc = how many pages over the requested target we currently are */
230 rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number;
235 env = cl_env_get(&refcheck);
/* best-effort shrink; result intentionally ignored */
237 (void)osc_lru_shrink(env, cli, rc, true);
238 cl_env_put(env, &refcheck);
245 LPROC_SEQ_FOPS(osc_cached_mb);
/* "cur_dirty_bytes" (read-only): current dirty page count expressed in
 * bytes (pages << PAGE_SHIFT) */
247 static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
248 struct attribute *attr,
251 struct obd_device *obd = container_of(kobj, struct obd_device,
253 struct client_obd *cli = &obd->u.cli;
255 return scnprintf(buf, PAGE_SIZE, "%lu\n",
256 cli->cl_dirty_pages << PAGE_SHIFT);
258 LUSTRE_RO_ATTR(cur_dirty_bytes);
/*
 * "cur_grant_bytes" proc file: show the currently available server grant;
 * writing a smaller value shrinks the grant toward that target (growing
 * is not supported here).
 */
260 static int osc_cur_grant_bytes_seq_show(struct seq_file *m, void *v)
262 struct obd_device *obd = m->private;
263 struct client_obd *cli = &obd->u.cli;
265 seq_printf(m, "%lu\n", cli->cl_avail_grant);
269 static ssize_t osc_cur_grant_bytes_seq_write(struct file *file,
270 const char __user *buffer,
271 size_t count, loff_t *off)
273 struct seq_file *m = file->private_data;
274 struct obd_device *obd = m->private;
275 struct client_obd *cli = &obd->u.cli;
276 struct obd_import *imp;
/* 22 chars is enough for a 64-bit number plus unit suffix and NUL */
277 char kernbuf[22] = "";
284 if (count >= sizeof(kernbuf))
287 if (copy_from_user(kernbuf, buffer, count))
291 rc = sysfs_memparse(kernbuf, count, &val, "MiB");
295 /* this is only for shrinking grant */
296 if (val >= cli->cl_avail_grant)
/* shrinking requires a fully-connected import */
299 with_imp_locked(obd, imp, rc)
300 if (imp->imp_state == LUSTRE_IMP_FULL)
301 rc = osc_shrink_grant_to_target(cli, val);
303 return rc ? rc : count;
305 LPROC_SEQ_FOPS(osc_cur_grant_bytes);
/* read-only grant accounting attributes: bytes of grant lost (e.g. across
 * reconnect) and bytes of grant consumed by dirty data */
307 static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
308 struct attribute *attr,
311 struct obd_device *obd = container_of(kobj, struct obd_device,
313 struct client_obd *cli = &obd->u.cli;
315 return scnprintf(buf, PAGE_SIZE, "%lu\n", cli->cl_lost_grant);
317 LUSTRE_RO_ATTR(cur_lost_grant_bytes);
319 static ssize_t cur_dirty_grant_bytes_show(struct kobject *kobj,
320 struct attribute *attr,
323 struct obd_device *obd = container_of(kobj, struct obd_device,
325 struct client_obd *cli = &obd->u.cli;
327 return scnprintf(buf, PAGE_SIZE, "%lu\n", cli->cl_dirty_grant);
329 LUSTRE_RO_ATTR(cur_dirty_grant_bytes);
/*
 * "grant_shrink_interval" sysfs attribute: seconds between grant-shrink
 * passes.  store updates the interval, recomputes the next shrink time,
 * and (re)schedules the grant work item.
 */
331 static ssize_t grant_shrink_interval_show(struct kobject *kobj,
332 struct attribute *attr,
335 struct obd_device *obd = container_of(kobj, struct obd_device,
338 return sprintf(buf, "%lld\n", obd->u.cli.cl_grant_shrink_interval);
341 static ssize_t grant_shrink_interval_store(struct kobject *kobj,
342 struct attribute *attr,
346 struct obd_device *obd = container_of(kobj, struct obd_device,
351 rc = kstrtouint(buffer, 0, &val);
358 obd->u.cli.cl_grant_shrink_interval = val;
359 osc_update_next_shrink(&obd->u.cli);
360 osc_schedule_grant_work();
364 LUSTRE_RW_ATTR(grant_shrink_interval);
/* "checksums" sysfs attribute: boolean enabling bulk data checksumming */
366 static ssize_t checksums_show(struct kobject *kobj,
367 struct attribute *attr,
370 struct obd_device *obd = container_of(kobj, struct obd_device,
373 return scnprintf(buf, PAGE_SIZE, "%d\n", !!obd->u.cli.cl_checksum);
376 static ssize_t checksums_store(struct kobject *kobj,
377 struct attribute *attr,
381 struct obd_device *obd = container_of(kobj, struct obd_device,
386 rc = kstrtobool(buffer, &val);
390 obd->u.cli.cl_checksum = val;
394 LUSTRE_RW_ATTR(checksums);
/*
 * "checksum_type" proc file: show lists server-supported checksum
 * algorithms with the active one bracketed; write selects the preferred
 * algorithm by name (case-insensitive), activating it immediately if the
 * server supports it.
 */
398 static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
400 struct obd_device *obd = m->private;
406 for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
/* skip algorithms the server did not advertise */
407 if ((BIT(i) & obd->u.cli.cl_supp_cksum_types) == 0)
/* active type is printed in brackets, e.g. "[crc32]" */
409 if (obd->u.cli.cl_cksum_type == BIT(i))
410 seq_printf(m, "[%s] ", cksum_name[i]);
412 seq_printf(m, "%s ", cksum_name[i]);
419 static ssize_t osc_checksum_type_seq_write(struct file *file,
420 const char __user *buffer,
421 size_t count, loff_t *off)
423 struct seq_file *m = file->private_data;
424 struct obd_device *obd = m->private;
432 if (count > sizeof(kernbuf) - 1)
434 if (copy_from_user(kernbuf, buffer, count))
/* strip a trailing newline from echo-style input, then NUL-terminate */
437 if (count > 0 && kernbuf[count - 1] == '\n')
438 kernbuf[count - 1] = '\0';
440 kernbuf[count] = '\0';
442 for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
443 if (strcasecmp(kernbuf, cksum_name[i]) == 0) {
/* remember the preference even if the server lacks support now */
444 obd->u.cli.cl_preferred_cksum_type = BIT(i);
445 if (obd->u.cli.cl_supp_cksum_types & BIT(i)) {
446 obd->u.cli.cl_cksum_type = BIT(i);
456 LPROC_SEQ_FOPS(osc_checksum_type);
/* "resend_count" sysfs attribute: atomic counter of RPC resends; writing
 * a value resets/sets the counter */
458 static ssize_t resend_count_show(struct kobject *kobj,
459 struct attribute *attr,
462 struct obd_device *obd = container_of(kobj, struct obd_device,
465 return sprintf(buf, "%u\n", atomic_read(&obd->u.cli.cl_resends));
468 static ssize_t resend_count_store(struct kobject *kobj,
469 struct attribute *attr,
473 struct obd_device *obd = container_of(kobj, struct obd_device,
478 rc = kstrtouint(buffer, 10, &val);
482 atomic_set(&obd->u.cli.cl_resends, val);
486 LUSTRE_RW_ATTR(resend_count);
/* "checksum_dump" sysfs attribute: boolean enabling dump of pages on
 * checksum mismatch (debugging aid) */
488 static ssize_t checksum_dump_show(struct kobject *kobj,
489 struct attribute *attr,
492 struct obd_device *obd = container_of(kobj, struct obd_device,
495 return scnprintf(buf, PAGE_SIZE, "%d\n", !!obd->u.cli.cl_checksum_dump);
498 static ssize_t checksum_dump_store(struct kobject *kobj,
499 struct attribute *attr,
503 struct obd_device *obd = container_of(kobj, struct obd_device,
508 rc = kstrtobool(buffer, &val);
512 obd->u.cli.cl_checksum_dump = val;
516 LUSTRE_RW_ATTR(checksum_dump);
/* "destroys_in_flight" (read-only): count of in-flight OST_DESTROY RPCs */
518 static ssize_t destroys_in_flight_show(struct kobject *kobj,
519 struct attribute *attr,
522 struct obd_device *obd = container_of(kobj, struct obd_device,
525 return sprintf(buf, "%u\n",
526 atomic_read(&obd->u.cli.cl_destroy_in_flight));
528 LUSTRE_RO_ATTR(destroys_in_flight);
/* generic tunables provided by shared macros */
530 LPROC_SEQ_FOPS_RW_TYPE(osc, obd_max_pages_per_rpc);
532 LUSTRE_RW_ATTR(short_io_bytes);
534 #ifdef CONFIG_PROC_FS
/* "unstable_stats" proc file: pages written but not yet committed by the
 * server (unstable), shown as a page count and in MiB */
535 static int osc_unstable_stats_seq_show(struct seq_file *m, void *v)
537 struct obd_device *obd = m->private;
538 struct client_obd *cli = &obd->u.cli;
542 pages = atomic_long_read(&cli->cl_unstable_count);
/* bytes >> 20 converts to MiB */
543 mb = (pages * PAGE_SIZE) >> 20;
545 seq_printf(m, "unstable_pages: %20ld\n"
546 "unstable_mb: %10d\n",
550 LPROC_SEQ_FOPS_RO(osc_unstable_stats);
/*
 * "idle_timeout" sysfs attribute: seconds of inactivity before the import
 * is disconnected.  store also accepts the literal strings "debug" /
 * "nodebug" to adjust the idle-debug console level instead of the timeout.
 */
552 static ssize_t idle_timeout_show(struct kobject *kobj, struct attribute *attr,
555 struct obd_device *obd = container_of(kobj, struct obd_device,
557 struct obd_import *imp;
560 with_imp_locked(obd, imp, ret)
561 ret = sprintf(buf, "%u\n", imp->imp_idle_timeout);
566 static ssize_t idle_timeout_store(struct kobject *kobj, struct attribute *attr,
567 const char *buffer, size_t count)
569 struct obd_device *obd = container_of(kobj, struct obd_device,
571 struct obd_import *imp;
572 struct ptlrpc_request *req;
573 unsigned int idle_debug = 0;
/* special-case keywords before attempting numeric parse */
577 if (strncmp(buffer, "debug", 5) == 0) {
578 idle_debug = D_CONSOLE;
579 } else if (strncmp(buffer, "nodebug", 6) == 0) {
582 rc = kstrtouint(buffer, 10, &val);
586 if (val > CONNECTION_SWITCH_MAX)
590 with_imp_locked(obd, imp, rc) {
592 imp->imp_idle_debug = idle_debug;
595 /* initiate the connection if it's in IDLE state */
/* allocating a request is enough to kick an idle import to reconnect */
596 req = ptlrpc_request_alloc(imp,
599 ptlrpc_req_finished(req);
601 imp->imp_idle_timeout = val;
607 LUSTRE_RW_ATTR(idle_timeout);
/*
 * "idle_connect" (write-only): force an idle import to reconnect by
 * allocating (then discarding) a dummy OST_STATFS request and forcing a
 * pinger round-trip.
 */
609 static ssize_t idle_connect_store(struct kobject *kobj, struct attribute *attr,
610 const char *buffer, size_t count)
612 struct obd_device *obd = container_of(kobj, struct obd_device,
614 struct obd_import *imp;
615 struct ptlrpc_request *req;
618 with_imp_locked(obd, imp, rc) {
619 /* to initiate the connection if it's in IDLE state */
620 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
/* the request itself is never sent; only its allocation side effect
 * (waking the idle import) is wanted */
622 ptlrpc_req_finished(req);
623 ptlrpc_pinger_force(imp);
628 LUSTRE_WO_ATTR(idle_connect);
/*
 * "grant_shrink" sysfs attribute: whether grant shrinking is enabled for
 * this import.  show reports enabled only when not locally disabled AND
 * the server advertised the capability; store flips the local disable
 * flag under imp_lock.
 */
630 static ssize_t grant_shrink_show(struct kobject *kobj, struct attribute *attr,
633 struct obd_device *obd = container_of(kobj, struct obd_device,
635 struct obd_import *imp;
638 with_imp_locked(obd, imp, len)
639 len = scnprintf(buf, PAGE_SIZE, "%d\n",
640 !imp->imp_grant_shrink_disabled &&
641 OCD_HAS_FLAG(&imp->imp_connect_data,
647 static ssize_t grant_shrink_store(struct kobject *kobj, struct attribute *attr,
648 const char *buffer, size_t count)
650 struct obd_device *obd = container_of(kobj, struct obd_device,
652 struct obd_import *imp;
659 rc = kstrtobool(buffer, &val);
663 with_imp_locked(obd, imp, rc) {
664 spin_lock(&imp->imp_lock);
/* note the stored flag is the inverse of the user-visible boolean */
665 imp->imp_grant_shrink_disabled = !val;
666 spin_unlock(&imp->imp_lock);
671 LUSTRE_RW_ATTR(grant_shrink);
/* remaining proc handlers come from shared per-type macro templates */
673 LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
674 LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
675 LPROC_SEQ_FOPS_RO_TYPE(osc, timeouts);
676 LPROC_SEQ_FOPS_RO_TYPE(osc, state);
678 LPROC_SEQ_FOPS_RW_TYPE(osc, import);
679 LPROC_SEQ_FOPS_RW_TYPE(osc, pinger_recov);
/* table of procfs entries registered for each OSC obd device
 * NOTE(review): extract is lossy -- some entries' .name lines are missing
 * (e.g. the entries pairing with osc_import_fops / osc_state_fops) */
681 struct lprocfs_vars lprocfs_osc_obd_vars[] = {
682 { .name = "connect_flags",
683 .fops = &osc_connect_flags_fops },
684 { .name = "ost_server_uuid",
685 .fops = &osc_server_uuid_fops },
686 { .name = "max_pages_per_rpc",
687 .fops = &osc_obd_max_pages_per_rpc_fops },
688 { .name = "osc_cached_mb",
689 .fops = &osc_cached_mb_fops },
690 { .name = "cur_grant_bytes",
691 .fops = &osc_cur_grant_bytes_fops },
692 { .name = "checksum_type",
693 .fops = &osc_checksum_type_fops },
694 { .name = "timeouts",
695 .fops = &osc_timeouts_fops },
697 .fops = &osc_import_fops },
699 .fops = &osc_state_fops },
700 { .name = "pinger_recov",
701 .fops = &osc_pinger_recov_fops },
702 { .name = "unstable_stats",
703 .fops = &osc_unstable_stats_fops },
/*
 * "rpc_stats" proc file: dump per-client RPC histograms -- pages per RPC,
 * RPCs in flight, and I/O offset -- as cumulative-percentage tables for
 * reads and writes side by side.  All counters are sampled and printed
 * under cl_loi_list_lock for a consistent snapshot.
 */
707 static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
709 struct obd_device *obd = seq->private;
710 struct client_obd *cli = &obd->u.cli;
711 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
714 spin_lock(&cli->cl_loi_list_lock);
716 lprocfs_stats_header(seq, ktime_get_real(), cli->cl_stats_init, 25,
718 seq_printf(seq, "read RPCs in flight: %d\n",
719 cli->cl_r_in_flight);
720 seq_printf(seq, "write RPCs in flight: %d\n",
721 cli->cl_w_in_flight);
722 seq_printf(seq, "pending write pages: %d\n",
723 atomic_read(&cli->cl_pending_w_pages));
724 seq_printf(seq, "pending read pages: %d\n",
725 atomic_read(&cli->cl_pending_r_pages));
/* table 1: pages-per-RPC histogram (buckets are powers of two) */
727 seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
728 seq_printf(seq, "pages per rpc rpcs %% cum %% |");
729 seq_printf(seq, " rpcs %% cum %%\n");
731 read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
732 write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
736 for (i = 0; i < OBD_HIST_MAX; i++) {
737 unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
738 unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
742 seq_printf(seq, "%d:\t\t%10lu %3u %3u | %10lu %3u %3u\n",
743 1 << i, r, pct(r, read_tot),
744 pct(read_cum, read_tot), w,
746 pct(write_cum, write_tot));
/* stop once every sample has been accounted for */
747 if (read_cum == read_tot && write_cum == write_tot)
/* table 2: RPCs-in-flight histogram (bucket i == i RPCs in flight) */
751 seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
752 seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
753 seq_printf(seq, " rpcs %% cum %%\n");
755 read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
756 write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
/* bucket 0 (zero in flight) is meaningless here, so start at 1 */
760 for (i = 1; i < OBD_HIST_MAX; i++) {
761 unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i];
762 unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i];
765 seq_printf(seq, "%d:\t\t%10lu %3u %3u | %10lu %3u %3u\n",
766 i, r, pct(r, read_tot),
767 pct(read_cum, read_tot), w,
769 pct(write_cum, write_tot));
770 if (read_cum == read_tot && write_cum == write_tot)
/* table 3: I/O offset histogram (bucket 0 = offset 0, then powers of two) */
774 seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
775 seq_printf(seq, "offset rpcs %% cum %% |");
776 seq_printf(seq, " rpcs %% cum %%\n");
778 read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
779 write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);
783 for (i = 0; i < OBD_HIST_MAX; i++) {
784 unsigned long r = cli->cl_read_offset_hist.oh_buckets[i];
785 unsigned long w = cli->cl_write_offset_hist.oh_buckets[i];
788 seq_printf(seq, "%d:\t\t%10lu %3u %3u | %10lu %3u %3u\n",
789 (i == 0) ? 0 : 1 << (i - 1),
790 r, pct(r, read_tot), pct(read_cum, read_tot),
791 w, pct(w, write_tot), pct(write_cum, write_tot));
792 if (read_cum == read_tot && write_cum == write_tot)
796 spin_unlock(&cli->cl_loi_list_lock);
/* writing anything to "rpc_stats" clears all six histograms and restarts
 * the statistics epoch timestamp */
801 static ssize_t osc_rpc_stats_seq_write(struct file *file,
802 const char __user *buf,
803 size_t len, loff_t *off)
805 struct seq_file *seq = file->private_data;
806 struct obd_device *obd = seq->private;
807 struct client_obd *cli = &obd->u.cli;
809 lprocfs_oh_clear(&cli->cl_read_rpc_hist);
810 lprocfs_oh_clear(&cli->cl_write_rpc_hist);
811 lprocfs_oh_clear(&cli->cl_read_page_hist);
812 lprocfs_oh_clear(&cli->cl_write_page_hist);
813 lprocfs_oh_clear(&cli->cl_read_offset_hist);
814 lprocfs_oh_clear(&cli->cl_write_offset_hist);
815 cli->cl_stats_init = ktime_get_real();
819 LPROC_SEQ_FOPS(osc_rpc_stats);
/* "osc_stats" proc file: lockless read/write byte counters; any write
 * zeroes the stats and restarts the epoch */
821 static int osc_stats_seq_show(struct seq_file *seq, void *v)
823 struct obd_device *obd = seq->private;
824 struct osc_stats *stats = &obd2osc_dev(obd)->osc_stats;
826 lprocfs_stats_header(seq, ktime_get_real(), stats->os_init, 25, ":",
828 seq_printf(seq, "lockless_write_bytes\t\t%llu\n",
829 stats->os_lockless_writes);
830 seq_printf(seq, "lockless_read_bytes\t\t%llu\n",
831 stats->os_lockless_reads);
835 static ssize_t osc_stats_seq_write(struct file *file,
836 const char __user *buf,
837 size_t len, loff_t *off)
839 struct seq_file *seq = file->private_data;
840 struct obd_device *obd = seq->private;
841 struct osc_stats *stats = &obd2osc_dev(obd)->osc_stats;
843 memset(stats, 0, sizeof(*stats));
844 stats->os_init = ktime_get_real();
849 LPROC_SEQ_FOPS(osc_stats);
/* register the "osc_stats" and "rpc_stats" proc entries for this device;
 * rpc_stats is presumably only attempted after osc_stats succeeds --
 * the extract drops the intervening error check, so confirm upstream */
851 static int lprocfs_osc_attach_seqstat(struct obd_device *obd)
855 rc = lprocfs_seq_create(obd->obd_proc_entry, "osc_stats", 0644,
856 &osc_stats_fops, obd);
858 rc = lprocfs_obd_seq_create(obd, "rpc_stats", 0644,
859 &osc_rpc_stats_fops, obd);
863 #endif /* CONFIG_PROC_FS */
/* adaptive-timeout and ldlm tunables via the generic uint-param template */
865 LUSTRE_OBD_UINT_PARAM_ATTR(at_min);
866 LUSTRE_OBD_UINT_PARAM_ATTR(at_max);
867 LUSTRE_OBD_UINT_PARAM_ATTR(at_history);
868 LUSTRE_OBD_UINT_PARAM_ATTR(ldlm_enqueue_min);
/* all sysfs attributes exported under the OSC device kobject; every
 * LUSTRE_*_ATTR defined above must be listed here to be visible */
870 static struct attribute *osc_attrs[] = {
871 &lustre_attr_active.attr,
872 &lustre_attr_checksums.attr,
873 &lustre_attr_checksum_dump.attr,
874 &lustre_attr_cur_dirty_bytes.attr,
875 &lustre_attr_cur_lost_grant_bytes.attr,
876 &lustre_attr_cur_dirty_grant_bytes.attr,
877 &lustre_attr_destroys_in_flight.attr,
878 &lustre_attr_grant_shrink_interval.attr,
879 &lustre_attr_max_dirty_mb.attr,
880 &lustre_attr_max_rpcs_in_flight.attr,
881 &lustre_attr_short_io_bytes.attr,
882 &lustre_attr_resend_count.attr,
883 &lustre_attr_ost_conn_uuid.attr,
884 &lustre_attr_conn_uuid.attr,
885 &lustre_attr_ping.attr,
886 &lustre_attr_idle_timeout.attr,
887 &lustre_attr_idle_connect.attr,
888 &lustre_attr_grant_shrink.attr,
889 &lustre_attr_at_max.attr,
890 &lustre_attr_at_min.attr,
891 &lustre_attr_at_history.attr,
892 &lustre_attr_ldlm_enqueue_min.attr,
896 KOBJ_ATTRIBUTE_GROUPS(osc); /* creates osc_groups */
/*
 * Wire up all OSC tunables for one obd device: proc vars table, sysfs
 * attribute groups, seq stats files, sptlrpc entries, and the ptlrpc
 * per-obd stats.  Tears down the proc tree on failure.
 * NOTE(review): function runs past the end of this extract; the tail
 * (final return paths) is not visible here.
 */
898 int osc_tunables_init(struct obd_device *obd)
902 obd->obd_vars = lprocfs_osc_obd_vars;
903 obd->obd_ktype.default_groups = KOBJ_ATTR_GROUPS(osc);
904 rc = lprocfs_obd_setup(obd, false);
907 #ifdef CONFIG_PROC_FS
908 /* If the basic OSC proc tree construction succeeded then
911 rc = lprocfs_osc_attach_seqstat(obd);
915 #endif /* CONFIG_PROC_FS */
916 rc = sptlrpc_lprocfs_cliobd_attach(obd);
920 ptlrpc_lprocfs_register_obd(obd);
/* error path: unwind the proc/sysfs setup done above */
923 lprocfs_obd_cleanup(obd);