4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 #define DEBUG_SUBSYSTEM S_CLASS
33 #include <linux/version.h>
34 #include <asm/statfs.h>
35 #include <obd_cksum.h>
36 #include <obd_class.h>
37 #include <lprocfs_status.h>
38 #include <linux/seq_file.h>
39 #include <lustre_osc.h>
41 #include "osc_internal.h"
/*
 * "active" attribute: show whether the import is active (prints the
 * negation of imp->imp_deactive); store parses a bool and calls
 * ptlrpc_set_import_active() to (de)activate the import.
 * NOTE(review): this excerpt elides interior lines (returns, braces,
 * some declarations); comments describe only the visible code.
 */
43 static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
46 struct obd_device *obd = container_of(kobj, struct obd_device,
48 struct obd_import *imp;
/* with_imp_locked() runs the body with the import pinned and locked */
51 with_imp_locked(obd, imp, rc)
52 rc = sprintf(buf, "%d\n", !imp->imp_deactive);
57 static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
58 const char *buffer, size_t count)
60 struct obd_device *obd = container_of(kobj, struct obd_device,
62 struct obd_import *imp, *imp0;
/* accept "0/1/y/n/..." forms; rc checking appears elided here */
66 rc = kstrtobool(buffer, &val);
70 with_imp_locked(obd, imp0, rc)
/* take an extra reference so imp stays valid across the state change */
71 imp = class_import_get(imp0);
/* only flip state when the request actually changes it;
 * otherwise log and ignore the repeat request */
75 if (imp->imp_deactive == val)
76 rc = ptlrpc_set_import_active(imp, val);
78 CDEBUG(D_CONFIG, "activate %u: ignoring repeat request\n",
80 class_import_put(imp);
84 LUSTRE_RW_ATTR(active);
/*
 * "max_rpcs_in_flight": show/store the per-client cap on concurrent
 * RPCs.  The store path grows the shared request pool when the limit
 * is raised, then updates the cap and the derived dirty-page limit
 * under cl_loi_list_lock.
 * NOTE(review): excerpt elides some interior lines; comments describe
 * only the visible code.
 */
86 static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
87 struct attribute *attr,
90 struct obd_device *obd = container_of(kobj, struct obd_device,
92 struct client_obd *cli = &obd->u.cli;
94 return scnprintf(buf, PAGE_SIZE, "%u\n", cli->cl_max_rpcs_in_flight);
97 static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
98 struct attribute *attr,
102 struct obd_device *obd = container_of(kobj, struct obd_device,
104 struct client_obd *cli = &obd->u.cli;
105 int adding, added, req_count;
109 rc = kstrtouint(buffer, 0, &val);
/* reject zero and values above the compile-time maximum */
113 if (val == 0 || val > OSC_MAX_RIF_MAX)
/* if the limit grew, try to pre-populate the request pool by the delta */
116 adding = (int)val - cli->cl_max_rpcs_in_flight;
117 req_count = atomic_read(&osc_pool_req_count);
118 if (adding > 0 && req_count < osc_reqpool_maxreqcount) {
120 * There might be some race which will cause over-limit
121 * allocation, but it is fine.
/* clamp so the pool never intentionally exceeds its max request count */
123 if (req_count + adding > osc_reqpool_maxreqcount)
124 adding = osc_reqpool_maxreqcount - req_count;
126 added = osc_rq_pool->prp_populate(osc_rq_pool, adding);
127 atomic_add(added, &osc_pool_req_count);
/* publish the new cap and recompute max dirty pages under the lock */
130 spin_lock(&cli->cl_loi_list_lock);
131 cli->cl_max_rpcs_in_flight = val;
132 client_adjust_max_dirty(cli);
133 spin_unlock(&cli->cl_loi_list_lock);
137 LUSTRE_RW_ATTR(max_rpcs_in_flight);
/*
 * "max_dirty_mb": show/store the maximum amount of dirty page cache
 * (in MiB) this client may accumulate.  The store path converts MiB to
 * pages, bounds-checks against OSC_MAX_DIRTY_MB_MAX and 1/4 of total
 * RAM, then updates cl_dirty_max_pages and wakes cache waiters.
 * NOTE(review): excerpt elides some interior lines.
 */
139 static ssize_t max_dirty_mb_show(struct kobject *kobj,
140 struct attribute *attr,
143 struct obd_device *obd = container_of(kobj, struct obd_device,
145 struct client_obd *cli = &obd->u.cli;
147 return scnprintf(buf, PAGE_SIZE, "%lu\n",
148 PAGES_TO_MiB(cli->cl_dirty_max_pages));
151 static ssize_t max_dirty_mb_store(struct kobject *kobj,
152 struct attribute *attr,
156 struct obd_device *obd = container_of(kobj, struct obd_device,
158 struct client_obd *cli = &obd->u.cli;
159 unsigned long pages_number, max_dirty_mb;
162 rc = kstrtoul(buffer, 10, &max_dirty_mb);
166 pages_number = MiB_TO_PAGES(max_dirty_mb);
/* reject limits at/above the hard max or above a quarter of RAM */
168 if (pages_number >= MiB_TO_PAGES(OSC_MAX_DIRTY_MB_MAX) ||
169 pages_number > cfs_totalram_pages() / 4) /* 1/4 of RAM */
172 spin_lock(&cli->cl_loi_list_lock);
173 cli->cl_dirty_max_pages = pages_number;
/* a larger limit may unblock writers waiting for dirty-cache space */
174 osc_wake_cache_waiters(cli);
175 spin_unlock(&cli->cl_loi_list_lock);
179 LUSTRE_RW_ATTR(max_dirty_mb);
/* connection-identity and ping attributes; handlers are shared helpers
 * declared elsewhere (conn_uuid_show etc.) */
181 LUSTRE_ATTR(ost_conn_uuid, 0444, conn_uuid_show, NULL);
182 LUSTRE_RO_ATTR(conn_uuid);
184 LUSTRE_RW_ATTR(ping);
/*
 * "osc_cached_mb" (seq_file): show the LRU page-cache usage for this
 * OSC; write shrinks the cache toward a requested "used_mb:" target by
 * calling osc_lru_shrink().
 * NOTE(review): excerpt elides some interior lines (buffer declaration,
 * error returns); comments describe only the visible code.
 */
186 static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
188 struct obd_device *obd = m->private;
189 struct client_obd *cli = &obd->u.cli;
/* shift converts pages to MiB (20 - PAGE_SHIFT bits) */
190 int shift = 20 - PAGE_SHIFT;
192 seq_printf(m, "used_mb: %ld\n"
195 (atomic_long_read(&cli->cl_lru_in_list) +
196 atomic_long_read(&cli->cl_lru_busy)) >> shift,
197 atomic_long_read(&cli->cl_lru_busy),
198 cli->cl_lru_reclaim);
203 /* shrink the number of caching pages to a specific number */
204 static ssize_t osc_cached_mb_seq_write(struct file *file,
205 const char __user *buffer,
206 size_t count, loff_t *off)
208 struct seq_file *m = file->private_data;
209 struct obd_device *obd = m->private;
210 struct client_obd *cli = &obd->u.cli;
/* bound the user write to the local buffer, then copy it in */
216 if (count >= sizeof(kernbuf))
219 if (copy_from_user(kernbuf, buffer, count))
/* accept "used_mb: <N>" as well as a bare size with MiB units */
223 tmp = lprocfs_find_named_value(kernbuf, "used_mb:", &count);
224 rc = sysfs_memparse(tmp, count, &pages_number, "MiB");
228 pages_number >>= PAGE_SHIFT;
/* rc = how many pages over the target we currently are */
230 rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number;
235 env = cl_env_get(&refcheck);
/* best effort: shrink result deliberately ignored */
237 (void)osc_lru_shrink(env, cli, rc, true);
238 cl_env_put(env, &refcheck);
245 LPROC_SEQ_FOPS(osc_cached_mb);
/* "cur_dirty_bytes" (read-only): current dirty page count expressed in
 * bytes (pages << PAGE_SHIFT). */
247 static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
248 struct attribute *attr,
251 struct obd_device *obd = container_of(kobj, struct obd_device,
253 struct client_obd *cli = &obd->u.cli;
255 return scnprintf(buf, PAGE_SIZE, "%lu\n",
256 cli->cl_dirty_pages << PAGE_SHIFT);
258 LUSTRE_RO_ATTR(cur_dirty_bytes);
/*
 * "cur_grant_bytes" (seq_file): show the currently available grant;
 * write shrinks the grant toward a smaller target (growing grant is
 * not supported here) while the import is FULL.
 * NOTE(review): excerpt elides some interior lines (val declaration,
 * error returns); comments describe only the visible code.
 */
260 static int osc_cur_grant_bytes_seq_show(struct seq_file *m, void *v)
262 struct obd_device *obd = m->private;
263 struct client_obd *cli = &obd->u.cli;
265 seq_printf(m, "%lu\n", cli->cl_avail_grant);
269 static ssize_t osc_cur_grant_bytes_seq_write(struct file *file,
270 const char __user *buffer,
271 size_t count, loff_t *off)
273 struct seq_file *m = file->private_data;
274 struct obd_device *obd = m->private;
275 struct client_obd *cli = &obd->u.cli;
276 struct obd_import *imp;
277 char kernbuf[22] = "";
284 if (count >= sizeof(kernbuf))
287 if (copy_from_user(kernbuf, buffer, count))
291 rc = sysfs_memparse(kernbuf, count, &val, "MiB");
295 /* this is only for shrinking grant */
296 if (val >= cli->cl_avail_grant)
/* only shrink while fully connected; otherwise leave grant alone */
299 with_imp_locked(obd, imp, rc)
300 if (imp->imp_state == LUSTRE_IMP_FULL)
301 rc = osc_shrink_grant_to_target(cli, val);
303 return rc ? rc : count;
305 LPROC_SEQ_FOPS(osc_cur_grant_bytes);
/* "cur_lost_grant_bytes" / "cur_dirty_grant_bytes" (read-only):
 * report grant accounting counters from client_obd. */
307 static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
308 struct attribute *attr,
311 struct obd_device *obd = container_of(kobj, struct obd_device,
313 struct client_obd *cli = &obd->u.cli;
315 return scnprintf(buf, PAGE_SIZE, "%lu\n", cli->cl_lost_grant);
317 LUSTRE_RO_ATTR(cur_lost_grant_bytes);
319 static ssize_t cur_dirty_grant_bytes_show(struct kobject *kobj,
320 struct attribute *attr,
323 struct obd_device *obd = container_of(kobj, struct obd_device,
325 struct client_obd *cli = &obd->u.cli;
327 return scnprintf(buf, PAGE_SIZE, "%lu\n", cli->cl_dirty_grant);
329 LUSTRE_RO_ATTR(cur_dirty_grant_bytes);
/*
 * "grant_shrink_interval": show/store the interval used by the grant
 * shrink worker; store also refreshes the next-shrink deadline and
 * (re)schedules the grant work.
 * NOTE(review): excerpt elides some interior lines (val declaration,
 * validation, returns).
 */
331 static ssize_t grant_shrink_interval_show(struct kobject *kobj,
332 struct attribute *attr,
335 struct obd_device *obd = container_of(kobj, struct obd_device,
338 return sprintf(buf, "%lld\n", obd->u.cli.cl_grant_shrink_interval);
341 static ssize_t grant_shrink_interval_store(struct kobject *kobj,
342 struct attribute *attr,
346 struct obd_device *obd = container_of(kobj, struct obd_device,
351 rc = kstrtouint(buffer, 0, &val);
358 obd->u.cli.cl_grant_shrink_interval = val;
/* move the next shrink time forward and make sure the worker runs */
359 osc_update_next_shrink(&obd->u.cli);
360 osc_schedule_grant_work();
364 LUSTRE_RW_ATTR(grant_shrink_interval);
/* "enable_page_cache_shrink": global boolean toggle for OSC page-cache
 * shrinking (module-wide flag, not per-device). */
366 static ssize_t enable_page_cache_shrink_show(struct kobject *kobj,
367 struct attribute *attr,
370 return scnprintf(buf, PAGE_SIZE, "%d\n", osc_page_cache_shrink_enabled);
373 static ssize_t enable_page_cache_shrink_store(struct kobject *kobj,
374 struct attribute *attr,
381 rc = kstrtobool(buffer, &val);
385 osc_page_cache_shrink_enabled = val;
388 LUSTRE_RW_ATTR(enable_page_cache_shrink);
/* "checksums": per-client boolean enabling bulk I/O checksumming. */
390 static ssize_t checksums_show(struct kobject *kobj,
391 struct attribute *attr,
394 struct obd_device *obd = container_of(kobj, struct obd_device,
397 return scnprintf(buf, PAGE_SIZE, "%d\n", !!obd->u.cli.cl_checksum);
400 static ssize_t checksums_store(struct kobject *kobj,
401 struct attribute *attr,
405 struct obd_device *obd = container_of(kobj, struct obd_device,
410 rc = kstrtobool(buffer, &val);
414 obd->u.cli.cl_checksum = val;
418 LUSTRE_RW_ATTR(checksums);
/*
 * "checksum_type" (seq_file): show lists the server-supported checksum
 * algorithms with the active one in brackets; write selects the
 * preferred algorithm by name (case-insensitive) and activates it if
 * the server supports it.
 * NOTE(review): excerpt elides some interior lines (kernbuf/i
 * declarations, error returns); comments describe only the visible code.
 */
422 static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
424 struct obd_device *obd = m->private;
/* iterate all known algorithms; skip those the server doesn't support */
430 for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
431 if ((BIT(i) & obd->u.cli.cl_supp_cksum_types) == 0)
/* bracket the currently active type, print the rest plainly */
433 if (obd->u.cli.cl_cksum_type == BIT(i))
434 seq_printf(m, "[%s] ", cksum_name[i]);
436 seq_printf(m, "%s ", cksum_name[i]);
443 static ssize_t osc_checksum_type_seq_write(struct file *file,
444 const char __user *buffer,
445 size_t count, loff_t *off)
447 struct seq_file *m = file->private_data;
448 struct obd_device *obd = m->private;
456 if (count > sizeof(kernbuf) - 1)
458 if (copy_from_user(kernbuf, buffer, count))
/* strip a trailing newline and NUL-terminate the name */
461 if (count > 0 && kernbuf[count - 1] == '\n')
462 kernbuf[count - 1] = '\0';
464 kernbuf[count] = '\0';
466 for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
467 if (strcasecmp(kernbuf, cksum_name[i]) == 0) {
/* remember the preference; activate only if the server supports it */
468 obd->u.cli.cl_preferred_cksum_type = BIT(i);
469 if (obd->u.cli.cl_supp_cksum_types & BIT(i)) {
470 obd->u.cli.cl_cksum_type = BIT(i);
480 LPROC_SEQ_FOPS(osc_checksum_type);
/* "resend_count": show/store the atomic resend counter for this client. */
482 static ssize_t resend_count_show(struct kobject *kobj,
483 struct attribute *attr,
486 struct obd_device *obd = container_of(kobj, struct obd_device,
489 return sprintf(buf, "%u\n", atomic_read(&obd->u.cli.cl_resends));
492 static ssize_t resend_count_store(struct kobject *kobj,
493 struct attribute *attr,
497 struct obd_device *obd = container_of(kobj, struct obd_device,
502 rc = kstrtouint(buffer, 10, &val);
506 atomic_set(&obd->u.cli.cl_resends, val);
510 LUSTRE_RW_ATTR(resend_count);
/* "checksum_dump": boolean controlling whether pages are dumped on
 * checksum errors (cl_checksum_dump flag). */
512 static ssize_t checksum_dump_show(struct kobject *kobj,
513 struct attribute *attr,
516 struct obd_device *obd = container_of(kobj, struct obd_device,
519 return scnprintf(buf, PAGE_SIZE, "%d\n", !!obd->u.cli.cl_checksum_dump);
522 static ssize_t checksum_dump_store(struct kobject *kobj,
523 struct attribute *attr,
527 struct obd_device *obd = container_of(kobj, struct obd_device,
532 rc = kstrtobool(buffer, &val);
536 obd->u.cli.cl_checksum_dump = val;
540 LUSTRE_RW_ATTR(checksum_dump);
/* "destroys_in_flight" (read-only): number of object destroy RPCs
 * currently outstanding; plus shared max_pages_per_rpc / short_io_bytes
 * handlers generated by type macros. */
542 static ssize_t destroys_in_flight_show(struct kobject *kobj,
543 struct attribute *attr,
546 struct obd_device *obd = container_of(kobj, struct obd_device,
549 return sprintf(buf, "%u\n",
550 atomic_read(&obd->u.cli.cl_destroy_in_flight));
552 LUSTRE_RO_ATTR(destroys_in_flight);
554 LPROC_SEQ_FOPS_RW_TYPE(osc, obd_max_pages_per_rpc);
556 LUSTRE_RW_ATTR(short_io_bytes);
558 #ifdef CONFIG_PROC_FS
/* "unstable_stats" (seq_file, read-only): pages written but not yet
 * committed by the server, reported in pages and MiB.
 * NOTE(review): excerpt elides some interior lines. */
559 static int osc_unstable_stats_seq_show(struct seq_file *m, void *v)
561 struct obd_device *obd = m->private;
562 struct client_obd *cli = &obd->u.cli;
566 pages = atomic_long_read(&cli->cl_unstable_count);
/* bytes >> 20 == MiB */
567 mb = (pages * PAGE_SIZE) >> 20;
569 seq_printf(m, "unstable_pages: %20ld\n"
570 "unstable_mb: %10d\n",
574 LPROC_SEQ_FOPS_RO(osc_unstable_stats);
/*
 * "idle_timeout": show/store the import idle-disconnect timeout.
 * The store path also accepts "debug"/"nodebug" to adjust idle debug
 * logging, validates the numeric value against CONNECTION_SWITCH_MAX,
 * and pokes the import (via a throwaway request alloc) so a change
 * takes effect even from IDLE state.
 * NOTE(review): excerpt elides interior lines (val declaration, error
 * paths, the nodebug assignment); comments describe only visible code.
 */
576 static ssize_t idle_timeout_show(struct kobject *kobj, struct attribute *attr,
579 struct obd_device *obd = container_of(kobj, struct obd_device,
581 struct obd_import *imp;
584 with_imp_locked(obd, imp, ret)
585 ret = sprintf(buf, "%u\n", imp->imp_idle_timeout);
590 static ssize_t idle_timeout_store(struct kobject *kobj, struct attribute *attr,
591 const char *buffer, size_t count)
593 struct obd_device *obd = container_of(kobj, struct obd_device,
595 struct obd_import *imp;
596 struct ptlrpc_request *req;
597 unsigned int idle_debug = 0;
/* textual forms toggle idle debug verbosity instead of the timeout */
601 if (strncmp(buffer, "debug", 5) == 0) {
602 idle_debug = D_CONSOLE;
603 } else if (strncmp(buffer, "nodebug", 6) == 0) {
606 rc = kstrtouint(buffer, 10, &val);
610 if (val > CONNECTION_SWITCH_MAX)
614 with_imp_locked(obd, imp, rc) {
616 imp->imp_idle_debug = idle_debug;
619 /* initiate the connection if it's in IDLE state */
620 req = ptlrpc_request_alloc(imp,
/* the request itself is discarded; allocation is only the wake-up */
623 ptlrpc_req_finished(req);
625 imp->imp_idle_timeout = val;
631 LUSTRE_RW_ATTR(idle_timeout);
/*
 * "idle_connect" (write-only): force an idle import to reconnect by
 * allocating (and immediately discarding) a request, then forcing a
 * pinger round-trip.
 * NOTE(review): excerpt elides interior lines (rc declaration, return).
 */
633 static ssize_t idle_connect_store(struct kobject *kobj, struct attribute *attr,
634 const char *buffer, size_t count)
636 struct obd_device *obd = container_of(kobj, struct obd_device,
638 struct obd_import *imp;
639 struct ptlrpc_request *req;
642 with_imp_locked(obd, imp, rc) {
643 /* to initiate the connection if it's in IDLE state */
644 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
646 ptlrpc_req_finished(req);
647 ptlrpc_pinger_force(imp);
652 LUSTRE_WO_ATTR(idle_connect);
/*
 * "grant_shrink": show whether grant shrinking is effectively enabled
 * (not locally disabled AND server advertises the connect flag); store
 * toggles the local disable flag under imp_lock.
 * NOTE(review): excerpt elides some interior lines (val declaration,
 * error returns, the OCD flag name).
 */
654 static ssize_t grant_shrink_show(struct kobject *kobj, struct attribute *attr,
657 struct obd_device *obd = container_of(kobj, struct obd_device,
659 struct obd_import *imp;
662 with_imp_locked(obd, imp, len)
663 len = scnprintf(buf, PAGE_SIZE, "%d\n",
664 !imp->imp_grant_shrink_disabled &&
665 OCD_HAS_FLAG(&imp->imp_connect_data,
671 static ssize_t grant_shrink_store(struct kobject *kobj, struct attribute *attr,
672 const char *buffer, size_t count)
674 struct obd_device *obd = container_of(kobj, struct obd_device,
676 struct obd_import *imp;
683 rc = kstrtobool(buffer, &val);
687 with_imp_locked(obd, imp, rc) {
/* flag is inverted: attribute stores "enabled", field stores "disabled" */
688 spin_lock(&imp->imp_lock);
689 imp->imp_grant_shrink_disabled = !val;
690 spin_unlock(&imp->imp_lock);
695 LUSTRE_RW_ATTR(grant_shrink);
/* Generate shared read-only / read-write seq_file handlers for this
 * obd type, then wire all seq_file entries into the procfs table.
 * NOTE(review): excerpt elides some table entries' .name lines
 * (import, state) and the terminating { NULL } sentinel. */
697 LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
698 LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
699 LPROC_SEQ_FOPS_RO_TYPE(osc, timeouts);
700 LPROC_SEQ_FOPS_RO_TYPE(osc, state);
702 LPROC_SEQ_FOPS_RW_TYPE(osc, import);
703 LPROC_SEQ_FOPS_RW_TYPE(osc, pinger_recov);
705 struct lprocfs_vars lprocfs_osc_obd_vars[] = {
706 { .name = "connect_flags",
707 .fops = &osc_connect_flags_fops },
708 { .name = "ost_server_uuid",
709 .fops = &osc_server_uuid_fops },
710 { .name = "max_pages_per_rpc",
711 .fops = &osc_obd_max_pages_per_rpc_fops },
712 { .name = "osc_cached_mb",
713 .fops = &osc_cached_mb_fops },
714 { .name = "cur_grant_bytes",
715 .fops = &osc_cur_grant_bytes_fops },
716 { .name = "checksum_type",
717 .fops = &osc_checksum_type_fops },
718 { .name = "timeouts",
719 .fops = &osc_timeouts_fops },
721 .fops = &osc_import_fops },
723 .fops = &osc_state_fops },
724 { .name = "pinger_recov",
725 .fops = &osc_pinger_recov_fops },
726 { .name = "unstable_stats",
727 .fops = &osc_unstable_stats_fops },
/*
 * "rpc_stats" (seq_file): dump RPC-in-flight counts, pending page
 * counts, and three cumulative histograms (pages per RPC, RPCs in
 * flight, I/O offset), each as read/write columns with percentage and
 * cumulative percentage.  Writing any data resets all histograms.
 * NOTE(review): excerpt elides interior lines (i declaration, cum
 * accumulation statements, loop break, returns); comments describe
 * only the visible code.
 */
731 static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
733 struct obd_device *obd = seq->private;
734 struct client_obd *cli = &obd->u.cli;
735 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
/* hold cl_loi_list_lock so counters and histograms are a snapshot */
738 spin_lock(&cli->cl_loi_list_lock);
740 lprocfs_stats_header(seq, ktime_get_real(), cli->cl_stats_init, 25,
742 seq_printf(seq, "read RPCs in flight: %d\n",
743 cli->cl_r_in_flight);
744 seq_printf(seq, "write RPCs in flight: %d\n",
745 cli->cl_w_in_flight);
746 seq_printf(seq, "pending write pages: %d\n",
747 atomic_read(&cli->cl_pending_w_pages));
748 seq_printf(seq, "pending read pages: %d\n",
749 atomic_read(&cli->cl_pending_r_pages));
/* histogram 1: pages per RPC (bucket i covers 1 << i pages) */
751 seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
752 seq_printf(seq, "pages per rpc rpcs %% cum %% |");
753 seq_printf(seq, " rpcs %% cum %%\n");
755 read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
756 write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
760 for (i = 0; i < OBD_HIST_MAX; i++) {
761 unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
762 unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
766 seq_printf(seq, "%d:\t\t%10lu %3u %3u | %10lu %3u %3u\n",
767 1 << i, r, pct(r, read_tot),
768 pct(read_cum, read_tot), w,
770 pct(write_cum, write_tot));
/* stop once both cumulative counts reach their totals */
771 if (read_cum == read_tot && write_cum == write_tot)
/* histogram 2: RPCs in flight (bucket index is the count itself) */
775 seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
776 seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
777 seq_printf(seq, " rpcs %% cum %%\n");
779 read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
780 write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
784 for (i = 1; i < OBD_HIST_MAX; i++) {
785 unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i];
786 unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i];
789 seq_printf(seq, "%d:\t\t%10lu %3u %3u | %10lu %3u %3u\n",
790 i, r, pct(r, read_tot),
791 pct(read_cum, read_tot), w,
793 pct(write_cum, write_tot));
794 if (read_cum == read_tot && write_cum == write_tot)
/* histogram 3: I/O offset (bucket 0 is offset 0, else 1 << (i - 1)) */
798 seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
799 seq_printf(seq, "offset rpcs %% cum %% |");
800 seq_printf(seq, " rpcs %% cum %%\n");
802 read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
803 write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);
807 for (i = 0; i < OBD_HIST_MAX; i++) {
808 unsigned long r = cli->cl_read_offset_hist.oh_buckets[i];
809 unsigned long w = cli->cl_write_offset_hist.oh_buckets[i];
812 seq_printf(seq, "%d:\t\t%10lu %3u %3u | %10lu %3u %3u\n",
813 (i == 0) ? 0 : 1 << (i - 1),
814 r, pct(r, read_tot), pct(read_cum, read_tot),
815 w, pct(w, write_tot), pct(write_cum, write_tot));
816 if (read_cum == read_tot && write_cum == write_tot)
820 spin_unlock(&cli->cl_loi_list_lock);
/* writing anything to rpc_stats clears all six histograms and
 * restarts the stats epoch */
825 static ssize_t osc_rpc_stats_seq_write(struct file *file,
826 const char __user *buf,
827 size_t len, loff_t *off)
829 struct seq_file *seq = file->private_data;
830 struct obd_device *obd = seq->private;
831 struct client_obd *cli = &obd->u.cli;
833 lprocfs_oh_clear(&cli->cl_read_rpc_hist);
834 lprocfs_oh_clear(&cli->cl_write_rpc_hist);
835 lprocfs_oh_clear(&cli->cl_read_page_hist);
836 lprocfs_oh_clear(&cli->cl_write_page_hist);
837 lprocfs_oh_clear(&cli->cl_read_offset_hist);
838 lprocfs_oh_clear(&cli->cl_write_offset_hist);
839 cli->cl_stats_init = ktime_get_real();
843 LPROC_SEQ_FOPS(osc_rpc_stats);
/*
 * "osc_stats" (seq_file): show lockless read/write byte counters;
 * writing anything zeroes the whole stats struct and restarts the
 * stats epoch.
 * NOTE(review): excerpt elides interior lines (returns).
 */
845 static int osc_stats_seq_show(struct seq_file *seq, void *v)
847 struct obd_device *obd = seq->private;
848 struct osc_stats *stats = &obd2osc_dev(obd)->osc_stats;
850 lprocfs_stats_header(seq, ktime_get_real(), stats->os_init, 25, ":",
852 seq_printf(seq, "lockless_write_bytes\t\t%llu\n",
853 stats->os_lockless_writes);
854 seq_printf(seq, "lockless_read_bytes\t\t%llu\n",
855 stats->os_lockless_reads);
859 static ssize_t osc_stats_seq_write(struct file *file,
860 const char __user *buf,
861 size_t len, loff_t *off)
863 struct seq_file *seq = file->private_data;
864 struct obd_device *obd = seq->private;
865 struct osc_stats *stats = &obd2osc_dev(obd)->osc_stats;
867 memset(stats, 0, sizeof(*stats));
868 stats->os_init = ktime_get_real();
873 LPROC_SEQ_FOPS(osc_stats);
/* Register the "osc_stats" and "rpc_stats" proc entries for this
 * device.  NOTE(review): excerpt elides the rc declaration, the check
 * between the two creates, and the return. */
875 static int lprocfs_osc_attach_seqstat(struct obd_device *obd)
879 rc = lprocfs_seq_create(obd->obd_proc_entry, "osc_stats", 0644,
880 &osc_stats_fops, obd);
882 rc = lprocfs_obd_seq_create(obd, "rpc_stats", 0644,
883 &osc_rpc_stats_fops, obd);
887 #endif /* CONFIG_PROC_FS */
/* Adaptive-timeout tunables generated from the shared uint-param
 * macro, then the full sysfs attribute table for the OSC kobject.
 * NOTE(review): excerpt elides the terminating NULL entry of the
 * osc_attrs[] array. */
889 LUSTRE_OBD_UINT_PARAM_ATTR(at_min);
890 LUSTRE_OBD_UINT_PARAM_ATTR(at_max);
891 LUSTRE_OBD_UINT_PARAM_ATTR(at_history);
892 LUSTRE_OBD_UINT_PARAM_ATTR(ldlm_enqueue_min);
894 static struct attribute *osc_attrs[] = {
895 &lustre_attr_active.attr,
896 &lustre_attr_enable_page_cache_shrink.attr,
897 &lustre_attr_checksums.attr,
898 &lustre_attr_checksum_dump.attr,
899 &lustre_attr_cur_dirty_bytes.attr,
900 &lustre_attr_cur_lost_grant_bytes.attr,
901 &lustre_attr_cur_dirty_grant_bytes.attr,
902 &lustre_attr_destroys_in_flight.attr,
903 &lustre_attr_grant_shrink_interval.attr,
904 &lustre_attr_max_dirty_mb.attr,
905 &lustre_attr_max_rpcs_in_flight.attr,
906 &lustre_attr_short_io_bytes.attr,
907 &lustre_attr_resend_count.attr,
908 &lustre_attr_ost_conn_uuid.attr,
909 &lustre_attr_conn_uuid.attr,
910 &lustre_attr_ping.attr,
911 &lustre_attr_idle_timeout.attr,
912 &lustre_attr_idle_connect.attr,
913 &lustre_attr_grant_shrink.attr,
914 &lustre_attr_at_max.attr,
915 &lustre_attr_at_min.attr,
916 &lustre_attr_at_history.attr,
917 &lustre_attr_ldlm_enqueue_min.attr,
921 KOBJ_ATTRIBUTE_GROUPS(osc); /* creates osc_groups */
/*
 * Set up all lproc/sysfs tunables for an OSC device: install the
 * procfs var table and kobject attribute groups, create the obd proc
 * tree, attach the seq stats (procfs builds only), hook up sptlrpc
 * entries, and register with ptlrpc lprocfs.  The visible tail calls
 * lprocfs_obd_cleanup(), which appears to be the error-unwind path —
 * NOTE(review): the rc checks/gotos between steps are elided in this
 * excerpt; confirm against the full source.
 */
923 int osc_tunables_init(struct obd_device *obd)
927 obd->obd_vars = lprocfs_osc_obd_vars;
928 obd->obd_ktype.default_groups = KOBJ_ATTR_GROUPS(osc);
929 rc = lprocfs_obd_setup(obd, false);
932 #ifdef CONFIG_PROC_FS
933 /* If the basic OSC proc tree construction succeeded then
936 rc = lprocfs_osc_attach_seqstat(obd);
940 #endif /* CONFIG_PROC_FS */
941 rc = sptlrpc_lprocfs_cliobd_attach(obd);
945 ptlrpc_lprocfs_register_obd(obd);
948 lprocfs_obd_cleanup(obd);