4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/osd/osd_lproc.c
33 * Author: Mikhail Pershin <tappro@sun.com>
36 #define DEBUG_SUBSYSTEM S_OSD
38 #include <lprocfs_status.h>
40 #include "osd_internal.h"
/*
 * osd_brw_stats_update() - fold one completed bulk I/O (iobuf) into the
 * device's brw histograms: pages per RPC, discontiguous page runs and
 * discontiguous disk-block runs, bucketed by direction via the rw offset.
 *
 * NOTE(review): this capture is missing some lines (e.g. the
 * blocks_per_page declaration, the early return after the nr_pages check,
 * the discont_* increments and several closing braces); comments describe
 * only the code that is visible here.
 */
44 void osd_brw_stats_update(struct osd_device *osd, struct osd_iobuf *iobuf)
46 struct brw_stats *bs = &osd->od_brw_stats;
47 sector_t *last_block = NULL;
48 struct page **pages = iobuf->dr_pages;
49 struct page *last_page = NULL;
50 unsigned long discont_pages = 0;
51 unsigned long discont_blocks = 0;
52 sector_t *blocks = iobuf->dr_blocks;
53 int i, nr_pages = iobuf->dr_npages;
55 int rw = iobuf->dr_rw;
/* nothing to account for an empty transfer */
57 if (unlikely(nr_pages == 0))
/* filesystem blocks per page, from the superblock's block size */
60 blocks_per_page = PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits;
/* log2 histogram of pages in this bulk, read or write bucket */
62 lprocfs_oh_tally_log2(&bs->bs_hist[BRW_R_PAGES + rw], nr_pages);
64 while (nr_pages-- > 0) {
/* a gap in page indices marks a discontiguous page run
 * (presumably increments discont_pages — line not visible) */
65 if (last_page && (*pages)->index != (last_page->index + 1))
69 for (i = 0; i < blocks_per_page; i++) {
/* a gap in block numbers marks a discontiguous block run
 * (presumably increments discont_blocks — line not visible) */
70 if (last_block && *blocks != (*last_block + 1))
72 last_block = blocks++;
/* linear tallies: number of discontiguities found in this bulk */
76 lprocfs_oh_tally(&bs->bs_hist[BRW_R_DISCONT_PAGES+rw], discont_pages);
77 lprocfs_oh_tally(&bs->bs_hist[BRW_R_DISCONT_BLOCKS+rw], discont_blocks);
/*
 * display_brw_stats() - print one read/write histogram pair to the seq
 * file: per-bucket count, percentage and cumulative percentage for both
 * directions.  Bucket labels are scaled: plain index, scale<<i, K or M
 * forms depending on the bucket (the selecting conditionals are not
 * visible in this capture).  Iteration stops once both cumulative sums
 * have reached their totals.
 */
80 static void display_brw_stats(struct seq_file *seq, char *name, char *units,
81 struct obd_histogram *read, struct obd_histogram *write, int scale)
83 unsigned long read_tot, write_tot, r, w, read_cum = 0, write_cum = 0;
/* column headers for the read | write table */
86 seq_printf(seq, "\n%26s read | write\n", " ");
87 seq_printf(seq, "%-22s %-5s %% cum %% | %-11s %% cum %%\n",
90 read_tot = lprocfs_oh_sum(read);
91 write_tot = lprocfs_oh_sum(write);
92 for (i = 0; i < OBD_HIST_MAX; i++) {
93 r = read->oh_buckets[i];
94 w = write->oh_buckets[i];
/* skip leading empty buckets (presumably continues — not visible) */
97 if (read_cum == 0 && write_cum == 0)
/* bucket label: raw index, scaled value, or K/M-scaled value */
101 seq_printf(seq, "%u", i);
103 seq_printf(seq, "%u", scale << i);
105 seq_printf(seq, "%uK", scale << (i-10));
107 seq_printf(seq, "%uM", scale << (i-20));
/* count, percent of total, cumulative percent — for each direction */
109 seq_printf(seq, ":\t\t%10lu %3u %3u | %4lu %3u %3u\n",
110 r, pct(r, read_tot), pct(read_cum, read_tot),
111 w, pct(w, write_tot), pct(write_cum, write_tot));
/* all samples printed — nothing more to show */
113 if (read_cum == read_tot && write_cum == write_tot)
/*
 * brw_stats_show() - emit the full brw_stats report: a snapshot header
 * followed by one read/write histogram table per metric.  Sampling is
 * intentionally lockless and may race with concurrent updates.
 */
118 static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
120 /* this sampling races with updates */
121 lprocfs_stats_header(seq, ktime_get(), brw_stats->bs_init, 25, ":", 1);
123 display_brw_stats(seq, "pages per bulk r/w", "rpcs",
124 &brw_stats->bs_hist[BRW_R_PAGES],
125 &brw_stats->bs_hist[BRW_W_PAGES], 1);
127 display_brw_stats(seq, "discontiguous pages", "rpcs",
128 &brw_stats->bs_hist[BRW_R_DISCONT_PAGES],
129 &brw_stats->bs_hist[BRW_W_DISCONT_PAGES], 0);
131 display_brw_stats(seq, "discontiguous blocks", "rpcs",
132 &brw_stats->bs_hist[BRW_R_DISCONT_BLOCKS],
133 &brw_stats->bs_hist[BRW_W_DISCONT_BLOCKS], 0);
135 display_brw_stats(seq, "disk fragmented I/Os", "ios",
136 &brw_stats->bs_hist[BRW_R_DIO_FRAGS],
137 &brw_stats->bs_hist[BRW_W_DIO_FRAGS], 0);
139 display_brw_stats(seq, "disk I/Os in flight", "ios",
140 &brw_stats->bs_hist[BRW_R_RPC_HIST],
141 &brw_stats->bs_hist[BRW_W_RPC_HIST], 0);
143 display_brw_stats(seq, "I/O time (1/1000s)", "ios",
144 &brw_stats->bs_hist[BRW_R_IO_TIME],
145 &brw_stats->bs_hist[BRW_W_IO_TIME], 1);
147 display_brw_stats(seq, "disk I/O size", "ios",
148 &brw_stats->bs_hist[BRW_R_DISK_IOSIZE],
149 &brw_stats->bs_hist[BRW_W_DISK_IOSIZE], 1);
/*
 * osd_brw_stats_seq_show() - seq_file show callback for the brw_stats
 * proc entry; seq->private carries the osd_device set at file creation.
 */
152 static int osd_brw_stats_seq_show(struct seq_file *seq, void *v)
154 struct osd_device *osd = seq->private;
156 brw_stats_show(seq, &osd->od_brw_stats);
/*
 * osd_brw_stats_seq_write() - any write to brw_stats resets the device
 * statistics: every histogram is cleared and the snapshot start time is
 * refreshed.  The written data itself is ignored.
 */
161 static ssize_t osd_brw_stats_seq_write(struct file *file,
162 const char __user *buf,
163 size_t len, loff_t *off)
165 struct seq_file *seq = file->private_data;
166 struct osd_device *osd = seq->private;
169 for (i = 0; i < BRW_LAST; i++)
170 lprocfs_oh_clear(&osd->od_brw_stats.bs_hist[i]);
171 osd->od_brw_stats.bs_init = ktime_get();
/* generates osd_brw_stats_fops from the _seq_show/_seq_write pair */
176 LPROC_SEQ_FOPS(osd_brw_stats);
/*
 * osd_stats_init() - set up per-device statistics: initialize the
 * brw_stats timestamp and per-histogram spinlocks, allocate and register
 * the "stats" lprocfs counters, and create the "brw_stats" proc file.
 *
 * NOTE(review): error-handling/return lines are not visible in this
 * capture; the visible flow only registers counters when allocation
 * succeeded.
 */
178 static int osd_stats_init(struct osd_device *osd)
183 osd->od_brw_stats.bs_init = ktime_get();
/* one lock per histogram so tallies don't contend with each other */
185 for (i = 0; i < BRW_LAST; i++)
186 spin_lock_init(&osd->od_brw_stats.bs_hist[i].oh_lock);
188 osd->od_stats = lprocfs_alloc_stats(LPROC_OSD_LAST, 0);
189 if (osd->od_stats != NULL) {
190 result = lprocfs_register_stats(osd->od_proc_entry, "stats",
/* page-cache counters exported under "stats" */
195 lprocfs_counter_init(osd->od_stats, LPROC_OSD_GET_PAGE,
196 LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
198 lprocfs_counter_init(osd->od_stats, LPROC_OSD_NO_PAGE,
199 LPROCFS_CNTR_AVGMINMAX,
200 "get_page_failures", "num");
201 lprocfs_counter_init(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
202 LPROCFS_CNTR_AVGMINMAX,
203 "cache_access", "pages");
204 lprocfs_counter_init(osd->od_stats, LPROC_OSD_CACHE_HIT,
205 LPROCFS_CNTR_AVGMINMAX,
206 "cache_hit", "pages");
207 lprocfs_counter_init(osd->od_stats, LPROC_OSD_CACHE_MISS,
208 LPROCFS_CNTR_AVGMINMAX,
209 "cache_miss", "pages");
/* optional transaction-handle timing counters (compile-time gated) */
210 #if OSD_THANDLE_STATS
211 lprocfs_counter_init(osd->od_stats, LPROC_OSD_THANDLE_STARTING,
212 LPROCFS_CNTR_AVGMINMAX,
213 "thandle starting", "usec");
214 lprocfs_counter_init(osd->od_stats, LPROC_OSD_THANDLE_OPEN,
215 LPROCFS_CNTR_AVGMINMAX,
216 "thandle open", "usec");
217 lprocfs_counter_init(osd->od_stats, LPROC_OSD_THANDLE_CLOSING,
218 LPROCFS_CNTR_AVGMINMAX,
219 "thandle closing", "usec");
/* brw_stats file: world-readable, owner-writable (write == reset) */
221 result = lprocfs_seq_create(osd->od_proc_entry, "brw_stats",
222 0644, &osd_brw_stats_fops, osd);
/*
 * fstype_show() - read-only sysfs attribute; this OSD backend is always
 * "ldiskfs".
 */
230 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
233 return sprintf(buf, "ldiskfs\n");
235 LUSTRE_RO_ATTR(fstype);
/*
 * mntdev_show() - read-only sysfs attribute reporting the mounted block
 * device path; bails out if the device is not mounted (return line not
 * visible in this capture).
 */
237 static ssize_t mntdev_show(struct kobject *kobj, struct attribute *attr,
240 struct dt_device *dt = container_of(kobj, struct dt_device,
242 struct osd_device *osd = osd_dt_dev(dt);
245 if (unlikely(!osd->od_mnt))
248 return sprintf(buf, "%s\n", osd->od_mntdev);
250 LUSTRE_RO_ATTR(mntdev);
/*
 * read_cache_enable - RW sysfs attribute toggling the OSD read cache.
 * show prints od_read_cache as 0/1; store parses a boolean and
 * normalizes it with !! before assignment.
 */
252 static ssize_t read_cache_enable_show(struct kobject *kobj,
253 struct attribute *attr,
256 struct dt_device *dt = container_of(kobj, struct dt_device,
258 struct osd_device *osd = osd_dt_dev(dt);
/* device must be mounted before the tunable is meaningful */
261 if (unlikely(!osd->od_mnt))
264 return sprintf(buf, "%u\n", osd->od_read_cache);
267 static ssize_t read_cache_enable_store(struct kobject *kobj,
268 struct attribute *attr,
269 const char *buffer, size_t count)
271 struct dt_device *dt = container_of(kobj, struct dt_device,
273 struct osd_device *osd = osd_dt_dev(dt);
278 if (unlikely(!osd->od_mnt))
/* accepts the usual boolean spellings (0/1, y/n, on/off) */
281 rc = kstrtobool(buffer, &val);
285 osd->od_read_cache = !!val;
288 LUSTRE_RW_ATTR(read_cache_enable);
/*
 * writethrough_cache_enable - RW sysfs attribute toggling the OSD
 * writethrough cache; same show/store pattern as read_cache_enable.
 */
290 static ssize_t writethrough_cache_enable_show(struct kobject *kobj,
291 struct attribute *attr,
294 struct dt_device *dt = container_of(kobj, struct dt_device,
296 struct osd_device *osd = osd_dt_dev(dt);
299 if (unlikely(!osd->od_mnt))
302 return sprintf(buf, "%u\n", osd->od_writethrough_cache);
305 static ssize_t writethrough_cache_enable_store(struct kobject *kobj,
306 struct attribute *attr,
310 struct dt_device *dt = container_of(kobj, struct dt_device,
312 struct osd_device *osd = osd_dt_dev(dt);
317 if (unlikely(!osd->od_mnt))
320 rc = kstrtobool(buffer, &val);
/* normalize to 0/1 before storing */
324 osd->od_writethrough_cache = !!val;
327 LUSTRE_RW_ATTR(writethrough_cache_enable);
/*
 * fallocate_zero_blocks - RW sysfs attribute controlling fallocate()
 * behaviour on the backing filesystem (see the mode comment below).
 */
329 static ssize_t fallocate_zero_blocks_show(struct kobject *kobj,
330 struct attribute *attr,
333 struct dt_device *dt = container_of(kobj, struct dt_device,
335 struct osd_device *osd = osd_dt_dev(dt);
338 if (unlikely(!osd->od_mnt))
341 return scnprintf(buf, PAGE_SIZE, "%d\n", osd->od_fallocate_zero_blocks);
345 * Set how fallocate() interacts with the backing filesystem:
346 * -1: fallocate is disabled and returns -EOPNOTSUPP
347 * 0: fallocate allocates unwritten extents (like ext4)
348 * 1: fallocate zeroes allocated extents on disk
350 static ssize_t fallocate_zero_blocks_store(struct kobject *kobj,
351 struct attribute *attr,
352 const char *buffer, size_t count)
354 struct dt_device *dt = container_of(kobj, struct dt_device,
356 struct osd_device *osd = osd_dt_dev(dt);
361 if (unlikely(!osd->od_mnt))
364 rc = kstrtol(buffer, 0, &val);
/* only the three documented modes are accepted */
368 if (val < -1 || val > 1)
371 osd->od_fallocate_zero_blocks = val;
374 LUSTRE_RW_ATTR(fallocate_zero_blocks);
/*
 * force_sync_store - write-only sysfs attribute; any write forces a
 * synchronous flush of the device via dt_sync() in a locally
 * initialized lu_env.  Returns count on success, the error otherwise.
 */
376 ssize_t force_sync_store(struct kobject *kobj, struct attribute *attr,
377 const char *buffer, size_t count)
379 struct dt_device *dt = container_of(kobj, struct dt_device,
381 struct osd_device *osd = osd_dt_dev(dt);
386 if (unlikely(!osd->od_mnt))
/* a short-lived env is needed to call into the dt layer */
389 rc = lu_env_init(&env, LCT_LOCAL);
393 rc = dt_sync(&env, dt);
396 return rc == 0 ? count : rc;
398 LUSTRE_WO_ATTR(force_sync);
/*
 * nonrotational - RW sysfs attribute flagging the backing store as
 * SSD-like (non-rotational); stored as a plain boolean.
 */
400 static ssize_t nonrotational_show(struct kobject *kobj, struct attribute *attr,
403 struct dt_device *dt = container_of(kobj, struct dt_device,
405 struct osd_device *osd = osd_dt_dev(dt);
408 if (unlikely(!osd->od_mnt))
411 return sprintf(buf, "%u\n", osd->od_nonrotational);
414 static ssize_t nonrotational_store(struct kobject *kobj,
415 struct attribute *attr, const char *buffer,
418 struct dt_device *dt = container_of(kobj, struct dt_device,
420 struct osd_device *osd = osd_dt_dev(dt);
425 if (unlikely(!osd->od_mnt))
428 rc = kstrtobool(buffer, &val);
432 osd->od_nonrotational = val;
435 LUSTRE_RW_ATTR(nonrotational);
/*
 * pdo - sysfs attribute for the global ldiskfs parallel-directory-
 * operations flag; shown as "ON"/"OFF", stored via kstrtobool.
 * NOTE(review): the tail of pdo_store (assignment/return) is not
 * visible in this capture.
 */
437 static ssize_t pdo_show(struct kobject *kobj, struct attribute *attr,
440 return sprintf(buf, "%s\n", ldiskfs_pdo ? "ON" : "OFF");
443 static ssize_t pdo_store(struct kobject *kobj, struct attribute *attr,
444 const char *buffer, size_t count)
449 rc = kstrtobool(buffer, &pdo);
/*
 * auto_scrub - RW sysfs attribute for the automatic OI scrub interval,
 * stored in os_auto_scrub_interval as a signed 64-bit value.
 */
459 static ssize_t auto_scrub_show(struct kobject *kobj, struct attribute *attr,
462 struct dt_device *dt = container_of(kobj, struct dt_device,
464 struct osd_device *dev = osd_dt_dev(dt);
467 if (unlikely(!dev->od_mnt))
470 return scnprintf(buf, PAGE_SIZE, "%lld\n",
471 dev->od_scrub.os_scrub.os_auto_scrub_interval);
474 static ssize_t auto_scrub_store(struct kobject *kobj, struct attribute *attr,
475 const char *buffer, size_t count)
477 struct dt_device *dt = container_of(kobj, struct dt_device,
479 struct osd_device *dev = osd_dt_dev(dt);
484 if (unlikely(!dev->od_mnt))
/* base-0 parse: accepts decimal, hex and octal input */
487 rc = kstrtoll(buffer, 0, &val);
491 dev->od_scrub.os_scrub.os_auto_scrub_interval = val;
494 LUSTRE_RW_ATTR(auto_scrub);
/*
 * full_scrub_ratio - RW sysfs attribute for od_full_scrub_ratio;
 * a validation check between parse and assignment is not visible in
 * this capture.
 */
496 static ssize_t full_scrub_ratio_show(struct kobject *kobj,
497 struct attribute *attr,
500 struct dt_device *dt = container_of(kobj, struct dt_device,
502 struct osd_device *dev = osd_dt_dev(dt);
505 if (unlikely(!dev->od_mnt))
508 return sprintf(buf, "%llu\n", dev->od_full_scrub_ratio);
511 static ssize_t full_scrub_ratio_store(struct kobject *kobj,
512 struct attribute *attr,
513 const char *buffer, size_t count)
515 struct dt_device *dt = container_of(kobj, struct dt_device,
517 struct osd_device *dev = osd_dt_dev(dt);
522 if (unlikely(!dev->od_mnt))
525 rc = kstrtoll(buffer, 0, &val);
532 dev->od_full_scrub_ratio = val;
535 LUSTRE_RW_ATTR(full_scrub_ratio);
/*
 * full_scrub_threshold_rate - RW sysfs attribute; the rate of bad OI
 * mappings per minute above which a full scrub is triggered.
 */
537 static ssize_t full_scrub_threshold_rate_show(struct kobject *kobj,
538 struct attribute *attr,
541 struct dt_device *dt = container_of(kobj, struct dt_device,
543 struct osd_device *dev = osd_dt_dev(dt);
546 if (unlikely(!dev->od_mnt))
549 return sprintf(buf, "%llu (bad OI mappings/minute)\n",
550 dev->od_full_scrub_threshold_rate);
553 static ssize_t full_scrub_threshold_rate_store(struct kobject *kobj,
554 struct attribute *attr,
555 const char *buffer, size_t count)
557 struct dt_device *dt = container_of(kobj, struct dt_device,
559 struct osd_device *dev = osd_dt_dev(dt);
564 if (unlikely(!dev->od_mnt))
/* unsigned parse matches the unsigned field being set */
567 rc = kstrtoull(buffer, 0, &val);
571 dev->od_full_scrub_threshold_rate = val;
574 LUSTRE_RW_ATTR(full_scrub_threshold_rate);
/*
 * extent_bytes_allocation_show - RO sysfs attribute reporting the
 * minimum of the per-CPU extent-bytes values across online CPUs.
 * NOTE(review): the comparison/update of "min" inside the loop is not
 * visible in this capture.
 */
576 static ssize_t extent_bytes_allocation_show(struct kobject *kobj,
577 struct attribute *attr, char *buf)
579 struct dt_device *dt = container_of(kobj, struct dt_device,
581 struct osd_device *dev = osd_dt_dev(dt);
/* start from UINT_MAX so the first sample always wins */
583 unsigned int min = (unsigned int)(~0), cur;
585 for_each_online_cpu(i) {
586 cur = *per_cpu_ptr(dev->od_extent_bytes_percpu, i);
590 return snprintf(buf, PAGE_SIZE, "%u\n", min);
592 LUSTRE_RO_ATTR(extent_bytes_allocation);
/*
 * ldiskfs_osd_oi_scrub_seq_show - read-only debugfs file dumping the OI
 * scrub status for this device.
 */
594 static int ldiskfs_osd_oi_scrub_seq_show(struct seq_file *m, void *data)
596 struct osd_device *dev = osd_dt_dev((struct dt_device *)m->private);
598 LASSERT(dev != NULL);
599 if (unlikely(dev->od_mnt == NULL))
602 osd_scrub_dump(m, dev);
606 LDEBUGFS_SEQ_FOPS_RO(ldiskfs_osd_oi_scrub);
/*
 * readcache_max_filesize - debugfs tunable for the largest file size
 * eligible for the read cache.  Writes accept human-readable sizes
 * (sysfs_memparse with "B" units) and are clamped to OSD_MAX_CACHE_SIZE.
 */
608 static int ldiskfs_osd_readcache_seq_show(struct seq_file *m, void *data)
610 struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
612 LASSERT(osd != NULL);
613 if (unlikely(osd->od_mnt == NULL))
616 seq_printf(m, "%llu\n", osd->od_readcache_max_filesize);
621 ldiskfs_osd_readcache_seq_write(struct file *file, const char __user *buffer,
622 size_t count, loff_t *off)
624 struct seq_file *m = file->private_data;
625 struct dt_device *dt = m->private;
626 struct osd_device *osd = osd_dt_dev(dt);
627 char kernbuf[22] = "";
631 LASSERT(osd != NULL);
632 if (unlikely(osd->od_mnt == NULL))
/* keep room for the NUL terminator */
635 if (count >= sizeof(kernbuf))
638 if (copy_from_user(kernbuf, buffer, count))
/* parse "123", "4k", "1M", ... into bytes */
642 rc = sysfs_memparse(kernbuf, count, &val, "B");
/* clamp to the implementation's maximum cache size */
646 osd->od_readcache_max_filesize = val > OSD_MAX_CACHE_SIZE ?
647 OSD_MAX_CACHE_SIZE : val;
651 LDEBUGFS_SEQ_FOPS(ldiskfs_osd_readcache);
/*
 * readcache_max_io_mb - debugfs tunable for the largest single read I/O
 * cached, shown in MiB (value is stored in bytes, hence >> 20).
 * Writes parse MiB-style input and reject values above
 * PTLRPC_MAX_BRW_SIZE.
 */
653 static int ldiskfs_osd_readcache_max_io_seq_show(struct seq_file *m, void *data)
655 struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
657 LASSERT(osd != NULL);
658 if (unlikely(osd->od_mnt == NULL))
661 seq_printf(m, "%lu\n", osd->od_readcache_max_iosize >> 20);
666 ldiskfs_osd_readcache_max_io_seq_write(struct file *file,
667 const char __user *buffer,
668 size_t count, loff_t *off)
670 struct seq_file *m = file->private_data;
671 struct dt_device *dt = m->private;
672 struct osd_device *osd = osd_dt_dev(dt);
673 char kernbuf[22] = "";
677 LASSERT(osd != NULL);
678 if (unlikely(osd->od_mnt == NULL))
681 if (count >= sizeof(kernbuf))
684 if (copy_from_user(kernbuf, buffer, count))
/* bare numbers are interpreted as MiB */
688 rc = sysfs_memparse(kernbuf, count, &val, "MiB");
/* cannot exceed the maximum bulk RPC size */
692 if (val > PTLRPC_MAX_BRW_SIZE)
694 osd->od_readcache_max_iosize = val;
698 LDEBUGFS_SEQ_FOPS(ldiskfs_osd_readcache_max_io);
/*
 * writethrough_max_io_mb - debugfs tunable for the largest single write
 * I/O passed through the cache; identical MiB parse/clamp pattern as
 * readcache_max_io_mb.
 */
700 static int ldiskfs_osd_writethrough_max_io_seq_show(struct seq_file *m,
703 struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
705 LASSERT(osd != NULL);
706 if (unlikely(osd->od_mnt == NULL))
709 seq_printf(m, "%lu\n", osd->od_writethrough_max_iosize >> 20);
714 ldiskfs_osd_writethrough_max_io_seq_write(struct file *file,
715 const char __user *buffer,
716 size_t count, loff_t *off)
718 struct seq_file *m = file->private_data;
719 struct dt_device *dt = m->private;
720 struct osd_device *osd = osd_dt_dev(dt);
721 char kernbuf[22] = "";
725 LASSERT(osd != NULL);
726 if (unlikely(osd->od_mnt == NULL))
729 if (count >= sizeof(kernbuf))
732 if (copy_from_user(kernbuf, buffer, count))
736 rc = sysfs_memparse(kernbuf, count, &val, "MiB");
740 if (val > PTLRPC_MAX_BRW_SIZE)
742 osd->od_writethrough_max_iosize = val;
746 LDEBUGFS_SEQ_FOPS(ldiskfs_osd_writethrough_max_io);
/*
 * index_in_idif - compat RW sysfs attribute (only built for versions
 * before 3.0.52).  Enabling it is a one-way operation: the store path
 * sets the OBD_ROCOMPAT_IDX_IN_IDIF rocompat feature, persists the
 * server data, and the feature cannot be reverted.
 * osd_register_proc_index_in_idif() exposes the attribute via sysfs.
 */
748 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 52, 0)
749 static ssize_t index_in_idif_show(struct kobject *kobj, struct attribute *attr,
752 struct dt_device *dt = container_of(kobj, struct dt_device,
754 struct osd_device *dev = osd_dt_dev(dt);
757 if (unlikely(!dev->od_mnt))
760 return sprintf(buf, "%d\n", (int)(dev->od_index_in_idif));
763 static ssize_t index_in_idif_store(struct kobject *kobj,
764 struct attribute *attr,
765 const char *buffer, size_t count)
767 struct dt_device *dt = container_of(kobj, struct dt_device,
769 struct osd_device *dev = osd_dt_dev(dt);
770 struct lu_target *tgt;
776 if (unlikely(!dev->od_mnt))
779 rc = kstrtobool(buffer, &val);
/* already enabled: warn that it is irreversible and keep it on */
783 if (dev->od_index_in_idif) {
787 LCONSOLE_WARN("%s: OST-index in IDIF has been enabled, "
788 "it cannot be reverted back.\n", osd_name(dev));
795 rc = lu_env_init(&env, LCT_DT_THREAD);
/* persist the rocompat feature flag in the server data */
799 tgt = dev->od_dt_dev.dd_lu_dev.ld_site->ls_tgt;
800 tgt->lut_lsd.lsd_feature_rocompat |= OBD_ROCOMPAT_IDX_IN_IDIF;
801 rc = tgt_server_data_update(&env, tgt, 1);
806 LCONSOLE_INFO("%s: enable OST-index in IDIF successfully, "
807 "it cannot be reverted back.\n", osd_name(dev));
809 dev->od_index_in_idif = 1;
812 LUSTRE_RW_ATTR(index_in_idif);
/* register the compat attribute on the dt_device kobject */
814 int osd_register_proc_index_in_idif(struct osd_device *osd)
816 struct dt_device *dt = &osd->od_dt_dev;
818 return sysfs_create_file(&dt->dd_kobj, &lustre_attr_index_in_idif.attr);
/*
 * index_backup - RW sysfs attribute for od_index_backup_policy, parsed
 * as a plain int.  A range check between parse and assignment is not
 * visible in this capture.
 */
822 static ssize_t index_backup_show(struct kobject *kobj, struct attribute *attr,
825 struct dt_device *dt = container_of(kobj, struct dt_device,
827 struct osd_device *dev = osd_dt_dev(dt);
830 if (unlikely(!dev->od_mnt))
833 return sprintf(buf, "%d\n", dev->od_index_backup_policy);
836 ssize_t index_backup_store(struct kobject *kobj, struct attribute *attr,
837 const char *buffer, size_t count)
839 struct dt_device *dt = container_of(kobj, struct dt_device,
841 struct osd_device *dev = osd_dt_dev(dt);
846 if (unlikely(!dev->od_mnt))
849 rc = kstrtoint(buffer, 0, &val);
853 dev->od_index_backup_policy = val;
856 LUSTRE_RW_ATTR(index_backup);
/*
 * Debugfs entries registered per OSD device; each maps a file name to
 * the seq_file fops defined above.
 */
858 struct ldebugfs_vars ldebugfs_osd_obd_vars[] = {
859 { .name = "oi_scrub",
860 .fops = &ldiskfs_osd_oi_scrub_fops },
861 { .name = "readcache_max_filesize",
862 .fops = &ldiskfs_osd_readcache_fops },
863 { .name = "readcache_max_io_mb",
864 .fops = &ldiskfs_osd_readcache_max_io_fops },
865 { .name = "writethrough_max_io_mb",
866 .fops = &ldiskfs_osd_writethrough_max_io_fops },
/*
 * Sysfs attributes installed as the dt_device's default attribute set;
 * each entry corresponds to a LUSTRE_*_ATTR defined above.
 */
870 static struct attribute *ldiskfs_attrs[] = {
871 &lustre_attr_read_cache_enable.attr,
872 &lustre_attr_writethrough_cache_enable.attr,
873 &lustre_attr_fstype.attr,
874 &lustre_attr_mntdev.attr,
875 &lustre_attr_fallocate_zero_blocks.attr,
876 &lustre_attr_force_sync.attr,
877 &lustre_attr_nonrotational.attr,
878 &lustre_attr_index_backup.attr,
879 &lustre_attr_auto_scrub.attr,
880 &lustre_attr_pdo.attr,
881 &lustre_attr_full_scrub_ratio.attr,
882 &lustre_attr_full_scrub_threshold_rate.attr,
883 &lustre_attr_extent_bytes_allocation.attr,
/*
 * osd_procfs_init() - register all tunables for one device: look up the
 * obd_type, install the sysfs/debugfs entries via dt_tunables_init(),
 * create the per-device lprocfs directory, and initialize statistics.
 * On failure after partial setup the visible path calls
 * osd_procfs_fini() to unwind.
 *
 * NOTE(review): several error-return lines are not visible in this
 * capture.
 */
887 int osd_procfs_init(struct osd_device *osd, const char *name)
889 struct obd_type *type;
894 /* at the moment there is no linkage between lu_type
895 * and obd_type, so we lookup obd_type this way
897 type = class_search_type(LUSTRE_OSD_LDISKFS_NAME);
902 CDEBUG(D_CONFIG, "%s: register osd-ldiskfs tunable parameters\n", name);
904 /* put reference taken by class_search_type */
905 kobject_put(&type->typ_kobj);
907 osd->od_dt_dev.dd_ktype.default_attrs = ldiskfs_attrs;
908 rc = dt_tunables_init(&osd->od_dt_dev, type, name,
909 ldebugfs_osd_obd_vars);
911 CERROR("%s: cannot setup sysfs / debugfs entry: %d\n",
/* already registered — nothing more to do (branch body not visible) */
916 if (osd->od_proc_entry)
919 /* Find the type procroot and add the proc entry for this device */
920 osd->od_proc_entry = lprocfs_register(name, type->typ_procroot,
921 NULL, &osd->od_dt_dev);
922 if (IS_ERR(osd->od_proc_entry)) {
923 rc = PTR_ERR(osd->od_proc_entry);
924 CERROR("Error %d setting up lprocfs for %s\n",
926 osd->od_proc_entry = NULL;
930 rc = osd_stats_init(osd);
/* unwind partial registration on error */
935 osd_procfs_fini(osd);
/*
 * osd_procfs_fini() - tear down everything osd_procfs_init() created:
 * free the stats counters, remove the proc entry if present, and
 * return the result of dt_tunables_fini().
 */
939 int osd_procfs_fini(struct osd_device *osd)
942 lprocfs_free_stats(&osd->od_stats);
944 if (osd->od_proc_entry)
945 lprocfs_remove(&osd->od_proc_entry);
947 return dt_tunables_fini(&osd->od_dt_dev);