LU-14927 scrub: create shared scrub_needs_check() function.
[fs/lustre-release.git] lustre/osd-ldiskfs/osd_lproc.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/osd-ldiskfs/osd_lproc.c
 *
 * Author: Mikhail Pershin <tappro@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSD

#include <lprocfs_status.h>

#include "osd_internal.h"

#ifdef CONFIG_PROC_FS

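/*
 * Accumulate per-request I/O shape statistics for one completed iobuf:
 * the number of pages per bulk transfer, and how many of those pages and
 * underlying disk blocks were discontiguous.
 */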
void osd_brw_stats_update(struct osd_device *osd, struct osd_iobuf *iobuf)
{
        struct brw_stats *s = &osd->od_brw_stats;
        sector_t         *last_block = NULL;
        struct page     **pages = iobuf->dr_pages;
        struct page      *last_page = NULL;
        unsigned long     discont_pages = 0;
        unsigned long     discont_blocks = 0;
        sector_t         *blocks = iobuf->dr_blocks;
        int               i, nr_pages = iobuf->dr_npages;
        int               blocks_per_page;
        int               rw = iobuf->dr_rw;

        if (unlikely(nr_pages == 0))
                return;

        blocks_per_page = PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits;

        lprocfs_oh_tally_log2(&s->hist[BRW_R_PAGES+rw], nr_pages);

        while (nr_pages-- > 0) {
                if (last_page && (*pages)->index != (last_page->index + 1))
                        discont_pages++;
                last_page = *pages;
                pages++;
                for (i = 0; i < blocks_per_page; i++) {
                        if (last_block && *blocks != (*last_block + 1))
                                discont_blocks++;
                        last_block = blocks++;
                }
        }

        lprocfs_oh_tally(&s->hist[BRW_R_DISCONT_PAGES+rw], discont_pages);
        lprocfs_oh_tally(&s->hist[BRW_R_DISCONT_BLOCKS+rw], discont_blocks);
}

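/*
 * Print one histogram pair as a two-column (read | write) table with
 * per-bucket counts, percentages and cumulative percentages.  @scale == 0
 * prints the raw bucket index; otherwise buckets are labelled as
 * scale << i, with K/M suffixes for the larger buckets.
 */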
static void display_brw_stats(struct seq_file *seq, char *name, char *units,
        struct obd_histogram *read, struct obd_histogram *write, int scale)
{
        unsigned long read_tot, write_tot, r, w, read_cum = 0, write_cum = 0;
        int i;

        seq_printf(seq, "\n%26s read      |     write\n", " ");
        seq_printf(seq, "%-22s %-5s %% cum %% |  %-11s %% cum %%\n",
                   name, units, units);

        read_tot = lprocfs_oh_sum(read);
        write_tot = lprocfs_oh_sum(write);
        for (i = 0; i < OBD_HIST_MAX; i++) {
                r = read->oh_buckets[i];
                w = write->oh_buckets[i];
                read_cum += r;
                write_cum += w;
                if (read_cum == 0 && write_cum == 0)
                        continue;

                if (!scale)
                        seq_printf(seq, "%u", i);
                else if (i < 10)
                        seq_printf(seq, "%u", scale << i);
                else if (i < 20)
                        seq_printf(seq, "%uK", scale << (i-10));
                else
                        seq_printf(seq, "%uM", scale << (i-20));

                seq_printf(seq, ":\t\t%10lu %3u %3u   | %4lu %3u %3u\n",
                           r, pct(r, read_tot), pct(read_cum, read_tot),
                           w, pct(w, write_tot), pct(write_cum, write_tot));

                if (read_cum == read_tot && write_cum == write_tot)
                        break;
        }
}

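/*
 * Dump every brw_stats histogram for this device.  The snapshot is taken
 * without locking, so counters may still move while they are printed.
 */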
static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
{
        struct timespec64 now;

        /* this sampling races with updates */
        ktime_get_real_ts64(&now);

        seq_printf(seq, "snapshot_time:         %lld.%09ld (secs.nsecs)\n",
                   (s64)now.tv_sec, now.tv_nsec);

        display_brw_stats(seq, "pages per bulk r/w", "rpcs",
                          &brw_stats->hist[BRW_R_PAGES],
                          &brw_stats->hist[BRW_W_PAGES], 1);

        display_brw_stats(seq, "discontiguous pages", "rpcs",
                          &brw_stats->hist[BRW_R_DISCONT_PAGES],
                          &brw_stats->hist[BRW_W_DISCONT_PAGES], 0);

        display_brw_stats(seq, "discontiguous blocks", "rpcs",
                          &brw_stats->hist[BRW_R_DISCONT_BLOCKS],
                          &brw_stats->hist[BRW_W_DISCONT_BLOCKS], 0);

        display_brw_stats(seq, "disk fragmented I/Os", "ios",
                          &brw_stats->hist[BRW_R_DIO_FRAGS],
                          &brw_stats->hist[BRW_W_DIO_FRAGS], 0);

        display_brw_stats(seq, "disk I/Os in flight", "ios",
                          &brw_stats->hist[BRW_R_RPC_HIST],
                          &brw_stats->hist[BRW_W_RPC_HIST], 0);

        display_brw_stats(seq, "I/O time (1/1000s)", "ios",
                          &brw_stats->hist[BRW_R_IO_TIME],
                          &brw_stats->hist[BRW_W_IO_TIME], 1);

        display_brw_stats(seq, "disk I/O size", "ios",
                          &brw_stats->hist[BRW_R_DISK_IOSIZE],
                          &brw_stats->hist[BRW_W_DISK_IOSIZE], 1);
}

static int osd_brw_stats_seq_show(struct seq_file *seq, void *v)
{
        struct osd_device *osd = seq->private;

        brw_stats_show(seq, &osd->od_brw_stats);

        return 0;
}

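/*
 * Writing anything to the "brw_stats" proc file clears all histograms.
 * Usage sketch (the parameter path is an assumption based on the usual
 * Lustre tunable naming, not taken from this file):
 *   lctl set_param osd-ldiskfs.*.brw_stats=0
 */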
static ssize_t osd_brw_stats_seq_write(struct file *file,
                                       const char __user *buf,
                                       size_t len, loff_t *off)
{
        struct seq_file *seq = file->private_data;
        struct osd_device *osd = seq->private;
        int i;

        for (i = 0; i < BRW_LAST; i++)
                lprocfs_oh_clear(&osd->od_brw_stats.hist[i]);

        return len;
}

LPROC_SEQ_FOPS(osd_brw_stats);

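/*
 * Register the per-device "stats" and "brw_stats" proc entries and
 * initialize the counters they expose.
 */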
static int osd_stats_init(struct osd_device *osd)
{
        int i, result;
        ENTRY;

        for (i = 0; i < BRW_LAST; i++)
                spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);

        osd->od_stats = lprocfs_alloc_stats(LPROC_OSD_LAST, 0);
        if (osd->od_stats != NULL) {
                result = lprocfs_register_stats(osd->od_proc_entry, "stats",
                                                osd->od_stats);
                if (result)
                        GOTO(out, result);

                lprocfs_counter_init(osd->od_stats, LPROC_OSD_GET_PAGE,
                                     LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
                                     "get_page", "usec");
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_NO_PAGE,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "get_page_failures", "num");
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "cache_access", "pages");
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_CACHE_HIT,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "cache_hit", "pages");
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_CACHE_MISS,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "cache_miss", "pages");
#if OSD_THANDLE_STATS
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_THANDLE_STARTING,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "thandle starting", "usec");
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_THANDLE_OPEN,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "thandle open", "usec");
                lprocfs_counter_init(osd->od_stats, LPROC_OSD_THANDLE_CLOSING,
                                     LPROCFS_CNTR_AVGMINMAX,
                                     "thandle closing", "usec");
#endif
                result = lprocfs_seq_create(osd->od_proc_entry, "brw_stats",
                                            0644, &osd_brw_stats_fops, osd);
        } else
                result = -ENOMEM;

out:
        RETURN(result);
}

static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
                           char *buf)
{
        return sprintf(buf, "ldiskfs\n");
}
LUSTRE_RO_ATTR(fstype);

static ssize_t mntdev_show(struct kobject *kobj, struct attribute *attr,
                           char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%s\n", osd->od_mntdev);
}
LUSTRE_RO_ATTR(mntdev);

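/*
 * "read_cache_enable" controls whether pages read from disk are kept in
 * the server-side page cache for later reads (od_read_cache).
 */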
static ssize_t read_cache_enable_show(struct kobject *kobj,
                                      struct attribute *attr,
                                      char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%u\n", osd->od_read_cache);
}

static ssize_t read_cache_enable_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);
        bool val;
        int rc;

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        rc = kstrtobool(buffer, &val);
        if (rc)
                return rc;

        osd->od_read_cache = !!val;
        return count;
}
LUSTRE_RW_ATTR(read_cache_enable);

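/*
 * "writethrough_cache_enable" controls whether newly written pages are
 * retained in the server-side page cache so later reads can be served
 * without going back to disk (od_writethrough_cache).
 */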
static ssize_t writethrough_cache_enable_show(struct kobject *kobj,
                                              struct attribute *attr,
                                              char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%u\n", osd->od_writethrough_cache);
}

static ssize_t writethrough_cache_enable_store(struct kobject *kobj,
                                               struct attribute *attr,
                                               const char *buffer,
                                               size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);
        bool val;
        int rc;

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        rc = kstrtobool(buffer, &val);
        if (rc)
                return rc;

        osd->od_writethrough_cache = !!val;
        return count;
}
LUSTRE_RW_ATTR(writethrough_cache_enable);

static ssize_t fallocate_zero_blocks_show(struct kobject *kobj,
                                          struct attribute *attr,
                                          char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        return scnprintf(buf, PAGE_SIZE, "%d\n", osd->od_fallocate_zero_blocks);
}

/*
 * Set how fallocate() interacts with the backing filesystem:
 * -1: fallocate is disabled and returns -EOPNOTSUPP
 *  0: fallocate allocates unwritten extents (like ext4)
 *  1: fallocate zeroes allocated extents on disk
 */
static ssize_t fallocate_zero_blocks_store(struct kobject *kobj,
                                           struct attribute *attr,
                                           const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);
        long val;
        int rc;

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        rc = kstrtol(buffer, 0, &val);
        if (rc)
                return rc;

        if (val < -1 || val > 1)
                return -EINVAL;

        osd->od_fallocate_zero_blocks = val;
        return count;
}
LUSTRE_RW_ATTR(fallocate_zero_blocks);
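/*
 * Usage sketch for the attribute above (the parameter path is an
 * assumption based on the attribute and module names, not taken from
 * this file):
 *   lctl set_param osd-ldiskfs.*.fallocate_zero_blocks=1
 */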

ssize_t force_sync_store(struct kobject *kobj, struct attribute *attr,
                         const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);
        struct lu_env env;
        int rc;

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        rc = lu_env_init(&env, LCT_LOCAL);
        if (rc)
                return rc;

        rc = dt_sync(&env, dt);
        lu_env_fini(&env);

        return rc == 0 ? count : rc;
}
LUSTRE_WO_ATTR(force_sync);

static ssize_t nonrotational_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%u\n", osd->od_nonrotational);
}

static ssize_t nonrotational_store(struct kobject *kobj,
                                   struct attribute *attr, const char *buffer,
                                   size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *osd = osd_dt_dev(dt);
        bool val;
        int rc;

        LASSERT(osd);
        if (unlikely(!osd->od_mnt))
                return -EINPROGRESS;

        rc = kstrtobool(buffer, &val);
        if (rc)
                return rc;

        osd->od_nonrotational = val;
        return count;
}
LUSTRE_RW_ATTR(nonrotational);

static ssize_t pdo_show(struct kobject *kobj, struct attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%s\n", ldiskfs_pdo ? "ON" : "OFF");
}

static ssize_t pdo_store(struct kobject *kobj, struct attribute *attr,
                         const char *buffer, size_t count)
{
        bool pdo;
        int rc;

        rc = kstrtobool(buffer, &pdo);
        if (rc != 0)
                return rc;

        ldiskfs_pdo = pdo;

        return count;
}
LUSTRE_RW_ATTR(pdo);

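/*
 * "auto_scrub" reads and sets os_auto_scrub_interval, which controls the
 * automatic triggering of OI scrub for this device.
 */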
static ssize_t auto_scrub_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        return scnprintf(buf, PAGE_SIZE, "%lld\n",
                         dev->od_scrub.os_scrub.os_auto_scrub_interval);
}

static ssize_t auto_scrub_store(struct kobject *kobj, struct attribute *attr,
                                const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);
        s64 val;
        int rc;

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        rc = kstrtoll(buffer, 0, &val);
        if (rc)
                return rc;

        dev->od_scrub.os_scrub.os_auto_scrub_interval = val;
        return count;
}
LUSTRE_RW_ATTR(auto_scrub);

static ssize_t full_scrub_ratio_show(struct kobject *kobj,
                                     struct attribute *attr,
                                     char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%llu\n", dev->od_full_scrub_ratio);
}

static ssize_t full_scrub_ratio_store(struct kobject *kobj,
                                      struct attribute *attr,
                                      const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);
        s64 val;
        int rc;

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        rc = kstrtoll(buffer, 0, &val);
        if (rc)
                return rc;

        if (val < 0)
                return -EINVAL;

        dev->od_full_scrub_ratio = val;
        return count;
}
LUSTRE_RW_ATTR(full_scrub_ratio);

static ssize_t full_scrub_threshold_rate_show(struct kobject *kobj,
                                              struct attribute *attr,
                                              char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%llu (bad OI mappings/minute)\n",
                       dev->od_full_scrub_threshold_rate);
}

static ssize_t full_scrub_threshold_rate_store(struct kobject *kobj,
                                               struct attribute *attr,
                                               const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);
        u64 val;
        int rc;

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        rc = kstrtoull(buffer, 0, &val);
        if (rc != 0)
                return rc;

        dev->od_full_scrub_threshold_rate = val;
        return count;
}
LUSTRE_RW_ATTR(full_scrub_threshold_rate);

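/*
 * Report the smallest per-CPU extent-bytes allocation estimate across all
 * online CPUs.
 */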
static ssize_t extent_bytes_allocation_show(struct kobject *kobj,
                                            struct attribute *attr, char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);
        int i;
        unsigned int min = (unsigned int)(~0), cur;

        for_each_online_cpu(i) {
                cur = *per_cpu_ptr(dev->od_extent_bytes_percpu, i);
                if (cur < min)
                        min = cur;
        }
        return snprintf(buf, PAGE_SIZE, "%u\n", min);
}
LUSTRE_RO_ATTR(extent_bytes_allocation);

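/*
 * Debugfs "oi_scrub" file: dump the current OI scrub status via
 * osd_scrub_dump().
 */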
static int ldiskfs_osd_oi_scrub_seq_show(struct seq_file *m, void *data)
{
        struct osd_device *dev = osd_dt_dev((struct dt_device *)m->private);

        LASSERT(dev != NULL);
        if (unlikely(dev->od_mnt == NULL))
                return -EINPROGRESS;

        osd_scrub_dump(m, dev);
        return 0;
}

LDEBUGFS_SEQ_FOPS_RO(ldiskfs_osd_oi_scrub);

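/*
 * "readcache_max_filesize" limits the size of files whose data is kept in
 * the read cache.  Writes accept an optional size suffix (e.g. "64M") and
 * are clamped to OSD_MAX_CACHE_SIZE.
 */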
static int ldiskfs_osd_readcache_seq_show(struct seq_file *m, void *data)
{
        struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);

        LASSERT(osd != NULL);
        if (unlikely(osd->od_mnt == NULL))
                return -EINPROGRESS;

        seq_printf(m, "%llu\n", osd->od_readcache_max_filesize);
        return 0;
}

static ssize_t
ldiskfs_osd_readcache_seq_write(struct file *file, const char __user *buffer,
                                size_t count, loff_t *off)
{
        struct seq_file *m = file->private_data;
        struct dt_device *dt = m->private;
        struct osd_device *osd = osd_dt_dev(dt);
        char kernbuf[22] = "";
        u64 val;
        int rc;

        LASSERT(osd != NULL);
        if (unlikely(osd->od_mnt == NULL))
                return -EINPROGRESS;

        if (count >= sizeof(kernbuf))
                return -EINVAL;

        if (copy_from_user(kernbuf, buffer, count))
                return -EFAULT;
        kernbuf[count] = 0;

        rc = sysfs_memparse(kernbuf, count, &val, "B");
        if (rc < 0)
                return rc;

        osd->od_readcache_max_filesize = val > OSD_MAX_CACHE_SIZE ?
                                         OSD_MAX_CACHE_SIZE : val;
        return count;
}

LDEBUGFS_SEQ_FOPS(ldiskfs_osd_readcache);

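/*
 * "readcache_max_io_mb" and "writethrough_max_io_mb" (below) cap the size
 * of a single cached I/O; input is parsed with a default unit of MiB and
 * values above PTLRPC_MAX_BRW_SIZE are rejected with -ERANGE.
 */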
static int ldiskfs_osd_readcache_max_io_seq_show(struct seq_file *m, void *data)
{
        struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);

        LASSERT(osd != NULL);
        if (unlikely(osd->od_mnt == NULL))
                return -EINPROGRESS;

        seq_printf(m, "%lu\n", osd->od_readcache_max_iosize >> 20);
        return 0;
}

static ssize_t
ldiskfs_osd_readcache_max_io_seq_write(struct file *file,
                                       const char __user *buffer,
                                       size_t count, loff_t *off)
{
        struct seq_file *m = file->private_data;
        struct dt_device *dt = m->private;
        struct osd_device *osd = osd_dt_dev(dt);
        char kernbuf[22] = "";
        u64 val;
        int rc;

        LASSERT(osd != NULL);
        if (unlikely(osd->od_mnt == NULL))
                return -EINPROGRESS;

        if (count >= sizeof(kernbuf))
                return -EINVAL;

        if (copy_from_user(kernbuf, buffer, count))
                return -EFAULT;
        kernbuf[count] = 0;

        rc = sysfs_memparse(kernbuf, count, &val, "MiB");
        if (rc < 0)
                return rc;

        if (val > PTLRPC_MAX_BRW_SIZE)
                return -ERANGE;
        osd->od_readcache_max_iosize = val;
        return count;
}

LDEBUGFS_SEQ_FOPS(ldiskfs_osd_readcache_max_io);

static int ldiskfs_osd_writethrough_max_io_seq_show(struct seq_file *m,
                                                    void *data)
{
        struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);

        LASSERT(osd != NULL);
        if (unlikely(osd->od_mnt == NULL))
                return -EINPROGRESS;

        seq_printf(m, "%lu\n", osd->od_writethrough_max_iosize >> 20);
        return 0;
}

static ssize_t
ldiskfs_osd_writethrough_max_io_seq_write(struct file *file,
                                       const char __user *buffer,
                                       size_t count, loff_t *off)
{
        struct seq_file *m = file->private_data;
        struct dt_device *dt = m->private;
        struct osd_device *osd = osd_dt_dev(dt);
        char kernbuf[22] = "";
        u64 val;
        int rc;

        LASSERT(osd != NULL);
        if (unlikely(osd->od_mnt == NULL))
                return -EINPROGRESS;

        if (count >= sizeof(kernbuf))
                return -EINVAL;

        if (copy_from_user(kernbuf, buffer, count))
                return -EFAULT;
        kernbuf[count] = 0;

        rc = sysfs_memparse(kernbuf, count, &val, "MiB");
        if (rc < 0)
                return rc;

        if (val > PTLRPC_MAX_BRW_SIZE)
                return -ERANGE;
        osd->od_writethrough_max_iosize = val;
        return count;
}

LDEBUGFS_SEQ_FOPS(ldiskfs_osd_writethrough_max_io);

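/*
 * Compatibility tunable: enabling "index_in_idif" records the
 * OBD_ROCOMPAT_IDX_IN_IDIF feature in the server data; once set it cannot
 * be reverted.
 */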
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 52, 0)
static ssize_t index_in_idif_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%d\n", (int)(dev->od_index_in_idif));
}

static ssize_t index_in_idif_store(struct kobject *kobj,
                                   struct attribute *attr,
                                   const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);
        struct lu_target *tgt;
        struct lu_env env;
        bool val;
        int rc;

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        rc = kstrtobool(buffer, &val);
        if (rc)
                return rc;

        if (dev->od_index_in_idif) {
                if (val)
                        return count;

                LCONSOLE_WARN("%s: OST-index in IDIF has been enabled, "
                              "it cannot be reverted back.\n", osd_name(dev));
                return -EPERM;
        }

        if (!val)
                return count;

        rc = lu_env_init(&env, LCT_DT_THREAD);
        if (rc)
                return rc;

        tgt = dev->od_dt_dev.dd_lu_dev.ld_site->ls_tgt;
        tgt->lut_lsd.lsd_feature_rocompat |= OBD_ROCOMPAT_IDX_IN_IDIF;
        rc = tgt_server_data_update(&env, tgt, 1);
        lu_env_fini(&env);
        if (rc < 0)
                return rc;

        LCONSOLE_INFO("%s: enable OST-index in IDIF successfully, "
                      "it cannot be reverted back.\n", osd_name(dev));

        dev->od_index_in_idif = 1;
        return count;
}
LUSTRE_RW_ATTR(index_in_idif);

int osd_register_proc_index_in_idif(struct osd_device *osd)
{
        struct dt_device *dt = &osd->od_dt_dev;

        return sysfs_create_file(&dt->dd_kobj, &lustre_attr_index_in_idif.attr);
}
#endif

static ssize_t index_backup_show(struct kobject *kobj, struct attribute *attr,
                                 char *buf)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                            dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        return sprintf(buf, "%d\n", dev->od_index_backup_policy);
}

ssize_t index_backup_store(struct kobject *kobj, struct attribute *attr,
                           const char *buffer, size_t count)
{
        struct dt_device *dt = container_of(kobj, struct dt_device,
                                           dd_kobj);
        struct osd_device *dev = osd_dt_dev(dt);
        int val;
        int rc;

        LASSERT(dev);
        if (unlikely(!dev->od_mnt))
                return -EINPROGRESS;

        rc = kstrtoint(buffer, 0, &val);
        if (rc)
                return rc;

        dev->od_index_backup_policy = val;
        return count;
}
LUSTRE_RW_ATTR(index_backup);

struct ldebugfs_vars ldebugfs_osd_obd_vars[] = {
        { .name =       "oi_scrub",
          .fops =       &ldiskfs_osd_oi_scrub_fops      },
        { .name =       "readcache_max_filesize",
          .fops =       &ldiskfs_osd_readcache_fops     },
        { .name =       "readcache_max_io_mb",
          .fops =       &ldiskfs_osd_readcache_max_io_fops      },
        { .name =       "writethrough_max_io_mb",
          .fops =       &ldiskfs_osd_writethrough_max_io_fops   },
        { NULL }
};

static struct attribute *ldiskfs_attrs[] = {
        &lustre_attr_read_cache_enable.attr,
        &lustre_attr_writethrough_cache_enable.attr,
        &lustre_attr_fstype.attr,
        &lustre_attr_mntdev.attr,
        &lustre_attr_fallocate_zero_blocks.attr,
        &lustre_attr_force_sync.attr,
        &lustre_attr_nonrotational.attr,
        &lustre_attr_index_backup.attr,
        &lustre_attr_auto_scrub.attr,
        &lustre_attr_pdo.attr,
        &lustre_attr_full_scrub_ratio.attr,
        &lustre_attr_full_scrub_threshold_rate.attr,
        &lustre_attr_extent_bytes_allocation.attr,
        NULL,
};

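/*
 * Register the sysfs, debugfs and procfs entries for an osd-ldiskfs
 * device and set up its statistics counters.
 */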
int osd_procfs_init(struct osd_device *osd, const char *name)
{
        struct obd_type *type;
        int rc;

        ENTRY;

        /* at the moment there is no linkage between lu_type
         * and obd_type, so we look up the obd_type this way
         */
        type = class_search_type(LUSTRE_OSD_LDISKFS_NAME);

        LASSERT(name);
        LASSERT(type);

        CDEBUG(D_CONFIG, "%s: register osd-ldiskfs tunable parameters\n", name);

        /* put reference taken by class_search_type */
        kobject_put(&type->typ_kobj);

        osd->od_dt_dev.dd_ktype.default_attrs = ldiskfs_attrs;
        rc = dt_tunables_init(&osd->od_dt_dev, type, name,
                              ldebugfs_osd_obd_vars);
        if (rc) {
                CERROR("%s: cannot setup sysfs / debugfs entry: %d\n",
                       name, rc);
                GOTO(out, rc);
        }

        if (osd->od_proc_entry)
                RETURN(0);

        /* Find the type procroot and add the proc entry for this device */
        osd->od_proc_entry = lprocfs_register(name, type->typ_procroot,
                                              NULL, &osd->od_dt_dev);
        if (IS_ERR(osd->od_proc_entry)) {
                rc = PTR_ERR(osd->od_proc_entry);
                CERROR("Error %d setting up lprocfs for %s\n",
                       rc, name);
                osd->od_proc_entry = NULL;
                GOTO(out, rc);
        }

        rc = osd_stats_init(osd);

        EXIT;
out:
        if (rc)
                osd_procfs_fini(osd);
        return rc;
}

int osd_procfs_fini(struct osd_device *osd)
{
        if (osd->od_stats)
                lprocfs_free_stats(&osd->od_stats);

        if (osd->od_proc_entry)
                lprocfs_remove(&osd->od_proc_entry);

        return dt_tunables_fini(&osd->od_dt_dev);
}
#endif