LU-13096 llite: fix potential overflow in ll_max_cached_mb_seq_write()
lustre/llite/lproc_llite.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #include <linux/uidgid.h>
37
38 #include <uapi/linux/lustre/lustre_param.h>
39 #include <lprocfs_status.h>
40 #include <obd_support.h>
41
42 #include "llite_internal.h"
43 #include "vvp_internal.h"
44
45 static struct kobject *llite_kobj;
46 static struct dentry *llite_root;
47
48 static void llite_kobj_release(struct kobject *kobj)
49 {
50         if (!IS_ERR_OR_NULL(llite_root)) {
51                 debugfs_remove(llite_root);
52                 llite_root = NULL;
53         }
54
55         kfree(kobj);
56 }
57
58 static struct kobj_type llite_kobj_ktype = {
59         .release        = llite_kobj_release,
60         .sysfs_ops      = &lustre_sysfs_ops,
61 };
62
63 int llite_tunables_register(void)
64 {
65         int rc;
66
67         llite_kobj = kzalloc(sizeof(*llite_kobj), GFP_KERNEL);
68         if (!llite_kobj)
69                 return -ENOMEM;
70
71         llite_kobj->kset = lustre_kset;
72         rc = kobject_init_and_add(llite_kobj, &llite_kobj_ktype,
73                                   &lustre_kset->kobj, "%s", "llite");
74         if (rc)
75                 goto free_kobj;
76
77         llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
78         if (IS_ERR_OR_NULL(llite_root)) {
79                 rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
80                 llite_root = NULL;
81 free_kobj:
82                 kobject_put(llite_kobj);
83                 llite_kobj = NULL;
84         }
85
86         return rc;
87 }
88
89 void llite_tunables_unregister(void)
90 {
91         kobject_put(llite_kobj);
92         llite_kobj = NULL;
93 }
94
95 /* <debugfs>/lustre/llite mount point registration */
96 static const struct file_operations ll_rw_extents_stats_fops;
97 static const struct file_operations ll_rw_extents_stats_pp_fops;
98 static const struct file_operations ll_rw_offset_stats_fops;
99
100 /**
101  * ll_stats_pid_write() - Determine if stats collection should be enabled
102  * @buf: Buffer containing the data written
103  * @len: Number of bytes in the buffer
104  *
105  * Several proc files begin collecting stats when a value is written, and stop
106  * collecting when either '0' or 'disable' is written. This function checks the
107  * written value to see if collection should be enabled or disabled.
108  *
109  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
110  * equivalent of a number is written, that number is returned. Otherwise,
111  * 1 is returned. Non-zero return values indicate collection should be enabled.
112  */
113 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
114 {
115         unsigned long long value = 1;
116         char kernbuf[16];
117         int rc;
118
119         rc = kstrtoull_from_user(buf, len, 0, &value);
120         if (rc < 0 && len < sizeof(kernbuf)) {
121                 if (copy_from_user(kernbuf, buf, len))
122                         return -EFAULT;
123                 kernbuf[len] = 0;
124
125                 if (kernbuf[len - 1] == '\n')
126                         kernbuf[len - 1] = 0;
127
128                 if (strncasecmp(kernbuf, "disable", 7) == 0)
129                         value = 0;
130         }
131
132         return value;
133 }
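/*
 * Illustrative usage of the enable/disable protocol above (path is an
 * example; the per-mount debugfs directory is named <fsname>-<instance>):
 *
 *   echo 1       > /sys/kernel/debug/lustre/llite/<mount>/extents_stats
 *   echo disable > /sys/kernel/debug/lustre/llite/<mount>/extents_stats
 *
 * Writing "0" behaves like "disable"; any other value enables collection.
 */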
134
135 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
136                               char *buf)
137 {
138         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
139                                               ll_kset.kobj);
140         struct obd_statfs osfs;
141         int rc;
142
143         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
144         if (rc)
145                 return rc;
146
147         return sprintf(buf, "%u\n", osfs.os_bsize);
148 }
149 LUSTRE_RO_ATTR(blocksize);
150
151 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
152                                    char *buf)
153 {
154         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
155                                               ll_kset.kobj);
156
157         return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
158 }
159
160 static ssize_t stat_blocksize_store(struct kobject *kobj,
161                                     struct attribute *attr,
162                                     const char *buffer,
163                                     size_t count)
164 {
165         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
166                                               ll_kset.kobj);
167         unsigned int val;
168         int rc;
169
170         rc = kstrtouint(buffer, 10, &val);
171         if (rc)
172                 return rc;
173
174         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
175                 return -ERANGE;
176
177         sbi->ll_stat_blksize = val;
178
179         return count;
180 }
181 LUSTRE_RW_ATTR(stat_blocksize);
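/*
 * Example values for the power-of-two check above (assuming 4 KiB pages):
 * 0 disables the override; 4096, 65536 and 1048576 are accepted; 512
 * (smaller than PAGE_SIZE) and 12345 (not a power of two) return -ERANGE.
 */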
182
183 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
184                                 char *buf)
185 {
186         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
187                                               ll_kset.kobj);
188         struct obd_statfs osfs;
189         u32 blk_size;
190         u64 result;
191         int rc;
192
193         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
194         if (rc)
195                 return rc;
196
197         blk_size = osfs.os_bsize >> 10;
198         result = osfs.os_blocks;
199
200         while (blk_size >>= 1)
201                 result <<= 1;
202
203         return sprintf(buf, "%llu\n", result);
204 }
205 LUSTRE_RO_ATTR(kbytestotal);
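/*
 * Worked example for the shift loop above, which kbytesfree_show() and
 * kbytesavail_show() below repeat: with os_bsize = 4096, blk_size starts at
 * 4096 >> 10 = 4, the loop runs twice, and the block count is reported in
 * KiB as os_blocks << 2.
 */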
206
207 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
208                                char *buf)
209 {
210         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
211                                               ll_kset.kobj);
212         struct obd_statfs osfs;
213         u32 blk_size;
214         u64 result;
215         int rc;
216
217         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
218         if (rc)
219                 return rc;
220
221         blk_size = osfs.os_bsize >> 10;
222         result = osfs.os_bfree;
223
224         while (blk_size >>= 1)
225                 result <<= 1;
226
227         return sprintf(buf, "%llu\n", result);
228 }
229 LUSTRE_RO_ATTR(kbytesfree);
230
231 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
232                                 char *buf)
233 {
234         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
235                                               ll_kset.kobj);
236         struct obd_statfs osfs;
237         u32 blk_size;
238         u64 result;
239         int rc;
240
241         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
242         if (rc)
243                 return rc;
244
245         blk_size = osfs.os_bsize >> 10;
246         result = osfs.os_bavail;
247
248         while (blk_size >>= 1)
249                 result <<= 1;
250
251         return sprintf(buf, "%llu\n", result);
252 }
253 LUSTRE_RO_ATTR(kbytesavail);
254
255 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
256                                char *buf)
257 {
258         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
259                                               ll_kset.kobj);
260         struct obd_statfs osfs;
261         int rc;
262
263         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
264         if (rc)
265                 return rc;
266
267         return sprintf(buf, "%llu\n", osfs.os_files);
268 }
269 LUSTRE_RO_ATTR(filestotal);
270
271 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
272                               char *buf)
273 {
274         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
275                                               ll_kset.kobj);
276         struct obd_statfs osfs;
277         int rc;
278
279         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
280         if (rc)
281                 return rc;
282
283         return sprintf(buf, "%llu\n", osfs.os_ffree);
284 }
285 LUSTRE_RO_ATTR(filesfree);
286
287 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
288                                 char *buf)
289 {
290         return sprintf(buf, "local client\n");
291 }
292 LUSTRE_RO_ATTR(client_type);
293
294 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
295                            char *buf)
296 {
297         return sprintf(buf, "lustre\n");
298 }
299 LUSTRE_RO_ATTR(fstype);
300
301 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
302                          char *buf)
303 {
304         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
305                                               ll_kset.kobj);
306
307         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
308 }
309 LUSTRE_RO_ATTR(uuid);
310
311 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
312 {
313         struct super_block *sb = m->private;
314
315         /*
316          * See description of statistical counters in struct cl_site, and
317          * struct lu_site.
318          */
319         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
320 }
321
322 LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
323
324 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
325 {
326         struct super_block *sb = m->private;
327         struct ll_sb_info *sbi = ll_s2sbi(sb);
328         unsigned long ra_max_mb;
329
330         spin_lock(&sbi->ll_lock);
331         ra_max_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages);
332         spin_unlock(&sbi->ll_lock);
333
334         seq_printf(m, "%lu\n", ra_max_mb);
335         return 0;
336 }
337
338 static ssize_t
339 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
340                               size_t count, loff_t *off)
341 {
342         struct seq_file *m = file->private_data;
343         struct super_block *sb = m->private;
344         struct ll_sb_info *sbi = ll_s2sbi(sb);
345         s64 ra_max_mb, pages_number;
346         int rc;
347
348         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_mb, 'M');
349         if (rc)
350                 return rc;
351
352         pages_number = round_up(ra_max_mb, 1024 * 1024) >> PAGE_SHIFT;
353         /* cap at 1/2 of RAM */
354         if (pages_number < 0 || pages_number > cfs_totalram_pages() / 2) {
355                 CERROR("%s: can't set max_read_ahead_mb=%llu > %luMB\n",
356                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
357                        PAGES_TO_MiB(cfs_totalram_pages() / 2));
358                 return -ERANGE;
359         }
360
361         spin_lock(&sbi->ll_lock);
362         sbi->ll_ra_info.ra_max_pages = pages_number;
363         spin_unlock(&sbi->ll_lock);
364
365         return count;
366 }
367
368 LDEBUGFS_SEQ_FOPS(ll_max_readahead_mb);
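/*
 * Illustrative tuning (parameter path assumed from the "max_read_ahead_mb"
 * entry registered below): the written string is parsed with 'M' as the
 * default unit, so "lctl set_param llite.*.max_read_ahead_mb=256" and
 * writing "256M" to the debugfs file both request a 256 MiB window, which
 * the check above caps at half of RAM.
 */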
369
370 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
371 {
372         struct super_block *sb = m->private;
373         struct ll_sb_info *sbi = ll_s2sbi(sb);
374         unsigned long ra_max_file_mb;
375
376         spin_lock(&sbi->ll_lock);
377         ra_max_file_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file);
378         spin_unlock(&sbi->ll_lock);
379
380         seq_printf(m, "%lu\n", ra_max_file_mb);
381         return 0;
382 }
383
384 static ssize_t
385 ll_max_readahead_per_file_mb_seq_write(struct file *file,
386                                        const char __user *buffer,
387                                        size_t count, loff_t *off)
388 {
389         struct seq_file *m = file->private_data;
390         struct super_block *sb = m->private;
391         struct ll_sb_info *sbi = ll_s2sbi(sb);
392         s64 ra_max_file_mb, pages_number;
393         int rc;
394
395         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_file_mb,
396                                            'M');
397         if (rc)
398                 return rc;
399
400         pages_number = round_up(ra_max_file_mb, 1024 * 1024) >> PAGE_SHIFT;
401         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
402                 CERROR("%s: can't set max_read_ahead_per_file_mb=%llu > max_read_ahead_mb=%lu\n",
403                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
404                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
405                 return -ERANGE;
406         }
407
408         spin_lock(&sbi->ll_lock);
409         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
410         spin_unlock(&sbi->ll_lock);
411
412         return count;
413 }
414
415 LDEBUGFS_SEQ_FOPS(ll_max_readahead_per_file_mb);
416
417 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
418 {
419         struct super_block *sb = m->private;
420         struct ll_sb_info *sbi = ll_s2sbi(sb);
421         unsigned long ra_max_whole_mb;
422
423         spin_lock(&sbi->ll_lock);
424         ra_max_whole_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages);
425         spin_unlock(&sbi->ll_lock);
426
427         seq_printf(m, "%lu\n", ra_max_whole_mb);
428         return 0;
429 }
430
431 static ssize_t
432 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
433                                      const char __user *buffer,
434                                      size_t count, loff_t *off)
435 {
436         struct seq_file *m = file->private_data;
437         struct super_block *sb = m->private;
438         struct ll_sb_info *sbi = ll_s2sbi(sb);
439         s64 ra_max_whole_mb, pages_number;
440         int rc;
441
442         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_whole_mb,
443                                            'M');
444         if (rc)
445                 return rc;
446
447         pages_number = round_up(ra_max_whole_mb, 1024 * 1024) >> PAGE_SHIFT;
448         /* Cap this at the current max readahead window size; the readahead
449          * algorithm does this anyway, so it is pointless to set it larger.
450          */
451         if (pages_number < 0 ||
452             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
453                 CERROR("%s: can't set max_read_ahead_whole_mb=%llu > max_read_ahead_per_file_mb=%lu\n",
454                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
455                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
456                 return -ERANGE;
457         }
458
459         spin_lock(&sbi->ll_lock);
460         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
461         spin_unlock(&sbi->ll_lock);
462
463         return count;
464 }
465
466 LDEBUGFS_SEQ_FOPS(ll_max_read_ahead_whole_mb);
467
468 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
469 {
470         struct super_block     *sb    = m->private;
471         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
472         struct cl_client_cache *cache = sbi->ll_cache;
473         long max_cached_mb;
474         long unused_mb;
475
476         max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
477         unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
478         seq_printf(m, "users: %d\n"
479                       "max_cached_mb: %ld\n"
480                       "used_mb: %ld\n"
481                       "unused_mb: %ld\n"
482                       "reclaim_count: %u\n",
483                    atomic_read(&cache->ccc_users),
484                    max_cached_mb,
485                    max_cached_mb - unused_mb,
486                    unused_mb,
487                    cache->ccc_lru_shrinkers);
488         return 0;
489 }
490
491 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
492                                           const char __user *buffer,
493                                           size_t count, loff_t *off)
494 {
495         struct seq_file *m = file->private_data;
496         struct super_block *sb = m->private;
497         struct ll_sb_info *sbi = ll_s2sbi(sb);
498         struct cl_client_cache *cache = sbi->ll_cache;
499         struct lu_env *env;
500         long diff = 0;
501         long nrpages = 0;
502         __u16 refcheck;
503         __s64 pages_number;
504         int rc;
505         char kernbuf[128];
506
507         ENTRY;
508         if (count >= sizeof(kernbuf))
509                 RETURN(-EINVAL);
510
511         if (copy_from_user(kernbuf, buffer, count))
512                 RETURN(-EFAULT);
513         kernbuf[count] = 0;
514
515         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
516                   kernbuf;
517         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
518         if (rc)
519                 RETURN(rc);
520
521         pages_number >>= PAGE_SHIFT;
522
523         if (pages_number < 0 || pages_number > cfs_totalram_pages()) {
524                 CERROR("%s: can't set max cache more than %lu MB\n",
525                        sbi->ll_fsname,
526                        PAGES_TO_MiB(cfs_totalram_pages()));
527                 RETURN(-ERANGE);
528         }
529         /* Allow enough cache so clients can make well-formed RPCs */
530         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
531
532         spin_lock(&sbi->ll_lock);
533         diff = pages_number - cache->ccc_lru_max;
534         spin_unlock(&sbi->ll_lock);
535
536         /* easy - add more LRU slots. */
537         if (diff >= 0) {
538                 atomic_long_add(diff, &cache->ccc_lru_left);
539                 GOTO(out, rc = 0);
540         }
541
542         env = cl_env_get(&refcheck);
543         if (IS_ERR(env))
544                 RETURN(PTR_ERR(env));
545
546         diff = -diff;
547         while (diff > 0) {
548                 long tmp;
549
550                 /* reduce LRU budget from free slots. */
551                 do {
552                         long ov, nv, retv;
553
554                         ov = atomic_long_read(&cache->ccc_lru_left);
555                         if (ov == 0)
556                                 break;
557
558                         nv = ov > diff ? ov - diff : 0;
559                         retv = atomic_long_cmpxchg(&cache->ccc_lru_left,
560                                                    ov, nv);
561                         if (likely(ov == retv)) {
562                                 diff -= ov - nv;
563                                 nrpages += ov - nv;
564                                 break;
565                         }
566                 } while (1);
567
568                 if (diff <= 0)
569                         break;
570
571                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
572                         rc = -ENODEV;
573                         break;
574                 }
575
576                 /* difficult - have to ask OSCs to drop LRU slots. */
577                 tmp = diff << 1;
578                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
579                                 sizeof(KEY_CACHE_LRU_SHRINK),
580                                 KEY_CACHE_LRU_SHRINK,
581                                 sizeof(tmp), &tmp, NULL);
582                 if (rc < 0)
583                         break;
584         }
585         cl_env_put(env, &refcheck);
586
587 out:
588         if (rc >= 0) {
589                 spin_lock(&sbi->ll_lock);
590                 cache->ccc_lru_max = pages_number;
591                 spin_unlock(&sbi->ll_lock);
592                 rc = count;
593         } else {
594                 atomic_long_add(nrpages, &cache->ccc_lru_left);
595         }
596         return rc;
597 }
598
599 LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
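/*
 * Sketch of the write protocol handled above: the input may be a bare value
 * ("2048", meaning MiB by default) or the "max_cached_mb: 2048" form shown
 * when reading the file back.  Growing the limit simply adds LRU slots;
 * shrinking first claims free slots via the cmpxchg loop and then, if still
 * short, asks the OSCs to drop pages through KEY_CACHE_LRU_SHRINK.
 */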
600
601 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
602                               char *buf)
603 {
604         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
605                                               ll_kset.kobj);
606
607         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
608 }
609
610 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
611                                const char *buffer, size_t count)
612 {
613         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
614                                               ll_kset.kobj);
615         bool val;
616         int tmp;
617         int rc;
618
619         if (!sbi->ll_dt_exp)
620                 /* Not set up yet */
621                 return -EAGAIN;
622
623         rc = kstrtobool(buffer, &val);
624         if (rc)
625                 return rc;
626         if (val)
627                 sbi->ll_flags |= LL_SBI_CHECKSUM;
628         else
629                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
630         tmp = val;
631
632         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
633                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
634         if (rc)
635                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
636
637         return count;
638 }
639 LUSTRE_RW_ATTR(checksums);
640
641 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
642
643 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
644                               enum stats_track_type type)
645 {
646         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
647                                               ll_kset.kobj);
648
649         if (sbi->ll_stats_track_type == type)
650                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
651         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
652                 return sprintf(buf, "0 (all)\n");
653
654         return sprintf(buf, "untracked\n");
655 }
656
657 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
658                               size_t count, enum stats_track_type type)
659 {
660         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
661                                               ll_kset.kobj);
662         unsigned long pid;
663         int rc;
664
665         rc = kstrtoul(buffer, 10, &pid);
666         if (rc)
667                 return rc;
668
669         sbi->ll_stats_track_id = pid;
670         if (pid == 0)
671                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
672         else
673                 sbi->ll_stats_track_type = type;
674         lprocfs_clear_stats(sbi->ll_stats);
675         return count;
676 }
677
678 static ssize_t stats_track_pid_show(struct kobject *kobj,
679                                     struct attribute *attr,
680                                     char *buf)
681 {
682         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
683 }
684
685 static ssize_t stats_track_pid_store(struct kobject *kobj,
686                                      struct attribute *attr,
687                                      const char *buffer,
688                                      size_t count)
689 {
690         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
691 }
692 LUSTRE_RW_ATTR(stats_track_pid);
693
694 static ssize_t stats_track_ppid_show(struct kobject *kobj,
695                                      struct attribute *attr,
696                                      char *buf)
697 {
698         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
699 }
700
701 static ssize_t stats_track_ppid_store(struct kobject *kobj,
702                                       struct attribute *attr,
703                                       const char *buffer,
704                                       size_t count)
705 {
706         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
707 }
708 LUSTRE_RW_ATTR(stats_track_ppid);
709
710 static ssize_t stats_track_gid_show(struct kobject *kobj,
711                                     struct attribute *attr,
712                                     char *buf)
713 {
714         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
715 }
716
717 static ssize_t stats_track_gid_store(struct kobject *kobj,
718                                      struct attribute *attr,
719                                      const char *buffer,
720                                      size_t count)
721 {
722         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
723 }
724 LUSTRE_RW_ATTR(stats_track_gid);
725
726 static ssize_t statahead_running_max_show(struct kobject *kobj,
727                                           struct attribute *attr,
728                                           char *buf)
729 {
730         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
731                                               ll_kset.kobj);
732
733         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
734 }
735
736 static ssize_t statahead_running_max_store(struct kobject *kobj,
737                                            struct attribute *attr,
738                                            const char *buffer,
739                                            size_t count)
740 {
741         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
742                                               ll_kset.kobj);
743         unsigned long val;
744         int rc;
745
746         rc = kstrtoul(buffer, 0, &val);
747         if (rc)
748                 return rc;
749
750         if (val <= LL_SA_RUNNING_MAX) {
751                 sbi->ll_sa_running_max = val;
752                 return count;
753         }
754
755         CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
756                val, LL_SA_RUNNING_MAX);
757
758         return -ERANGE;
759 }
760 LUSTRE_RW_ATTR(statahead_running_max);
761
762 static ssize_t statahead_max_show(struct kobject *kobj,
763                                   struct attribute *attr,
764                                   char *buf)
765 {
766         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
767                                               ll_kset.kobj);
768
769         return sprintf(buf, "%u\n", sbi->ll_sa_max);
770 }
771
772 static ssize_t statahead_max_store(struct kobject *kobj,
773                                    struct attribute *attr,
774                                    const char *buffer,
775                                    size_t count)
776 {
777         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
778                                               ll_kset.kobj);
779         unsigned long val;
780         int rc;
781
782         rc = kstrtoul(buffer, 0, &val);
783         if (rc)
784                 return rc;
785
786         if (val <= LL_SA_RPC_MAX)
787                 sbi->ll_sa_max = val;
788         else
789                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
790                        val, LL_SA_RPC_MAX);
791
792         return count;
793 }
794 LUSTRE_RW_ATTR(statahead_max);
795
796 static ssize_t statahead_agl_show(struct kobject *kobj,
797                                   struct attribute *attr,
798                                   char *buf)
799 {
800         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
801                                               ll_kset.kobj);
802
803         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
804 }
805
806 static ssize_t statahead_agl_store(struct kobject *kobj,
807                                    struct attribute *attr,
808                                    const char *buffer,
809                                    size_t count)
810 {
811         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
812                                               ll_kset.kobj);
813         bool val;
814         int rc;
815
816         rc = kstrtobool(buffer, &val);
817         if (rc)
818                 return rc;
819
820         if (val)
821                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
822         else
823                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
824
825         return count;
826 }
827 LUSTRE_RW_ATTR(statahead_agl);
828
829 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
830 {
831         struct super_block *sb = m->private;
832         struct ll_sb_info *sbi = ll_s2sbi(sb);
833
834         seq_printf(m, "statahead total: %u\n"
835                       "statahead wrong: %u\n"
836                       "agl total: %u\n",
837                    atomic_read(&sbi->ll_sa_total),
838                    atomic_read(&sbi->ll_sa_wrong),
839                    atomic_read(&sbi->ll_agl_total));
840         return 0;
841 }
842
843 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
844
845 static ssize_t lazystatfs_show(struct kobject *kobj,
846                                struct attribute *attr,
847                                char *buf)
848 {
849         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
850                                               ll_kset.kobj);
851
852         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
853 }
854
855 static ssize_t lazystatfs_store(struct kobject *kobj,
856                                 struct attribute *attr,
857                                 const char *buffer,
858                                 size_t count)
859 {
860         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
861                                               ll_kset.kobj);
862         bool val;
863         int rc;
864
865         rc = kstrtobool(buffer, &val);
866         if (rc)
867                 return rc;
868
869         if (val)
870                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
871         else
872                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
873
874         return count;
875 }
876 LUSTRE_RW_ATTR(lazystatfs);
877
878 static ssize_t statfs_max_age_show(struct kobject *kobj, struct attribute *attr,
879                                    char *buf)
880 {
881         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
882                                               ll_kset.kobj);
883
884         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
885 }
886
887 static ssize_t statfs_max_age_store(struct kobject *kobj,
888                                     struct attribute *attr, const char *buffer,
889                                     size_t count)
890 {
891         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
892                                               ll_kset.kobj);
893         unsigned int val;
894         int rc;
895
896         rc = kstrtouint(buffer, 10, &val);
897         if (rc)
898                 return rc;
899         if (val > OBD_STATFS_CACHE_MAX_AGE)
900                 return -EINVAL;
901
902         sbi->ll_statfs_max_age = val;
903
904         return count;
905 }
906 LUSTRE_RW_ATTR(statfs_max_age);
907
908 static ssize_t max_easize_show(struct kobject *kobj,
909                                struct attribute *attr,
910                                char *buf)
911 {
912         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
913                                               ll_kset.kobj);
914         unsigned int ealen;
915         int rc;
916
917         rc = ll_get_max_mdsize(sbi, &ealen);
918         if (rc)
919                 return rc;
920
921         /* Limit xattr size returned to userspace based on kernel maximum */
922         return snprintf(buf, PAGE_SIZE, "%u\n",
923                         ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
924 }
925 LUSTRE_RO_ATTR(max_easize);
926
927 /**
928  * Get default_easize.
929  *
930  * \see client_obd::cl_default_mds_easize
931  *
932  * \param[in] kobj      kobject of the ll_sb_info sysfs entry
933  * \param[in] attr      unused attribute handle
934  * \param[out] buf      output buffer
935  * \retval positive     number of bytes written to \a buf
936  * \retval negative     negated errno on failure
937  */
938 static ssize_t default_easize_show(struct kobject *kobj,
939                                    struct attribute *attr,
940                                    char *buf)
941 {
942         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
943                                               ll_kset.kobj);
944         unsigned int ealen;
945         int rc;
946
947         rc = ll_get_default_mdsize(sbi, &ealen);
948         if (rc)
949                 return rc;
950
951         /* Limit xattr size returned to userspace based on kernel maximum */
952         return snprintf(buf, PAGE_SIZE, "%u\n",
953                         ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
954 }
955
956 /**
957  * Set default_easize.
958  *
959  * Range checking on the passed value is handled by
960  * ll_set_default_mdsize().
961  *
962  * \see client_obd::cl_default_mds_easize
963  *
964  * \param[in] kobj      kobject of the ll_sb_info sysfs entry
965  * \param[in] attr      unused attribute handle
966  * \param[in] buffer    string passed from user space
967  * \param[in] count     \a buffer length
968  *
969  * \retval positive     \a count on success
970  * \retval negative     negated errno on failure
971  */
972 static ssize_t default_easize_store(struct kobject *kobj,
973                                     struct attribute *attr,
974                                     const char *buffer,
975                                     size_t count)
976 {
977         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
978                                               ll_kset.kobj);
979         unsigned int val;
980         int rc;
981
982         if (count == 0)
983                 return 0;
984
985         rc = kstrtouint(buffer, 10, &val);
986         if (rc)
987                 return rc;
988
989         rc = ll_set_default_mdsize(sbi, val);
990         if (rc)
991                 return rc;
992
993         return count;
994 }
995 LUSTRE_RW_ATTR(default_easize);
996
997 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
998 {
999         const char *str[] = LL_SBI_FLAGS;
1000         struct super_block *sb = m->private;
1001         int flags = ll_s2sbi(sb)->ll_flags;
1002         int i = 0;
1003
1004         while (flags != 0) {
1005                 if (ARRAY_SIZE(str) <= i) {
1006                         CERROR("%s: Revise array LL_SBI_FLAGS to match the sbi flags\n",
1007                                ll_s2sbi(sb)->ll_fsname);
1008                         return -EINVAL;
1009                 }
1010
1011                 if (flags & 0x1)
1012                         seq_printf(m, "%s ", str[i]);
1013                 flags >>= 1;
1014                 ++i;
1015         }
1016         seq_printf(m, "\b\n");
1017         return 0;
1018 }
1019
1020 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
1021
1022 static ssize_t xattr_cache_show(struct kobject *kobj,
1023                                 struct attribute *attr,
1024                                 char *buf)
1025 {
1026         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1027                                               ll_kset.kobj);
1028
1029         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
1030 }
1031
1032 static ssize_t xattr_cache_store(struct kobject *kobj,
1033                                  struct attribute *attr,
1034                                  const char *buffer,
1035                                  size_t count)
1036 {
1037         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1038                                               ll_kset.kobj);
1039         bool val;
1040         int rc;
1041
1042         rc = kstrtobool(buffer, &val);
1043         if (rc)
1044                 return rc;
1045
1046         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
1047                 return -ENOTSUPP;
1048
1049         sbi->ll_xattr_cache_enabled = val;
1050         sbi->ll_xattr_cache_set = 1;
1051
1052         return count;
1053 }
1054 LUSTRE_RW_ATTR(xattr_cache);
1055
1056 static ssize_t tiny_write_show(struct kobject *kobj,
1057                                struct attribute *attr,
1058                                char *buf)
1059 {
1060         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1061                                               ll_kset.kobj);
1062
1063         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
1064 }
1065
1066 static ssize_t tiny_write_store(struct kobject *kobj,
1067                                 struct attribute *attr,
1068                                 const char *buffer,
1069                                 size_t count)
1070 {
1071         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1072                                               ll_kset.kobj);
1073         bool val;
1074         int rc;
1075
1076         rc = kstrtobool(buffer, &val);
1077         if (rc)
1078                 return rc;
1079
1080         spin_lock(&sbi->ll_lock);
1081         if (val)
1082                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
1083         else
1084                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1085         spin_unlock(&sbi->ll_lock);
1086
1087         return count;
1088 }
1089 LUSTRE_RW_ATTR(tiny_write);
1090
1091 static ssize_t max_read_ahead_async_active_show(struct kobject *kobj,
1092                                                struct attribute *attr,
1093                                                char *buf)
1094 {
1095         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1096                                               ll_kset.kobj);
1097
1098         return snprintf(buf, PAGE_SIZE, "%u\n",
1099                         sbi->ll_ra_info.ra_async_max_active);
1100 }
1101
1102 static ssize_t max_read_ahead_async_active_store(struct kobject *kobj,
1103                                                 struct attribute *attr,
1104                                                 const char *buffer,
1105                                                 size_t count)
1106 {
1107         unsigned int val;
1108         int rc;
1109         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1110                                               ll_kset.kobj);
1111
1112         rc = kstrtouint(buffer, 10, &val);
1113         if (rc)
1114                 return rc;
1115
1116         if (val < 1 || val > WQ_UNBOUND_MAX_ACTIVE) {
1117                 CERROR("%s: cannot set max_read_ahead_async_active=%u %s than %u\n",
1118                        sbi->ll_fsname, val,
1119                        val < 1 ? "smaller" : "larger",
1120                        val < 1 ? 1 : WQ_UNBOUND_MAX_ACTIVE);
1121                 return -ERANGE;
1122         }
1123
1124         sbi->ll_ra_info.ra_async_max_active = val;
1125         workqueue_set_max_active(sbi->ll_ra_info.ll_readahead_wq, val);
1126
1127         return count;
1128 }
1129 LUSTRE_RW_ATTR(max_read_ahead_async_active);
1130
1131 static ssize_t read_ahead_async_file_threshold_mb_show(struct kobject *kobj,
1132                                                        struct attribute *attr,
1133                                                        char *buf)
1134 {
1135         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1136                                               ll_kset.kobj);
1137
1138         return snprintf(buf, PAGE_SIZE, "%lu\n",
1139              PAGES_TO_MiB(sbi->ll_ra_info.ra_async_pages_per_file_threshold));
1140 }
1141
1142 static ssize_t
1143 read_ahead_async_file_threshold_mb_store(struct kobject *kobj,
1144                                          struct attribute *attr,
1145                                          const char *buffer, size_t count)
1146 {
1147         unsigned long pages_number;
1148         unsigned long max_ra_per_file;
1149         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1150                                               ll_kset.kobj);
1151         int rc;
1152
1153         rc = kstrtoul(buffer, 10, &pages_number);
1154         if (rc)
1155                 return rc;
1156
1157         pages_number = MiB_TO_PAGES(pages_number);
1158         max_ra_per_file = sbi->ll_ra_info.ra_max_pages_per_file;
1159         if (pages_number < 0 || pages_number > max_ra_per_file) {
1160                 CERROR("%s: can't set read_ahead_async_file_threshold_mb=%lu > max_read_ahead_per_file_mb=%lu\n",
1161                        sbi->ll_fsname,
1162                        PAGES_TO_MiB(pages_number),
1163                        PAGES_TO_MiB(max_ra_per_file));
1164                 return -ERANGE;
1165         }
1166         sbi->ll_ra_info.ra_async_pages_per_file_threshold = pages_number;
1167
1168         return count;
1169 }
1170 LUSTRE_RW_ATTR(read_ahead_async_file_threshold_mb);
1171
1172 static ssize_t fast_read_show(struct kobject *kobj,
1173                               struct attribute *attr,
1174                               char *buf)
1175 {
1176         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1177                                               ll_kset.kobj);
1178
1179         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1180 }
1181
1182 static ssize_t fast_read_store(struct kobject *kobj,
1183                                struct attribute *attr,
1184                                const char *buffer,
1185                                size_t count)
1186 {
1187         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1188                                               ll_kset.kobj);
1189         bool val;
1190         int rc;
1191
1192         rc = kstrtobool(buffer, &val);
1193         if (rc)
1194                 return rc;
1195
1196         spin_lock(&sbi->ll_lock);
1197         if (val)
1198                 sbi->ll_flags |= LL_SBI_FAST_READ;
1199         else
1200                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1201         spin_unlock(&sbi->ll_lock);
1202
1203         return count;
1204 }
1205 LUSTRE_RW_ATTR(fast_read);
1206
1207 static ssize_t file_heat_show(struct kobject *kobj,
1208                               struct attribute *attr,
1209                               char *buf)
1210 {
1211         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1212                                               ll_kset.kobj);
1213
1214         return snprintf(buf, PAGE_SIZE, "%u\n",
1215                         !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
1216 }
1217
1218 static ssize_t file_heat_store(struct kobject *kobj,
1219                                struct attribute *attr,
1220                                const char *buffer,
1221                                size_t count)
1222 {
1223         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1224                                               ll_kset.kobj);
1225         bool val;
1226         int rc;
1227
1228         rc = kstrtobool(buffer, &val);
1229         if (rc)
1230                 return rc;
1231
1232         spin_lock(&sbi->ll_lock);
1233         if (val)
1234                 sbi->ll_flags |= LL_SBI_FILE_HEAT;
1235         else
1236                 sbi->ll_flags &= ~LL_SBI_FILE_HEAT;
1237         spin_unlock(&sbi->ll_lock);
1238
1239         return count;
1240 }
1241 LUSTRE_RW_ATTR(file_heat);
1242
1243 static ssize_t heat_decay_percentage_show(struct kobject *kobj,
1244                                           struct attribute *attr,
1245                                           char *buf)
1246 {
1247         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1248                                               ll_kset.kobj);
1249
1250         return snprintf(buf, PAGE_SIZE, "%u\n",
1251                        (sbi->ll_heat_decay_weight * 100 + 128) / 256);
1252 }
1253
1254 static ssize_t heat_decay_percentage_store(struct kobject *kobj,
1255                                            struct attribute *attr,
1256                                            const char *buffer,
1257                                            size_t count)
1258 {
1259         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1260                                               ll_kset.kobj);
1261         unsigned long val;
1262         int rc;
1263
1264         rc = kstrtoul(buffer, 10, &val);
1265         if (rc)
1266                 return rc;
1267
1268         if (val < 0 || val > 100)
1269                 return -ERANGE;
1270
1271         sbi->ll_heat_decay_weight = (val * 256 + 50) / 100;
1272
1273         return count;
1274 }
1275 LUSTRE_RW_ATTR(heat_decay_percentage);
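/*
 * Worked example of the rounding above: writing 30 stores a weight of
 * (30 * 256 + 50) / 100 = 77, and heat_decay_percentage_show() reports it
 * back as (77 * 100 + 128) / 256 = 30, so this value round-trips exactly.
 */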
1276
1277 static ssize_t heat_period_second_show(struct kobject *kobj,
1278                                        struct attribute *attr,
1279                                        char *buf)
1280 {
1281         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1282                                               ll_kset.kobj);
1283
1284         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
1285 }
1286
1287 static ssize_t heat_period_second_store(struct kobject *kobj,
1288                                         struct attribute *attr,
1289                                         const char *buffer,
1290                                         size_t count)
1291 {
1292         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1293                                               ll_kset.kobj);
1294         unsigned long val;
1295         int rc;
1296
1297         rc = kstrtoul(buffer, 10, &val);
1298         if (rc)
1299                 return rc;
1300
1301         if (val <= 0)
1302                 return -ERANGE;
1303
1304         sbi->ll_heat_period_second = val;
1305
1306         return count;
1307 }
1308 LUSTRE_RW_ATTR(heat_period_second);
1309
1310 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1311 {
1312         struct super_block      *sb    = m->private;
1313         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1314         struct cl_client_cache  *cache = sbi->ll_cache;
1315         long pages;
1316         int mb;
1317
1318         pages = atomic_long_read(&cache->ccc_unstable_nr);
1319         mb    = (pages * PAGE_SIZE) >> 20;
1320
1321         seq_printf(m, "unstable_check:     %8d\n"
1322                       "unstable_pages: %12ld\n"
1323                       "unstable_mb:        %8d\n",
1324                    cache->ccc_unstable_check, pages, mb);
1325         return 0;
1326 }
1327
1328 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1329                                            const char __user *buffer,
1330                                            size_t count, loff_t *unused)
1331 {
1332         struct seq_file *seq = file->private_data;
1333         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1334         char kernbuf[128];
1335         bool val;
1336         int rc;
1337
1338         if (count == 0)
1339                 return 0;
1340         if (count >= sizeof(kernbuf))
1341                 return -EINVAL;
1342
1343         if (copy_from_user(kernbuf, buffer, count))
1344                 return -EFAULT;
1345         kernbuf[count] = 0;
1346
1347         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1348                   kernbuf;
1349         rc = kstrtobool_from_user(buffer, count, &val);
1350         if (rc < 0)
1351                 return rc;
1352
1353         /* borrow lru lock to set the value */
1354         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1355         sbi->ll_cache->ccc_unstable_check = val;
1356         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1357
1358         return count;
1359 }
1360
1361 LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
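/*
 * Illustrative write (path is an example): unstable page accounting accepts
 * either a bare boolean or the labelled form produced by a read, e.g.
 *
 *   echo "unstable_check: 0" > /sys/kernel/debug/lustre/llite/<mount>/unstable_stats
 */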
1362
1363 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1364 {
1365         struct super_block *sb = m->private;
1366         struct ll_sb_info *sbi = ll_s2sbi(sb);
1367         struct root_squash_info *squash = &sbi->ll_squash;
1368
1369         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1370         return 0;
1371 }
1372
1373 static ssize_t ll_root_squash_seq_write(struct file *file,
1374                                         const char __user *buffer,
1375                                         size_t count, loff_t *off)
1376 {
1377         struct seq_file *m = file->private_data;
1378         struct super_block *sb = m->private;
1379         struct ll_sb_info *sbi = ll_s2sbi(sb);
1380         struct root_squash_info *squash = &sbi->ll_squash;
1381
1382         return lprocfs_wr_root_squash(buffer, count, squash, sbi->ll_fsname);
1383 }
1384
1385 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1386
1387 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1388 {
1389         struct super_block *sb = m->private;
1390         struct ll_sb_info *sbi = ll_s2sbi(sb);
1391         struct root_squash_info *squash = &sbi->ll_squash;
1392         int len;
1393
1394         spin_lock(&squash->rsi_lock);
1395         if (!list_empty(&squash->rsi_nosquash_nids)) {
1396                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1397                                         &squash->rsi_nosquash_nids);
1398                 m->count += len;
1399                 seq_putc(m, '\n');
1400         } else {
1401                 seq_puts(m, "NONE\n");
1402         }
1403         spin_unlock(&squash->rsi_lock);
1404
1405         return 0;
1406 }
1407
1408 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1409                                           const char __user *buffer,
1410                                           size_t count, loff_t *off)
1411 {
1412         struct seq_file *m = file->private_data;
1413         struct super_block *sb = m->private;
1414         struct ll_sb_info *sbi = ll_s2sbi(sb);
1415         struct root_squash_info *squash = &sbi->ll_squash;
1416         int rc;
1417
1418         rc = lprocfs_wr_nosquash_nids(buffer, count, squash, sbi->ll_fsname);
1419         if (rc < 0)
1420                 return rc;
1421
1422         ll_compute_rootsquash_state(sbi);
1423
1424         return rc;
1425 }
1426
1427 LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1428
1429 static int ll_pcc_seq_show(struct seq_file *m, void *v)
1430 {
1431         struct super_block *sb = m->private;
1432         struct ll_sb_info *sbi = ll_s2sbi(sb);
1433
1434         return pcc_super_dump(&sbi->ll_pcc_super, m);
1435 }
1436
1437 static ssize_t ll_pcc_seq_write(struct file *file, const char __user *buffer,
1438                                 size_t count, loff_t *off)
1439 {
1440         struct seq_file *m = file->private_data;
1441         struct super_block *sb = m->private;
1442         struct ll_sb_info *sbi = ll_s2sbi(sb);
1443         int rc;
1444         char *kernbuf;
1445
1446         if (count >= LPROCFS_WR_PCC_MAX_CMD)
1447                 return -EINVAL;
1448
1449         if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_PCC))
1450                 return -EOPNOTSUPP;
1451
1452         OBD_ALLOC(kernbuf, count + 1);
1453         if (kernbuf == NULL)
1454                 return -ENOMEM;
1455
1456         if (copy_from_user(kernbuf, buffer, count))
1457                 GOTO(out_free_kernbuff, rc = -EFAULT);
1458
1459         rc = pcc_cmd_handle(kernbuf, count, &sbi->ll_pcc_super);
1460 out_free_kernbuff:
1461         OBD_FREE(kernbuf, count + 1);
1462         return rc ? rc : count;
1463 }
1464 LPROC_SEQ_FOPS(ll_pcc);
1465
1466 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1467         { .name =       "site",
1468           .fops =       &ll_site_stats_fops                     },
1469         { .name =       "max_read_ahead_mb",
1470           .fops =       &ll_max_readahead_mb_fops               },
1471         { .name =       "max_read_ahead_per_file_mb",
1472           .fops =       &ll_max_readahead_per_file_mb_fops      },
1473         { .name =       "max_read_ahead_whole_mb",
1474           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1475         { .name =       "max_cached_mb",
1476           .fops =       &ll_max_cached_mb_fops                  },
1477         { .name =       "statahead_stats",
1478           .fops =       &ll_statahead_stats_fops                },
1479         { .name =       "unstable_stats",
1480           .fops =       &ll_unstable_stats_fops                 },
1481         { .name =       "sbi_flags",
1482           .fops =       &ll_sbi_flags_fops                      },
1483         { .name =       "root_squash",
1484           .fops =       &ll_root_squash_fops                    },
1485         { .name =       "nosquash_nids",
1486           .fops =       &ll_nosquash_nids_fops                  },
1487         { .name =       "pcc",
1488           .fops =       &ll_pcc_fops,                           },
1489         { NULL }
1490 };
1491
1492 #define MAX_STRING_SIZE 128
1493
1494 static struct attribute *llite_attrs[] = {
1495         &lustre_attr_blocksize.attr,
1496         &lustre_attr_stat_blocksize.attr,
1497         &lustre_attr_kbytestotal.attr,
1498         &lustre_attr_kbytesfree.attr,
1499         &lustre_attr_kbytesavail.attr,
1500         &lustre_attr_filestotal.attr,
1501         &lustre_attr_filesfree.attr,
1502         &lustre_attr_client_type.attr,
1503         &lustre_attr_fstype.attr,
1504         &lustre_attr_uuid.attr,
1505         &lustre_attr_checksums.attr,
1506         &lustre_attr_checksum_pages.attr,
1507         &lustre_attr_stats_track_pid.attr,
1508         &lustre_attr_stats_track_ppid.attr,
1509         &lustre_attr_stats_track_gid.attr,
1510         &lustre_attr_statahead_running_max.attr,
1511         &lustre_attr_statahead_max.attr,
1512         &lustre_attr_statahead_agl.attr,
1513         &lustre_attr_lazystatfs.attr,
1514         &lustre_attr_statfs_max_age.attr,
1515         &lustre_attr_max_easize.attr,
1516         &lustre_attr_default_easize.attr,
1517         &lustre_attr_xattr_cache.attr,
1518         &lustre_attr_fast_read.attr,
1519         &lustre_attr_tiny_write.attr,
1520         &lustre_attr_file_heat.attr,
1521         &lustre_attr_heat_decay_percentage.attr,
1522         &lustre_attr_heat_period_second.attr,
1523         &lustre_attr_max_read_ahead_async_active.attr,
1524         &lustre_attr_read_ahead_async_file_threshold_mb.attr,
1525         NULL,
1526 };
1527
1528 static void sbi_kobj_release(struct kobject *kobj)
1529 {
1530         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1531                                               ll_kset.kobj);
1532         complete(&sbi->ll_kobj_unregister);
1533 }
1534
1535 static struct kobj_type sbi_ktype = {
1536         .default_attrs  = llite_attrs,
1537         .sysfs_ops      = &lustre_sysfs_ops,
1538         .release        = sbi_kobj_release,
1539 };
1540
1541 #define LPROCFS_TYPE_LATENCY \
1542         (LPROCFS_TYPE_USEC | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV)
1543 static const struct llite_file_opcode {
1544         __u32           opcode;
1545         __u32           type;
1546         const char      *opname;
1547 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1548         /* file operation */
1549         { LPROC_LL_READ_BYTES,  LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1550                 "read_bytes" },
1551         { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1552                 "write_bytes" },
1553         { LPROC_LL_READ,        LPROCFS_TYPE_LATENCY,   "read" },
1554         { LPROC_LL_WRITE,       LPROCFS_TYPE_LATENCY,   "write" },
1555         { LPROC_LL_IOCTL,       LPROCFS_TYPE_REQS,      "ioctl" },
1556         { LPROC_LL_OPEN,        LPROCFS_TYPE_LATENCY,   "open" },
1557         { LPROC_LL_RELEASE,     LPROCFS_TYPE_LATENCY,   "close" },
1558         { LPROC_LL_MMAP,        LPROCFS_TYPE_LATENCY,   "mmap" },
1559         { LPROC_LL_FAULT,       LPROCFS_TYPE_LATENCY,   "page_fault" },
1560         { LPROC_LL_MKWRITE,     LPROCFS_TYPE_LATENCY,   "page_mkwrite" },
1561         { LPROC_LL_LLSEEK,      LPROCFS_TYPE_LATENCY,   "seek" },
1562         { LPROC_LL_FSYNC,       LPROCFS_TYPE_LATENCY,   "fsync" },
1563         { LPROC_LL_READDIR,     LPROCFS_TYPE_LATENCY,   "readdir" },
1564         /* inode operation */
1565         { LPROC_LL_SETATTR,     LPROCFS_TYPE_LATENCY,   "setattr" },
1566         { LPROC_LL_TRUNC,       LPROCFS_TYPE_LATENCY,   "truncate" },
1567         { LPROC_LL_FLOCK,       LPROCFS_TYPE_LATENCY,   "flock" },
1568         { LPROC_LL_GETATTR,     LPROCFS_TYPE_LATENCY,   "getattr" },
1569         /* dir inode operation */
1570         { LPROC_LL_CREATE,      LPROCFS_TYPE_LATENCY,   "create" },
1571         { LPROC_LL_LINK,        LPROCFS_TYPE_LATENCY,   "link" },
1572         { LPROC_LL_UNLINK,      LPROCFS_TYPE_LATENCY,   "unlink" },
1573         { LPROC_LL_SYMLINK,     LPROCFS_TYPE_LATENCY,   "symlink" },
1574         { LPROC_LL_MKDIR,       LPROCFS_TYPE_LATENCY,   "mkdir" },
1575         { LPROC_LL_RMDIR,       LPROCFS_TYPE_LATENCY,   "rmdir" },
1576         { LPROC_LL_MKNOD,       LPROCFS_TYPE_LATENCY,   "mknod" },
1577         { LPROC_LL_RENAME,      LPROCFS_TYPE_LATENCY,   "rename" },
1578         /* special inode operation */
1579         { LPROC_LL_STATFS,      LPROCFS_TYPE_LATENCY,   "statfs" },
1580         { LPROC_LL_SETXATTR,    LPROCFS_TYPE_LATENCY,   "setxattr" },
1581         { LPROC_LL_GETXATTR,    LPROCFS_TYPE_LATENCY,   "getxattr" },
1582         { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REQS,    "getxattr_hits" },
1583         { LPROC_LL_LISTXATTR,   LPROCFS_TYPE_LATENCY,   "listxattr" },
1584         { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_LATENCY,   "removexattr" },
1585         { LPROC_LL_INODE_PERM,  LPROCFS_TYPE_LATENCY,   "inode_permission" },
1586 };
1587
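/*
 * Tally one VFS operation in the per-mount "stats" file.  Depending on
 * the stats_track_type setting this counts every caller, or only the
 * tracked PID, parent PID or GID.
 */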
1588 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count)
1589 {
1590         if (!sbi->ll_stats)
1591                 return;
1592
1593         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1594                 lprocfs_counter_add(sbi->ll_stats, op, count);
1595         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1596                  sbi->ll_stats_track_id == current->pid)
1597                 lprocfs_counter_add(sbi->ll_stats, op, count);
1598         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1599                  sbi->ll_stats_track_id == current->parent->pid)
1600                 lprocfs_counter_add(sbi->ll_stats, op, count);
1601         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1602                  sbi->ll_stats_track_id ==
1603                         from_kgid(&init_user_ns, current_gid()))
1604                 lprocfs_counter_add(sbi->ll_stats, op, count);
1605 }
1606 EXPORT_SYMBOL(ll_stats_ops_tally);
1607
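/* labels for the read_ahead_stats counters, indexed by RA_STAT_* */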
1608 static const char *ra_stat_string[] = {
1609         [RA_STAT_HIT] = "hits",
1610         [RA_STAT_MISS] = "misses",
1611         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1612         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1613         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1614         [RA_STAT_FAILED_MATCH] = "failed lock match",
1615         [RA_STAT_DISCARDED] = "read but discarded",
1616         [RA_STAT_ZERO_LEN] = "zero length file",
1617         [RA_STAT_ZERO_WINDOW] = "zero size window",
1618         [RA_STAT_EOF] = "read-ahead to EOF",
1619         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1620         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1621         [RA_STAT_FAILED_REACH_END] = "failed to reach end",
1622         [RA_STAT_ASYNC] = "async readahead",
1623         [RA_STAT_FAILED_FAST_READ] = "failed to fast read",
1624 };
1625
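/*
 * Set up the per-superblock debugfs directory (stats, read_ahead_stats,
 * extents/offset stats, dump_page_cache) and register the mount's sysfs
 * kset below the llite kobject.
 */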
1626 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1627 {
1628         struct lustre_sb_info *lsi = s2lsi(sb);
1629         struct ll_sb_info *sbi = ll_s2sbi(sb);
1630         int err, id, rc;
1631
1632         ENTRY;
1633         LASSERT(sbi);
1634
1635         if (IS_ERR_OR_NULL(llite_root))
1636                 goto out_ll_kset;
1637
1638         sbi->ll_debugfs_entry = ldebugfs_register(name, llite_root,
1639                                                   lprocfs_llite_obd_vars, sb);
1640         if (IS_ERR_OR_NULL(sbi->ll_debugfs_entry)) {
1641                 err = sbi->ll_debugfs_entry ? PTR_ERR(sbi->ll_debugfs_entry) :
1642                                               -ENOMEM;
1643                 sbi->ll_debugfs_entry = NULL;
1644                 RETURN(err);
1645         }
1646
1647         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache",
1648                                  0444, &vvp_dump_pgcache_file_ops, sbi);
1649         if (rc)
1650                 CWARN("Error adding the dump_page_cache file\n");
1651
1652         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1653                                  &ll_rw_extents_stats_fops, sbi);
1654         if (rc)
1655                 CWARN("Error adding the extents_stats file\n");
1656
1657         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1658                                  "extents_stats_per_process", 0644,
1659                                  &ll_rw_extents_stats_pp_fops, sbi);
1660         if (rc)
1661                 CWARN("Error adding the extents_stats_per_process file\n");
1662
1663         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1664                                  &ll_rw_offset_stats_fops, sbi);
1665         if (rc)
1666                 CWARN("Error adding the offset_stats file\n");
1667
1668         /* File operations stats */
1669         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1670                                             LPROCFS_STATS_FLAG_NONE);
1671         if (sbi->ll_stats == NULL)
1672                 GOTO(out_debugfs, err = -ENOMEM);
1673
1674         /* do counter init */
1675         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1676                 u32 type = llite_opcode_table[id].type;
1677                 void *ptr = NULL;
1678
1679                 if (type & LPROCFS_TYPE_REQS)
1680                         ptr = "reqs";
1681                 else if (type & LPROCFS_TYPE_BYTES)
1682                         ptr = "bytes";
1683                 else if (type & LPROCFS_TYPE_PAGES)
1684                         ptr = "pages";
1685                 else if (type & LPROCFS_TYPE_USEC)
1686                         ptr = "usec";
1687                 lprocfs_counter_init(sbi->ll_stats,
1688                                      llite_opcode_table[id].opcode,
1689                                      (type & LPROCFS_CNTR_AVGMINMAX),
1690                                      llite_opcode_table[id].opname, ptr);
1691         }
1692
1693         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1694                                       sbi->ll_stats);
1695         if (err)
1696                 GOTO(out_stats, err);
1697
1698         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1699                                                LPROCFS_STATS_FLAG_NONE);
1700         if (sbi->ll_ra_stats == NULL)
1701                 GOTO(out_stats, err = -ENOMEM);
1702
1703         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1704                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1705                                      ra_stat_string[id], "pages");
1706
1707         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1708                                       sbi->ll_ra_stats);
1709         if (err)
1710                 GOTO(out_ra_stats, err);
1711
1712 out_ll_kset:
1713         /* Register the mount's sysfs kset even if debugfs setup was skipped */
1714         sbi->ll_kset.kobj.parent = llite_kobj;
1715         sbi->ll_kset.kobj.ktype = &sbi_ktype;
1716         init_completion(&sbi->ll_kobj_unregister);
1717         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1718         if (err)
1719                 GOTO(out_ra_stats, err);
1720
1721         err = kset_register(&sbi->ll_kset);
1722         if (err)
1723                 GOTO(out_ra_stats, err);
1724
1725         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1726
1727         RETURN(0);
1728 out_ra_stats:
1729         lprocfs_free_stats(&sbi->ll_ra_stats);
1730 out_stats:
1731         lprocfs_free_stats(&sbi->ll_stats);
1732 out_debugfs:
1733         debugfs_remove_recursive(sbi->ll_debugfs_entry);
1734
1735         RETURN(err);
1736 }
1737
1738 void ll_debugfs_unregister_super(struct super_block *sb)
1739 {
1740         struct lustre_sb_info *lsi = s2lsi(sb);
1741         struct ll_sb_info *sbi = ll_s2sbi(sb);
1742
1743         debugfs_remove_recursive(sbi->ll_debugfs_entry);
1744
1745         if (sbi->ll_dt_obd)
1746                 sysfs_remove_link(&sbi->ll_kset.kobj,
1747                                   sbi->ll_dt_obd->obd_type->typ_name);
1748
1749         if (sbi->ll_md_obd)
1750                 sysfs_remove_link(&sbi->ll_kset.kobj,
1751                                   sbi->ll_md_obd->obd_type->typ_name);
1752
1753         kobject_put(lsi->lsi_kobj);
1754
1755         kset_unregister(&sbi->ll_kset);
1756         wait_for_completion(&sbi->ll_kobj_unregister);
1757
1758         lprocfs_free_stats(&sbi->ll_ra_stats);
1759         lprocfs_free_stats(&sbi->ll_stats);
1760 }
1761 #undef MAX_STRING_SIZE
1762
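/*
 * Print one slot of the read/write extent-size histogram.  Each bucket
 * covers a power-of-two range of I/O sizes; the printed bounds advance
 * through the "KMGTPEZY" unit suffixes as the ranges grow.
 */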
1763 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1764                                    struct seq_file *seq, int which)
1765 {
1766         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1767         unsigned long start, end, r, w;
1768         const char *unitp = "KMGTPEZY";
1769         int i, units = 10;
1770         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1771
1772         read_cum = 0;
1773         write_cum = 0;
1774         start = 0;
1775
1776         for (i = 0; i < LL_HIST_MAX; i++) {
1777                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1778                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1779         }
1780
1781         for (i = 0; i < LL_HIST_MAX; i++) {
1782                 r = pp_info->pp_r_hist.oh_buckets[i];
1783                 w = pp_info->pp_w_hist.oh_buckets[i];
1784                 read_cum += r;
1785                 write_cum += w;
1786                 end = BIT(i + LL_HIST_START - units);
1787                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u  | "
1788                            "%14lu %4u %4u\n", start, *unitp, end, *unitp,
1789                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1790                            r, pct(r, read_tot), pct(read_cum, read_tot),
1791                            w, pct(w, write_tot), pct(write_cum, write_tot));
1792                 start = end;
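                /* crossed 1024 of the current unit: move to the next suffix */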
1793                 if (start == BIT(10)) {
1794                         start = 1;
1795                         units += 10;
1796                         unitp++;
1797                 }
1798                 if (read_cum == read_tot && write_cum == write_tot)
1799                         break;
1800         }
1801 }
1802
1803 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1804 {
1805         struct timespec64 now;
1806         struct ll_sb_info *sbi = seq->private;
1807         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1808         int k;
1809
1810         ktime_get_real_ts64(&now);
1811
1812         if (!sbi->ll_rw_stats_on) {
1813                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1814                 return 0;
1815         }
1816         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1817                    (s64)now.tv_sec, now.tv_nsec);
1818         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1819         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1820                    "extents", "calls", "%", "cum%",
1821                    "calls", "%", "cum%");
1822         spin_lock(&sbi->ll_pp_extent_lock);
1823         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1824                 if (io_extents->pp_extents[k].pid != 0) {
1825                         seq_printf(seq, "\nPID: %d\n",
1826                                    io_extents->pp_extents[k].pid);
1827                         ll_display_extents_info(io_extents, seq, k);
1828                 }
1829         }
1830         spin_unlock(&sbi->ll_pp_extent_lock);
1831         return 0;
1832 }
1833
1834 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1835                                                 const char __user *buf,
1836                                                 size_t len,
1837                                                 loff_t *off)
1838 {
1839         struct seq_file *seq = file->private_data;
1840         struct ll_sb_info *sbi = seq->private;
1841         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1842         int i;
1843         __s64 value;
1844
1845         if (len == 0)
1846                 return -EINVAL;
1847
1848         value = ll_stats_pid_write(buf, len);
1849
1850         if (value == 0)
1851                 sbi->ll_rw_stats_on = 0;
1852         else
1853                 sbi->ll_rw_stats_on = 1;
1854
1855         spin_lock(&sbi->ll_pp_extent_lock);
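        /*
         * Reset only the per-process slots; the aggregate slot at
         * index LL_PROCESS_HIST_MAX is cleared via extents_stats.
         */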
1856         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1857                 io_extents->pp_extents[i].pid = 0;
1858                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1859                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1860         }
1861         spin_unlock(&sbi->ll_pp_extent_lock);
1862         return len;
1863 }
1864
1865 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
1866
1867 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1868 {
1869         struct timespec64 now;
1870         struct ll_sb_info *sbi = seq->private;
1871         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1872
1873         ktime_get_real_ts64(&now);
1874
1875         if (!sbi->ll_rw_stats_on) {
1876                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1877                 return 0;
1878         }
1879         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1880                    (s64)now.tv_sec, now.tv_nsec);
1881
1882         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1883         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1884                    "extents", "calls", "%", "cum%",
1885                    "calls", "%", "cum%");
1886         spin_lock(&sbi->ll_lock);
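        /* slot LL_PROCESS_HIST_MAX holds the totals across all processes */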
1887         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1888         spin_unlock(&sbi->ll_lock);
1889
1890         return 0;
1891 }
1892
1893 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1894                                              const char __user *buf,
1895                                              size_t len, loff_t *off)
1896 {
1897         struct seq_file *seq = file->private_data;
1898         struct ll_sb_info *sbi = seq->private;
1899         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1900         int i;
1901         __s64 value;
1902
1903         if (len == 0)
1904                 return -EINVAL;
1905
1906         value = ll_stats_pid_write(buf, len);
1907
1908         if (value == 0)
1909                 sbi->ll_rw_stats_on = 0;
1910         else
1911                 sbi->ll_rw_stats_on = 1;
1912
1913         spin_lock(&sbi->ll_pp_extent_lock);
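        /* clear all slots, including the aggregate at LL_PROCESS_HIST_MAX */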
1914         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1915                 io_extents->pp_extents[i].pid = 0;
1916                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1917                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1918         }
1919         spin_unlock(&sbi->ll_pp_extent_lock);
1920
1921         return len;
1922 }
1923
1924 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
1925
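/*
 * Update the extent-size histogram and the offset records for @pid.
 * Per-process slots are recycled round-robin once LL_PROCESS_HIST_MAX
 * (or LL_OFFSET_HIST_MAX) entries are in use.
 */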
1926 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1927                        struct ll_file_data *file, loff_t pos,
1928                        size_t count, int rw)
1929 {
1930         int i, cur = -1;
1931         struct ll_rw_process_info *process;
1932         struct ll_rw_process_info *offset;
1933         int *off_count = &sbi->ll_rw_offset_entry_count;
1934         int *process_count = &sbi->ll_offset_process_count;
1935         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1936
1937         if (!sbi->ll_rw_stats_on)
1938                 return;
1939         process = sbi->ll_rw_process_info;
1940         offset = sbi->ll_rw_offset_info;
1941
1942         spin_lock(&sbi->ll_pp_extent_lock);
1943         /* Extent statistics */
1944         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1945                 if (io_extents->pp_extents[i].pid == pid) {
1946                         cur = i;
1947                         break;
1948                 }
1949         }
1950
1951         if (cur == -1) {
1952                 /* new process */
1953                 sbi->ll_extent_process_count =
1954                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1955                 cur = sbi->ll_extent_process_count;
1956                 io_extents->pp_extents[cur].pid = pid;
1957                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1958                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1959         }
1960
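        /*
         * Find the bucket: smallest i with count < 2^(LL_HIST_START + i),
         * capped at LL_HIST_MAX - 1.
         */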
1961         for (i = 0; (count >= BIT(LL_HIST_START + i)) &&
1962              (i < (LL_HIST_MAX - 1)); i++);
1963         if (rw == 0) {
1964                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1965                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1966         } else {
1967                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1968                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1969         }
1970         spin_unlock(&sbi->ll_pp_extent_lock);
1971
1972         spin_lock(&sbi->ll_process_lock);
1973         /* Offset statistics */
1974         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1975                 if (process[i].rw_pid == pid) {
1976                         if (process[i].rw_last_file != file) {
1977                                 process[i].rw_range_start = pos;
1978                                 process[i].rw_last_file_pos = pos + count;
1979                                 process[i].rw_smallest_extent = count;
1980                                 process[i].rw_largest_extent = count;
1981                                 process[i].rw_offset = 0;
1982                                 process[i].rw_last_file = file;
1983                                 spin_unlock(&sbi->ll_process_lock);
1984                                 return;
1985                         }
1986                         if (process[i].rw_last_file_pos != pos) {
1987                                 *off_count =
1988                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1989                                 offset[*off_count].rw_op = process[i].rw_op;
1990                                 offset[*off_count].rw_pid = pid;
1991                                 offset[*off_count].rw_range_start =
1992                                         process[i].rw_range_start;
1993                                 offset[*off_count].rw_range_end =
1994                                         process[i].rw_last_file_pos;
1995                                 offset[*off_count].rw_smallest_extent =
1996                                         process[i].rw_smallest_extent;
1997                                 offset[*off_count].rw_largest_extent =
1998                                         process[i].rw_largest_extent;
1999                                 offset[*off_count].rw_offset =
2000                                         process[i].rw_offset;
2001                                 process[i].rw_op = rw;
2002                                 process[i].rw_range_start = pos;
2003                                 process[i].rw_smallest_extent = count;
2004                                 process[i].rw_largest_extent = count;
2005                                 process[i].rw_offset = pos -
2006                                         process[i].rw_last_file_pos;
2007                         }
2008                         if (process[i].rw_smallest_extent > count)
2009                                 process[i].rw_smallest_extent = count;
2010                         if (process[i].rw_largest_extent < count)
2011                                 process[i].rw_largest_extent = count;
2012                         process[i].rw_last_file_pos = pos + count;
2013                         spin_unlock(&sbi->ll_process_lock);
2014                         return;
2015                 }
2016         }
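        /*
         * This pid is not tracked yet: recycle the next slot round-robin
         * and start a new range record for it.
         */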
2017         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
2018         process[*process_count].rw_pid = pid;
2019         process[*process_count].rw_op = rw;
2020         process[*process_count].rw_range_start = pos;
2021         process[*process_count].rw_last_file_pos = pos + count;
2022         process[*process_count].rw_smallest_extent = count;
2023         process[*process_count].rw_largest_extent = count;
2024         process[*process_count].rw_offset = 0;
2025         process[*process_count].rw_last_file = file;
2026         spin_unlock(&sbi->ll_process_lock);
2027 }
2028
2029 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
2030 {
2031         struct timespec64 now;
2032         struct ll_sb_info *sbi = seq->private;
2033         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
2034         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
2035         int i;
2036
2037         ktime_get_real_ts64(&now);
2038
2039         if (!sbi->ll_rw_stats_on) {
2040                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
2041                 return 0;
2042         }
2043         spin_lock(&sbi->ll_process_lock);
2044
2045         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
2046                    (s64)now.tv_sec, now.tv_nsec);
2047         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
2048                    "R/W", "PID", "RANGE START", "RANGE END",
2049                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
2050
2051         /* We stored the discontiguous offsets here; print them first */
2052         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
2053                 if (offset[i].rw_pid != 0)
2054                         seq_printf(seq,
2055                                   "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
2056                                    offset[i].rw_op == READ ? 'R' : 'W',
2057                                    offset[i].rw_pid,
2058                                    offset[i].rw_range_start,
2059                                    offset[i].rw_range_end,
2060                                    (unsigned long)offset[i].rw_smallest_extent,
2061                                    (unsigned long)offset[i].rw_largest_extent,
2062                                    offset[i].rw_offset);
2063         }
2064
2065         /* Then print the current offsets for each process */
2066         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
2067                 if (process[i].rw_pid != 0)
2068                         seq_printf(seq,
2069                                   "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
2070                                    process[i].rw_op == READ ? 'R' : 'W',
2071                                    process[i].rw_pid,
2072                                    process[i].rw_range_start,
2073                                    process[i].rw_last_file_pos,
2074                                    (unsigned long)process[i].rw_smallest_extent,
2075                                    (unsigned long)process[i].rw_largest_extent,
2076                                    process[i].rw_offset);
2077         }
2078         spin_unlock(&sbi->ll_process_lock);
2079
2080         return 0;
2081 }
2082
2083 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
2084                                             const char __user *buf,
2085                                             size_t len, loff_t *off)
2086 {
2087         struct seq_file *seq = file->private_data;
2088         struct ll_sb_info *sbi = seq->private;
2089         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
2090         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
2091         __s64 value;
2092
2093         if (len == 0)
2094                 return -EINVAL;
2095
2096         value = ll_stats_pid_write(buf, len);
2097
2098         if (value == 0)
2099                 sbi->ll_rw_stats_on = 0;
2100         else
2101                 sbi->ll_rw_stats_on = 1;
2102
2103         spin_lock(&sbi->ll_process_lock);
2104         sbi->ll_offset_process_count = 0;
2105         sbi->ll_rw_offset_entry_count = 0;
2106         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
2107                LL_PROCESS_HIST_MAX);
2108         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
2109                LL_OFFSET_HIST_MAX);
2110         spin_unlock(&sbi->ll_process_lock);
2111
2112         return len;
2113 }
2114
2115 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);