lustre/llite/lproc_llite.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 static struct kobject *llite_kobj;
47 static struct dentry *llite_root;
48
49 static void llite_kobj_release(struct kobject *kobj)
50 {
51         if (!IS_ERR_OR_NULL(llite_root)) {
52                 debugfs_remove(llite_root);
53                 llite_root = NULL;
54         }
55
56         kfree(kobj);
57 }
58
59 static struct kobj_type llite_kobj_ktype = {
60         .release        = llite_kobj_release,
61         .sysfs_ops      = &lustre_sysfs_ops,
62 };
63
64 int llite_tunables_register(void)
65 {
66         int rc;
67
68         llite_kobj = kzalloc(sizeof(*llite_kobj), GFP_KERNEL);
69         if (!llite_kobj)
70                 return -ENOMEM;
71
72         llite_kobj->kset = lustre_kset;
73         rc = kobject_init_and_add(llite_kobj, &llite_kobj_ktype,
74                                   &lustre_kset->kobj, "%s", "llite");
75         if (rc)
76                 goto free_kobj;
77
78         llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
79         if (IS_ERR_OR_NULL(llite_root)) {
80                 rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
81                 llite_root = NULL;
82 free_kobj:
83                 kobject_put(llite_kobj);
84                 llite_kobj = NULL;
85         }
86
87         return rc;
88 }
89
90 void llite_tunables_unregister(void)
91 {
92         kobject_put(llite_kobj);
93         llite_kobj = NULL;
94 }
95
96 /* <debugfs>/lustre/llite mount point registration */
97 static const struct file_operations ll_rw_extents_stats_fops;
98 static const struct file_operations ll_rw_extents_stats_pp_fops;
99 static const struct file_operations ll_rw_offset_stats_fops;
100
101 /**
102  * ll_stats_pid_write() - Determine if stats collection should be enabled
103  * @buf: Buffer containing the data written
104  * @len: Number of bytes in the buffer
105  *
106  * Several proc files begin collecting stats when a value is written, and stop
107  * collecting when either '0' or 'disable' is written. This function checks the
108  * written value to see if collection should be enabled or disabled.
109  *
110  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
111  * equivalent of a number is written, that number is returned. Otherwise,
112  * 1 is returned. Non-zero return values indicate collection should be enabled.
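 *
 * For example (hypothetical input): writing "1234" returns 1234 and starts
 * collection for that id, writing "disable" or "0" returns 0 and stops it,
 * and any other non-numeric text returns 1.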
113  */
114 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
115 {
116         unsigned long long value = 1;
117         char kernbuf[16];
118         int rc;
119
120         rc = kstrtoull_from_user(buf, len, 0, &value);
121         if (rc < 0 && len < sizeof(kernbuf)) {
122                 if (copy_from_user(kernbuf, buf, len))
123                         return -EFAULT;
124                 kernbuf[len] = 0;
125
126                 if (kernbuf[len - 1] == '\n')
127                         kernbuf[len - 1] = 0;
128
129                 if (strncasecmp(kernbuf, "disable", 7) == 0)
130                         value = 0;
131         }
132
133         return value;
134 }
135
136 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
137                               char *buf)
138 {
139         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
140                                               ll_kset.kobj);
141         struct obd_statfs osfs;
142         int rc;
143
144         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
145         if (rc)
146                 return rc;
147
148         return sprintf(buf, "%u\n", osfs.os_bsize);
149 }
150 LUSTRE_RO_ATTR(blocksize);
151
152 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
153                                    char *buf)
154 {
155         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
156                                               ll_kset.kobj);
157
158         return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
159 }
160
161 static ssize_t stat_blocksize_store(struct kobject *kobj,
162                                     struct attribute *attr,
163                                     const char *buffer,
164                                     size_t count)
165 {
166         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
167                                               ll_kset.kobj);
168         unsigned int val;
169         int rc;
170
171         rc = kstrtouint(buffer, 10, &val);
172         if (rc)
173                 return rc;
174
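        /* only 0 or a power of two of at least PAGE_SIZE is accepted */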
175         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
176                 return -ERANGE;
177
178         sbi->ll_stat_blksize = val;
179
180         return count;
181 }
182 LUSTRE_RW_ATTR(stat_blocksize);
183
184 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
185                                 char *buf)
186 {
187         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
188                                               ll_kset.kobj);
189         struct obd_statfs osfs;
190         u32 blk_size;
191         u64 result;
192         int rc;
193
194         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
195         if (rc)
196                 return rc;
197
198         blk_size = osfs.os_bsize >> 10;
199         result = osfs.os_blocks;
200
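        /*
         * Scale blocks to KiB: for a power-of-two os_bsize of at least 1024
         * (e.g. 4096), result ends up as os_blocks * (os_bsize >> 10).
         */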
201         while (blk_size >>= 1)
202                 result <<= 1;
203
204         return sprintf(buf, "%llu\n", result);
205 }
206 LUSTRE_RO_ATTR(kbytestotal);
207
208 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
209                                char *buf)
210 {
211         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
212                                               ll_kset.kobj);
213         struct obd_statfs osfs;
214         u32 blk_size;
215         u64 result;
216         int rc;
217
218         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
219         if (rc)
220                 return rc;
221
222         blk_size = osfs.os_bsize >> 10;
223         result = osfs.os_bfree;
224
225         while (blk_size >>= 1)
226                 result <<= 1;
227
228         return sprintf(buf, "%llu\n", result);
229 }
230 LUSTRE_RO_ATTR(kbytesfree);
231
232 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
233                                 char *buf)
234 {
235         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
236                                               ll_kset.kobj);
237         struct obd_statfs osfs;
238         u32 blk_size;
239         u64 result;
240         int rc;
241
242         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
243         if (rc)
244                 return rc;
245
246         blk_size = osfs.os_bsize >> 10;
247         result = osfs.os_bavail;
248
249         while (blk_size >>= 1)
250                 result <<= 1;
251
252         return sprintf(buf, "%llu\n", result);
253 }
254 LUSTRE_RO_ATTR(kbytesavail);
255
256 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
257                                char *buf)
258 {
259         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
260                                               ll_kset.kobj);
261         struct obd_statfs osfs;
262         int rc;
263
264         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
265         if (rc)
266                 return rc;
267
268         return sprintf(buf, "%llu\n", osfs.os_files);
269 }
270 LUSTRE_RO_ATTR(filestotal);
271
272 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
273                               char *buf)
274 {
275         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
276                                               ll_kset.kobj);
277         struct obd_statfs osfs;
278         int rc;
279
280         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
281         if (rc)
282                 return rc;
283
284         return sprintf(buf, "%llu\n", osfs.os_ffree);
285 }
286 LUSTRE_RO_ATTR(filesfree);
287
288 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
289                                 char *buf)
290 {
291         return sprintf(buf, "local client\n");
292 }
293 LUSTRE_RO_ATTR(client_type);
294
295 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
296                            char *buf)
297 {
298         return sprintf(buf, "lustre\n");
299 }
300 LUSTRE_RO_ATTR(fstype);
301
302 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
303                          char *buf)
304 {
305         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
306                                               ll_kset.kobj);
307
308         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
309 }
310 LUSTRE_RO_ATTR(uuid);
311
312 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
313 {
314         struct super_block *sb = m->private;
315
316         /*
317          * See description of statistical counters in struct cl_site, and
318          * struct lu_site.
319          */
320         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
321 }
322
323 LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
324
325 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
326 {
327         struct super_block *sb = m->private;
328         struct ll_sb_info *sbi = ll_s2sbi(sb);
329         unsigned long ra_max_mb;
330
331         spin_lock(&sbi->ll_lock);
332         ra_max_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages);
333         spin_unlock(&sbi->ll_lock);
334
335         seq_printf(m, "%lu\n", ra_max_mb);
336         return 0;
337 }
338
339 static ssize_t
340 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
341                               size_t count, loff_t *off)
342 {
343         struct seq_file *m = file->private_data;
344         struct super_block *sb = m->private;
345         struct ll_sb_info *sbi = ll_s2sbi(sb);
346         s64 ra_max_mb, pages_number;
347         int rc;
348
349         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_mb, 'M');
350         if (rc)
351                 return rc;
352
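        /*
         * ra_max_mb is a byte count at this point (the string was parsed
         * with a default unit of 'M'); round it up to a whole MiB, then
         * convert to pages.
         */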
353         pages_number = round_up(ra_max_mb, 1024 * 1024) >> PAGE_SHIFT;
354         if (pages_number < 0 || pages_number > cfs_totalram_pages() / 2) {
355                 /* 1/2 of RAM */
356                 CERROR("%s: can't set max_readahead_mb=%llu > %luMB\n",
357                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
358                        PAGES_TO_MiB(cfs_totalram_pages()));
359                 return -ERANGE;
360         }
361
362         spin_lock(&sbi->ll_lock);
363         sbi->ll_ra_info.ra_max_pages = pages_number;
364         spin_unlock(&sbi->ll_lock);
365
366         return count;
367 }
368
369 LDEBUGFS_SEQ_FOPS(ll_max_readahead_mb);
370
371 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
372 {
373         struct super_block *sb = m->private;
374         struct ll_sb_info *sbi = ll_s2sbi(sb);
375         unsigned long ra_max_file_mb;
376
377         spin_lock(&sbi->ll_lock);
378         ra_max_file_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file);
379         spin_unlock(&sbi->ll_lock);
380
381         seq_printf(m, "%lu\n", ra_max_file_mb);
382         return 0;
383 }
384
385 static ssize_t
386 ll_max_readahead_per_file_mb_seq_write(struct file *file,
387                                        const char __user *buffer,
388                                        size_t count, loff_t *off)
389 {
390         struct seq_file *m = file->private_data;
391         struct super_block *sb = m->private;
392         struct ll_sb_info *sbi = ll_s2sbi(sb);
393         s64 ra_max_file_mb, pages_number;
394         int rc;
395
396         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_file_mb,
397                                            'M');
398         if (rc)
399                 return rc;
400
401         pages_number = round_up(ra_max_file_mb, 1024 * 1024) >> PAGE_SHIFT;
402         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
403                 CERROR("%s: can't set max_readahead_per_file_mb=%llu > max_read_ahead_mb=%lu\n",
404                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
405                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
406                 return -ERANGE;
407         }
408
409         spin_lock(&sbi->ll_lock);
410         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
411         spin_unlock(&sbi->ll_lock);
412
413         return count;
414 }
415
416 LDEBUGFS_SEQ_FOPS(ll_max_readahead_per_file_mb);
417
418 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
419 {
420         struct super_block *sb = m->private;
421         struct ll_sb_info *sbi = ll_s2sbi(sb);
422         unsigned long ra_max_whole_mb;
423
424         spin_lock(&sbi->ll_lock);
425         ra_max_whole_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages);
426         spin_unlock(&sbi->ll_lock);
427
428         seq_printf(m, "%lu\n", ra_max_whole_mb);
429         return 0;
430 }
431
432 static ssize_t
433 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
434                                      const char __user *buffer,
435                                      size_t count, loff_t *off)
436 {
437         struct seq_file *m = file->private_data;
438         struct super_block *sb = m->private;
439         struct ll_sb_info *sbi = ll_s2sbi(sb);
440         s64 ra_max_whole_mb, pages_number;
441         int rc;
442
443         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_whole_mb,
444                                            'M');
445         if (rc)
446                 return rc;
447
448         pages_number = round_up(ra_max_whole_mb, 1024 * 1024) >> PAGE_SHIFT;
449         /* Cap this at the current max readahead window size, the readahead
450          * algorithm does this anyway so it's pointless to set it larger.
451          */
452         if (pages_number < 0 ||
453             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
454                 CERROR("%s: can't set max_read_ahead_whole_mb=%llu > max_read_ahead_per_file_mb=%lu\n",
455                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
456                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
457                 return -ERANGE;
458         }
459
460         spin_lock(&sbi->ll_lock);
461         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
462         spin_unlock(&sbi->ll_lock);
463
464         return count;
465 }
466
467 LDEBUGFS_SEQ_FOPS(ll_max_read_ahead_whole_mb);
468
469 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
470 {
471         struct super_block     *sb    = m->private;
472         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
473         struct cl_client_cache *cache = sbi->ll_cache;
474         long max_cached_mb;
475         long unused_mb;
476
477         max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
478         unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
479         seq_printf(m, "users: %d\n"
480                       "max_cached_mb: %ld\n"
481                       "used_mb: %ld\n"
482                       "unused_mb: %ld\n"
483                       "reclaim_count: %u\n",
484                    atomic_read(&cache->ccc_users),
485                    max_cached_mb,
486                    max_cached_mb - unused_mb,
487                    unused_mb,
488                    cache->ccc_lru_shrinkers);
489         return 0;
490 }
491
492 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
493                                           const char __user *buffer,
494                                           size_t count, loff_t *off)
495 {
496         struct seq_file *m = file->private_data;
497         struct super_block *sb = m->private;
498         struct ll_sb_info *sbi = ll_s2sbi(sb);
499         struct cl_client_cache *cache = sbi->ll_cache;
500         struct lu_env *env;
501         long diff = 0;
502         long nrpages = 0;
503         __u16 refcheck;
504         __s64 pages_number;
505         int rc;
506         char kernbuf[128];
507
508         ENTRY;
509         if (count >= sizeof(kernbuf))
510                 RETURN(-EINVAL);
511
512         if (copy_from_user(kernbuf, buffer, count))
513                 RETURN(-EFAULT);
514         kernbuf[count] = 0;
515
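        /*
         * Allow the "max_cached_mb:" label printed by the show method to be
         * echoed back: advance the user-space pointer past it before parsing.
         */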
516         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
517                   kernbuf;
518         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
519         if (rc)
520                 RETURN(rc);
521
522         pages_number >>= PAGE_SHIFT;
523
524         if (pages_number < 0 || pages_number > cfs_totalram_pages()) {
525                 CERROR("%s: can't set max cache more than %lu MB\n",
526                        sbi->ll_fsname,
527                        PAGES_TO_MiB(cfs_totalram_pages()));
528                 RETURN(-ERANGE);
529         }
530         /* Allow enough cache so clients can make well-formed RPCs */
531         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
532
533         spin_lock(&sbi->ll_lock);
534         diff = pages_number - cache->ccc_lru_max;
535         spin_unlock(&sbi->ll_lock);
536
537         /* easy - add more LRU slots. */
538         if (diff >= 0) {
539                 atomic_long_add(diff, &cache->ccc_lru_left);
540                 GOTO(out, rc = 0);
541         }
542
543         env = cl_env_get(&refcheck);
544         if (IS_ERR(env))
545                 RETURN(PTR_ERR(env));
546
547         diff = -diff;
548         while (diff > 0) {
549                 long tmp;
550
551                 /* reduce LRU budget from free slots. */
552                 do {
553                         long ov, nv;
554
555                         ov = atomic_long_read(&cache->ccc_lru_left);
556                         if (ov == 0)
557                                 break;
558
559                         nv = ov > diff ? ov - diff : 0;
560                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
561                         if (likely(ov == rc)) {
562                                 diff -= ov - nv;
563                                 nrpages += ov - nv;
564                                 break;
565                         }
566                 } while (1);
567
568                 if (diff <= 0)
569                         break;
570
571                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
572                         rc = -ENODEV;
573                         break;
574                 }
575
576                 /* difficult - have to ask OSCs to drop LRU slots. */
577                 tmp = diff << 1;
578                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
579                                 sizeof(KEY_CACHE_LRU_SHRINK),
580                                 KEY_CACHE_LRU_SHRINK,
581                                 sizeof(tmp), &tmp, NULL);
582                 if (rc < 0)
583                         break;
584         }
585         cl_env_put(env, &refcheck);
586
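        /*
         * On success record the new limit and return the full write count;
         * on failure return any pages reserved above to the LRU pool.
         */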
587 out:
588         if (rc >= 0) {
589                 spin_lock(&sbi->ll_lock);
590                 cache->ccc_lru_max = pages_number;
591                 spin_unlock(&sbi->ll_lock);
592                 rc = count;
593         } else {
594                 atomic_long_add(nrpages, &cache->ccc_lru_left);
595         }
596         return rc;
597 }
598
599 LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
600
601 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
602                               char *buf)
603 {
604         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
605                                               ll_kset.kobj);
606
607         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
608 }
609
610 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
611                                const char *buffer, size_t count)
612 {
613         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
614                                               ll_kset.kobj);
615         bool val;
616         int tmp;
617         int rc;
618
619         if (!sbi->ll_dt_exp)
620                 /* Not set up yet */
621                 return -EAGAIN;
622
623         rc = kstrtobool(buffer, &val);
624         if (rc)
625                 return rc;
626         if (val)
627                 sbi->ll_flags |= LL_SBI_CHECKSUM;
628         else
629                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
630         tmp = val;
631
632         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
633                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
634         if (rc)
635                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
636
637         return count;
638 }
639 LUSTRE_RW_ATTR(checksums);
640
641 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
642
643 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
644                               enum stats_track_type type)
645 {
646         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
647                                               ll_kset.kobj);
648
649         if (sbi->ll_stats_track_type == type)
650                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
651         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
652                 return sprintf(buf, "0 (all)\n");
653
654         return sprintf(buf, "untracked\n");
655 }
656
657 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
658                               size_t count, enum stats_track_type type)
659 {
660         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
661                                               ll_kset.kobj);
662         unsigned long pid;
663         int rc;
664
665         rc = kstrtoul(buffer, 10, &pid);
666         if (rc)
667                 return rc;
668
669         sbi->ll_stats_track_id = pid;
670         if (pid == 0)
671                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
672         else
673                 sbi->ll_stats_track_type = type;
674         lprocfs_clear_stats(sbi->ll_stats);
675         return count;
676 }
677
678 static ssize_t stats_track_pid_show(struct kobject *kobj,
679                                     struct attribute *attr,
680                                     char *buf)
681 {
682         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
683 }
684
685 static ssize_t stats_track_pid_store(struct kobject *kobj,
686                                      struct attribute *attr,
687                                      const char *buffer,
688                                      size_t count)
689 {
690         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
691 }
692 LUSTRE_RW_ATTR(stats_track_pid);
693
694 static ssize_t stats_track_ppid_show(struct kobject *kobj,
695                                      struct attribute *attr,
696                                      char *buf)
697 {
698         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
699 }
700
701 static ssize_t stats_track_ppid_store(struct kobject *kobj,
702                                       struct attribute *attr,
703                                       const char *buffer,
704                                       size_t count)
705 {
706         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
707 }
708 LUSTRE_RW_ATTR(stats_track_ppid);
709
710 static ssize_t stats_track_gid_show(struct kobject *kobj,
711                                     struct attribute *attr,
712                                     char *buf)
713 {
714         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
715 }
716
717 static ssize_t stats_track_gid_store(struct kobject *kobj,
718                                      struct attribute *attr,
719                                      const char *buffer,
720                                      size_t count)
721 {
722         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
723 }
724 LUSTRE_RW_ATTR(stats_track_gid);
725
726 static ssize_t statahead_running_max_show(struct kobject *kobj,
727                                           struct attribute *attr,
728                                           char *buf)
729 {
730         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
731                                               ll_kset.kobj);
732
733         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
734 }
735
736 static ssize_t statahead_running_max_store(struct kobject *kobj,
737                                            struct attribute *attr,
738                                            const char *buffer,
739                                            size_t count)
740 {
741         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
742                                               ll_kset.kobj);
743         unsigned long val;
744         int rc;
745
746         rc = kstrtoul(buffer, 0, &val);
747         if (rc)
748                 return rc;
749
750         if (val <= LL_SA_RUNNING_MAX) {
751                 sbi->ll_sa_running_max = val;
752                 return count;
753         }
754
755                 CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
756                        val, LL_SA_RUNNING_MAX);
757
758         return -ERANGE;
759 }
760 LUSTRE_RW_ATTR(statahead_running_max);
761
762 static ssize_t statahead_max_show(struct kobject *kobj,
763                                   struct attribute *attr,
764                                   char *buf)
765 {
766         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
767                                               ll_kset.kobj);
768
769         return sprintf(buf, "%u\n", sbi->ll_sa_max);
770 }
771
772 static ssize_t statahead_max_store(struct kobject *kobj,
773                                    struct attribute *attr,
774                                    const char *buffer,
775                                    size_t count)
776 {
777         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
778                                               ll_kset.kobj);
779         unsigned long val;
780         int rc;
781
782         rc = kstrtoul(buffer, 0, &val);
783         if (rc)
784                 return rc;
785
786         if (val <= LL_SA_RPC_MAX)
787                 sbi->ll_sa_max = val;
788         else
789                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
790                        val, LL_SA_RPC_MAX);
791
792         return count;
793 }
794 LUSTRE_RW_ATTR(statahead_max);
795
796 static ssize_t statahead_agl_show(struct kobject *kobj,
797                                   struct attribute *attr,
798                                   char *buf)
799 {
800         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
801                                               ll_kset.kobj);
802
803         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
804 }
805
806 static ssize_t statahead_agl_store(struct kobject *kobj,
807                                    struct attribute *attr,
808                                    const char *buffer,
809                                    size_t count)
810 {
811         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
812                                               ll_kset.kobj);
813         bool val;
814         int rc;
815
816         rc = kstrtobool(buffer, &val);
817         if (rc)
818                 return rc;
819
820         if (val)
821                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
822         else
823                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
824
825         return count;
826 }
827 LUSTRE_RW_ATTR(statahead_agl);
828
829 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
830 {
831         struct super_block *sb = m->private;
832         struct ll_sb_info *sbi = ll_s2sbi(sb);
833
834         seq_printf(m, "statahead total: %u\n"
835                       "statahead wrong: %u\n"
836                       "agl total: %u\n",
837                    atomic_read(&sbi->ll_sa_total),
838                    atomic_read(&sbi->ll_sa_wrong),
839                    atomic_read(&sbi->ll_agl_total));
840         return 0;
841 }
842
843 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
844
845 static ssize_t lazystatfs_show(struct kobject *kobj,
846                                struct attribute *attr,
847                                char *buf)
848 {
849         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
850                                               ll_kset.kobj);
851
852         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
853 }
854
855 static ssize_t lazystatfs_store(struct kobject *kobj,
856                                 struct attribute *attr,
857                                 const char *buffer,
858                                 size_t count)
859 {
860         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
861                                               ll_kset.kobj);
862         bool val;
863         int rc;
864
865         rc = kstrtobool(buffer, &val);
866         if (rc)
867                 return rc;
868
869         if (val)
870                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
871         else
872                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
873
874         return count;
875 }
876 LUSTRE_RW_ATTR(lazystatfs);
877
878 static ssize_t statfs_max_age_show(struct kobject *kobj, struct attribute *attr,
879                                    char *buf)
880 {
881         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
882                                               ll_kset.kobj);
883
884         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
885 }
886
887 static ssize_t statfs_max_age_store(struct kobject *kobj,
888                                     struct attribute *attr, const char *buffer,
889                                     size_t count)
890 {
891         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
892                                               ll_kset.kobj);
893         unsigned int val;
894         int rc;
895
896         rc = kstrtouint(buffer, 10, &val);
897         if (rc)
898                 return rc;
899         if (val > OBD_STATFS_CACHE_MAX_AGE)
900                 return -EINVAL;
901
902         sbi->ll_statfs_max_age = val;
903
904         return count;
905 }
906 LUSTRE_RW_ATTR(statfs_max_age);
907
908 static ssize_t max_easize_show(struct kobject *kobj,
909                                struct attribute *attr,
910                                char *buf)
911 {
912         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
913                                               ll_kset.kobj);
914         unsigned int ealen;
915         int rc;
916
917         rc = ll_get_max_mdsize(sbi, &ealen);
918         if (rc)
919                 return rc;
920
921         return sprintf(buf, "%u\n", ealen);
922 }
923 LUSTRE_RO_ATTR(max_easize);
924
925 /**
926  * Get default_easize.
927  *
928  * \see client_obd::cl_default_mds_easize
929  *
930  * \param[in] kobj      kobject for the sysfs entry
931  * \param[in] attr      attribute of the kobject (unused)
932  * \param[out] buf      output buffer for the formatted value
933  * \retval positive     number of bytes written to \a buf on success
934  * \retval negative     negated errno on failure
935  */
936 static ssize_t default_easize_show(struct kobject *kobj,
937                                    struct attribute *attr,
938                                    char *buf)
939 {
940         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
941                                               ll_kset.kobj);
942         unsigned int ealen;
943         int rc;
944
945         rc = ll_get_default_mdsize(sbi, &ealen);
946         if (rc)
947                 return rc;
948
949         return sprintf(buf, "%u\n", ealen);
950 }
951
952 /**
953  * Set default_easize.
954  *
955  * Range checking on the passed value is handled by
956  * ll_set_default_mdsize().
957  *
958  * \see client_obd::cl_default_mds_easize
959  *
960  * \param[in] kobj      kobject for the sysfs entry
961  * \param[in] attr      attribute of the kobject (unused)
962  * \param[in] buffer    string passed from user space
963  * \param[in] count     \a buffer length
964  *
965  * \retval positive     \a count on success
966  * \retval negative     negated errno on failure
967  */
968 static ssize_t default_easize_store(struct kobject *kobj,
969                                     struct attribute *attr,
970                                     const char *buffer,
971                                     size_t count)
972 {
973         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
974                                               ll_kset.kobj);
975         unsigned int val;
976         int rc;
977
978         if (count == 0)
979                 return 0;
980
981         rc = kstrtouint(buffer, 10, &val);
982         if (rc)
983                 return rc;
984
985         rc = ll_set_default_mdsize(sbi, val);
986         if (rc)
987                 return rc;
988
989         return count;
990 }
991 LUSTRE_RW_ATTR(default_easize);
992
993 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
994 {
995         const char *str[] = LL_SBI_FLAGS;
996         struct super_block *sb = m->private;
997         int flags = ll_s2sbi(sb)->ll_flags;
998         int i = 0;
999
1000         while (flags != 0) {
1001                 if (ARRAY_SIZE(str) <= i) {
1002                         CERROR("%s: LL_SBI_FLAGS is out of sync with the sbi flags and needs to be updated\n",
1003                                ll_s2sbi(sb)->ll_fsname);
1004                         return -EINVAL;
1005                 }
1006
1007                 if (flags & 0x1)
1008                         seq_printf(m, "%s ", str[i]);
1009                 flags >>= 1;
1010                 ++i;
1011         }
1012         seq_printf(m, "\b\n");
1013         return 0;
1014 }
1015
1016 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
1017
1018 static ssize_t xattr_cache_show(struct kobject *kobj,
1019                                 struct attribute *attr,
1020                                 char *buf)
1021 {
1022         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1023                                               ll_kset.kobj);
1024
1025         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
1026 }
1027
1028 static ssize_t xattr_cache_store(struct kobject *kobj,
1029                                  struct attribute *attr,
1030                                  const char *buffer,
1031                                  size_t count)
1032 {
1033         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1034                                               ll_kset.kobj);
1035         bool val;
1036         int rc;
1037
1038         rc = kstrtobool(buffer, &val);
1039         if (rc)
1040                 return rc;
1041
1042         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
1043                 return -ENOTSUPP;
1044
1045         sbi->ll_xattr_cache_enabled = val;
1046         sbi->ll_xattr_cache_set = 1;
1047
1048         return count;
1049 }
1050 LUSTRE_RW_ATTR(xattr_cache);
1051
1052 static ssize_t tiny_write_show(struct kobject *kobj,
1053                                struct attribute *attr,
1054                                char *buf)
1055 {
1056         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1057                                               ll_kset.kobj);
1058
1059         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
1060 }
1061
1062 static ssize_t tiny_write_store(struct kobject *kobj,
1063                                 struct attribute *attr,
1064                                 const char *buffer,
1065                                 size_t count)
1066 {
1067         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1068                                               ll_kset.kobj);
1069         bool val;
1070         int rc;
1071
1072         rc = kstrtobool(buffer, &val);
1073         if (rc)
1074                 return rc;
1075
1076         spin_lock(&sbi->ll_lock);
1077         if (val)
1078                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
1079         else
1080                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1081         spin_unlock(&sbi->ll_lock);
1082
1083         return count;
1084 }
1085 LUSTRE_RW_ATTR(tiny_write);
1086
1087 static ssize_t max_read_ahead_async_active_show(struct kobject *kobj,
1088                                                struct attribute *attr,
1089                                                char *buf)
1090 {
1091         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1092                                               ll_kset.kobj);
1093
1094         return snprintf(buf, PAGE_SIZE, "%u\n",
1095                         sbi->ll_ra_info.ra_async_max_active);
1096 }
1097
1098 static ssize_t max_read_ahead_async_active_store(struct kobject *kobj,
1099                                                 struct attribute *attr,
1100                                                 const char *buffer,
1101                                                 size_t count)
1102 {
1103         unsigned int val;
1104         int rc;
1105         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1106                                               ll_kset.kobj);
1107
1108         rc = kstrtouint(buffer, 10, &val);
1109         if (rc)
1110                 return rc;
1111
1112         if (val < 1 || val > WQ_UNBOUND_MAX_ACTIVE) {
1113                 CERROR("%s: cannot set max_read_ahead_async_active=%u %s than %u\n",
1114                        sbi->ll_fsname, val,
1115                        val < 1 ? "smaller" : "larger",
1116                        val < 1 ? 1 : WQ_UNBOUND_MAX_ACTIVE);
1117                 return -ERANGE;
1118         }
1119
1120         sbi->ll_ra_info.ra_async_max_active = val;
1121         workqueue_set_max_active(sbi->ll_ra_info.ll_readahead_wq, val);
1122
1123         return count;
1124 }
1125 LUSTRE_RW_ATTR(max_read_ahead_async_active);
1126
1127 static ssize_t read_ahead_async_file_threshold_mb_show(struct kobject *kobj,
1128                                                        struct attribute *attr,
1129                                                        char *buf)
1130 {
1131         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1132                                               ll_kset.kobj);
1133
1134         return snprintf(buf, PAGE_SIZE, "%lu\n",
1135              PAGES_TO_MiB(sbi->ll_ra_info.ra_async_pages_per_file_threshold));
1136 }
1137
1138 static ssize_t
1139 read_ahead_async_file_threshold_mb_store(struct kobject *kobj,
1140                                          struct attribute *attr,
1141                                          const char *buffer, size_t count)
1142 {
1143         unsigned long pages_number;
1144         unsigned long max_ra_per_file;
1145         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1146                                               ll_kset.kobj);
1147         int rc;
1148
1149         rc = kstrtoul(buffer, 10, &pages_number);
1150         if (rc)
1151                 return rc;
1152
1153         pages_number = MiB_TO_PAGES(pages_number);
1154         max_ra_per_file = sbi->ll_ra_info.ra_max_pages_per_file;
1155         if (pages_number > max_ra_per_file) {
1156                 CERROR("%s: can't set read_ahead_async_file_threshold_mb=%lu > max_read_ahead_per_file_mb=%lu\n",
1157                        sbi->ll_fsname,
1158                        PAGES_TO_MiB(pages_number),
1159                        PAGES_TO_MiB(max_ra_per_file));
1160                 return -ERANGE;
1161         }
1162         sbi->ll_ra_info.ra_async_pages_per_file_threshold = pages_number;
1163
1164         return count;
1165 }
1166 LUSTRE_RW_ATTR(read_ahead_async_file_threshold_mb);
1167
1168 static ssize_t fast_read_show(struct kobject *kobj,
1169                               struct attribute *attr,
1170                               char *buf)
1171 {
1172         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1173                                               ll_kset.kobj);
1174
1175         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1176 }
1177
1178 static ssize_t fast_read_store(struct kobject *kobj,
1179                                struct attribute *attr,
1180                                const char *buffer,
1181                                size_t count)
1182 {
1183         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1184                                               ll_kset.kobj);
1185         bool val;
1186         int rc;
1187
1188         rc = kstrtobool(buffer, &val);
1189         if (rc)
1190                 return rc;
1191
1192         spin_lock(&sbi->ll_lock);
1193         if (val)
1194                 sbi->ll_flags |= LL_SBI_FAST_READ;
1195         else
1196                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1197         spin_unlock(&sbi->ll_lock);
1198
1199         return count;
1200 }
1201 LUSTRE_RW_ATTR(fast_read);
1202
1203 static ssize_t file_heat_show(struct kobject *kobj,
1204                               struct attribute *attr,
1205                               char *buf)
1206 {
1207         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1208                                               ll_kset.kobj);
1209
1210         return snprintf(buf, PAGE_SIZE, "%u\n",
1211                         !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
1212 }
1213
1214 static ssize_t file_heat_store(struct kobject *kobj,
1215                                struct attribute *attr,
1216                                const char *buffer,
1217                                size_t count)
1218 {
1219         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1220                                               ll_kset.kobj);
1221         bool val;
1222         int rc;
1223
1224         rc = kstrtobool(buffer, &val);
1225         if (rc)
1226                 return rc;
1227
1228         spin_lock(&sbi->ll_lock);
1229         if (val)
1230                 sbi->ll_flags |= LL_SBI_FILE_HEAT;
1231         else
1232                 sbi->ll_flags &= ~LL_SBI_FILE_HEAT;
1233         spin_unlock(&sbi->ll_lock);
1234
1235         return count;
1236 }
1237 LUSTRE_RW_ATTR(file_heat);
1238
1239 static ssize_t heat_decay_percentage_show(struct kobject *kobj,
1240                                           struct attribute *attr,
1241                                           char *buf)
1242 {
1243         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1244                                               ll_kset.kobj);
1245
1246         return snprintf(buf, PAGE_SIZE, "%u\n",
1247                        (sbi->ll_heat_decay_weight * 100 + 128) / 256);
1248 }
1249
1250 static ssize_t heat_decay_percentage_store(struct kobject *kobj,
1251                                            struct attribute *attr,
1252                                            const char *buffer,
1253                                            size_t count)
1254 {
1255         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1256                                               ll_kset.kobj);
1257         unsigned long val;
1258         int rc;
1259
1260         rc = kstrtoul(buffer, 10, &val);
1261         if (rc)
1262                 return rc;
1263
1264         if (val > 100)
1265                 return -ERANGE;
1266
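        /*
         * The decay weight is stored on a 0..256 fixed-point scale; the +50
         * here (and the +128 in the show method) round the conversion to the
         * nearest integer.
         */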
1267         sbi->ll_heat_decay_weight = (val * 256 + 50) / 100;
1268
1269         return count;
1270 }
1271 LUSTRE_RW_ATTR(heat_decay_percentage);
1272
1273 static ssize_t heat_period_second_show(struct kobject *kobj,
1274                                        struct attribute *attr,
1275                                        char *buf)
1276 {
1277         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1278                                               ll_kset.kobj);
1279
1280         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
1281 }
1282
1283 static ssize_t heat_period_second_store(struct kobject *kobj,
1284                                         struct attribute *attr,
1285                                         const char *buffer,
1286                                         size_t count)
1287 {
1288         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1289                                               ll_kset.kobj);
1290         unsigned long val;
1291         int rc;
1292
1293         rc = kstrtoul(buffer, 10, &val);
1294         if (rc)
1295                 return rc;
1296
1297         if (val == 0)
1298                 return -ERANGE;
1299
1300         sbi->ll_heat_period_second = val;
1301
1302         return count;
1303 }
1304 LUSTRE_RW_ATTR(heat_period_second);
1305
1306 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1307 {
1308         struct super_block      *sb    = m->private;
1309         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1310         struct cl_client_cache  *cache = sbi->ll_cache;
1311         long pages;
1312         int mb;
1313
1314         pages = atomic_long_read(&cache->ccc_unstable_nr);
1315         mb    = (pages * PAGE_SIZE) >> 20;
1316
1317         seq_printf(m, "unstable_check:     %8d\n"
1318                       "unstable_pages: %12ld\n"
1319                       "unstable_mb:        %8d\n",
1320                    cache->ccc_unstable_check, pages, mb);
1321         return 0;
1322 }
1323
1324 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1325                                            const char __user *buffer,
1326                                            size_t count, loff_t *unused)
1327 {
1328         struct seq_file *seq = file->private_data;
1329         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1330         char kernbuf[128];
1331         bool val;
1332         int rc;
1333
1334         if (count == 0)
1335                 return 0;
1336         if (count >= sizeof(kernbuf))
1337                 return -EINVAL;
1338
1339         if (copy_from_user(kernbuf, buffer, count))
1340                 return -EFAULT;
1341         kernbuf[count] = 0;
1342
1343         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1344                   kernbuf;
1345         rc = kstrtobool_from_user(buffer, count, &val);
1346         if (rc < 0)
1347                 return rc;
1348
1349         /* borrow lru lock to set the value */
1350         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1351         sbi->ll_cache->ccc_unstable_check = val;
1352         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1353
1354         return count;
1355 }
1356
1357 LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
1358
1359 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1360 {
1361         struct super_block *sb = m->private;
1362         struct ll_sb_info *sbi = ll_s2sbi(sb);
1363         struct root_squash_info *squash = &sbi->ll_squash;
1364
1365         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1366         return 0;
1367 }
1368
1369 static ssize_t ll_root_squash_seq_write(struct file *file,
1370                                         const char __user *buffer,
1371                                         size_t count, loff_t *off)
1372 {
1373         struct seq_file *m = file->private_data;
1374         struct super_block *sb = m->private;
1375         struct ll_sb_info *sbi = ll_s2sbi(sb);
1376         struct root_squash_info *squash = &sbi->ll_squash;
1377
1378         return lprocfs_wr_root_squash(buffer, count, squash, sbi->ll_fsname);
1379 }
1380
1381 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1382
1383 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1384 {
1385         struct super_block *sb = m->private;
1386         struct ll_sb_info *sbi = ll_s2sbi(sb);
1387         struct root_squash_info *squash = &sbi->ll_squash;
1388         int len;
1389
1390         spin_lock(&squash->rsi_lock);
1391         if (!list_empty(&squash->rsi_nosquash_nids)) {
1392                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1393                                         &squash->rsi_nosquash_nids);
1394                 m->count += len;
1395                 seq_putc(m, '\n');
1396         } else {
1397                 seq_puts(m, "NONE\n");
1398         }
1399         spin_unlock(&squash->rsi_lock);
1400
1401         return 0;
1402 }
1403
1404 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1405                                           const char __user *buffer,
1406                                           size_t count, loff_t *off)
1407 {
1408         struct seq_file *m = file->private_data;
1409         struct super_block *sb = m->private;
1410         struct ll_sb_info *sbi = ll_s2sbi(sb);
1411         struct root_squash_info *squash = &sbi->ll_squash;
1412         int rc;
1413
1414         rc = lprocfs_wr_nosquash_nids(buffer, count, squash, sbi->ll_fsname);
1415         if (rc < 0)
1416                 return rc;
1417
1418         ll_compute_rootsquash_state(sbi);
1419
1420         return rc;
1421 }
1422
1423 LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1424
1425 static int ll_pcc_seq_show(struct seq_file *m, void *v)
1426 {
1427         struct super_block *sb = m->private;
1428         struct ll_sb_info *sbi = ll_s2sbi(sb);
1429
1430         return pcc_super_dump(&sbi->ll_pcc_super, m);
1431 }
1432
1433 static ssize_t ll_pcc_seq_write(struct file *file, const char __user *buffer,
1434                                 size_t count, loff_t *off)
1435 {
1436         struct seq_file *m = file->private_data;
1437         struct super_block *sb = m->private;
1438         struct ll_sb_info *sbi = ll_s2sbi(sb);
1439         int rc;
1440         char *kernbuf;
1441
1442         if (count >= LPROCFS_WR_PCC_MAX_CMD)
1443                 return -EINVAL;
1444
1445         if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_PCC))
1446                 return -EOPNOTSUPP;
1447
1448         OBD_ALLOC(kernbuf, count + 1);
1449         if (kernbuf == NULL)
1450                 return -ENOMEM;
1451
1452         if (copy_from_user(kernbuf, buffer, count))
1453                 GOTO(out_free_kernbuff, rc = -EFAULT);
1454
1455         rc = pcc_cmd_handle(kernbuf, count, &sbi->ll_pcc_super);
1456 out_free_kernbuff:
1457         OBD_FREE(kernbuf, count + 1);
1458         return rc ? rc : count;
1459 }
1460 LPROC_SEQ_FOPS(ll_pcc);
1461
1462 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1463         { .name =       "site",
1464           .fops =       &ll_site_stats_fops                     },
1465         { .name =       "max_read_ahead_mb",
1466           .fops =       &ll_max_readahead_mb_fops               },
1467         { .name =       "max_read_ahead_per_file_mb",
1468           .fops =       &ll_max_readahead_per_file_mb_fops      },
1469         { .name =       "max_read_ahead_whole_mb",
1470           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1471         { .name =       "max_cached_mb",
1472           .fops =       &ll_max_cached_mb_fops                  },
1473         { .name =       "statahead_stats",
1474           .fops =       &ll_statahead_stats_fops                },
1475         { .name =       "unstable_stats",
1476           .fops =       &ll_unstable_stats_fops                 },
1477         { .name =       "sbi_flags",
1478           .fops =       &ll_sbi_flags_fops                      },
1479         { .name =       "root_squash",
1480           .fops =       &ll_root_squash_fops                    },
1481         { .name =       "nosquash_nids",
1482           .fops =       &ll_nosquash_nids_fops                  },
1483         { .name =       "pcc",
1484           .fops =       &ll_pcc_fops,                           },
1485         { NULL }
1486 };
1487
1488 #define MAX_STRING_SIZE 128
1489
1490 static struct attribute *llite_attrs[] = {
1491         &lustre_attr_blocksize.attr,
1492         &lustre_attr_stat_blocksize.attr,
1493         &lustre_attr_kbytestotal.attr,
1494         &lustre_attr_kbytesfree.attr,
1495         &lustre_attr_kbytesavail.attr,
1496         &lustre_attr_filestotal.attr,
1497         &lustre_attr_filesfree.attr,
1498         &lustre_attr_client_type.attr,
1499         &lustre_attr_fstype.attr,
1500         &lustre_attr_uuid.attr,
1501         &lustre_attr_checksums.attr,
1502         &lustre_attr_checksum_pages.attr,
1503         &lustre_attr_stats_track_pid.attr,
1504         &lustre_attr_stats_track_ppid.attr,
1505         &lustre_attr_stats_track_gid.attr,
1506         &lustre_attr_statahead_running_max.attr,
1507         &lustre_attr_statahead_max.attr,
1508         &lustre_attr_statahead_agl.attr,
1509         &lustre_attr_lazystatfs.attr,
1510         &lustre_attr_statfs_max_age.attr,
1511         &lustre_attr_max_easize.attr,
1512         &lustre_attr_default_easize.attr,
1513         &lustre_attr_xattr_cache.attr,
1514         &lustre_attr_fast_read.attr,
1515         &lustre_attr_tiny_write.attr,
1516         &lustre_attr_file_heat.attr,
1517         &lustre_attr_heat_decay_percentage.attr,
1518         &lustre_attr_heat_period_second.attr,
1519         &lustre_attr_max_read_ahead_async_active.attr,
1520         &lustre_attr_read_ahead_async_file_threshold_mb.attr,
1521         NULL,
1522 };
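
/*
 * A minimal sketch (all names below are hypothetical) of how a new per-mount
 * sysfs tunable is wired up in this file: define a show (and optionally a
 * store) callback that recovers the ll_sb_info from ll_kset.kobj, wrap it
 * with LUSTRE_RO_ATTR()/LUSTRE_RW_ATTR(), and add the generated attribute to
 * llite_attrs[] above:
 *
 *	static ssize_t example_tunable_show(struct kobject *kobj,
 *					    struct attribute *attr, char *buf)
 *	{
 *		struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
 *						      ll_kset.kobj);
 *
 *		return sprintf(buf, "%u\n", sbi->ll_example_tunable);
 *	}
 *	LUSTRE_RO_ATTR(example_tunable);
 *
 *	...
 *	&lustre_attr_example_tunable.attr,
 */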
1523
1524 static void sbi_kobj_release(struct kobject *kobj)
1525 {
1526         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1527                                               ll_kset.kobj);
1528         complete(&sbi->ll_kobj_unregister);
1529 }
1530
1531 static struct kobj_type sbi_ktype = {
1532         .default_attrs  = llite_attrs,
1533         .sysfs_ops      = &lustre_sysfs_ops,
1534         .release        = sbi_kobj_release,
1535 };
1536
1537 static const struct llite_file_opcode {
1538         __u32       opcode;
1539         __u32       type;
1540         const char *opname;
1541 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1542         /* file operation */
1543         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1544                                    "read_bytes" },
1545         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1546                                    "write_bytes" },
1547         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1548         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1549         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1550         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1551         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1552         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1553         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1554         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1555         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1556         /* inode operation */
1557         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1558         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1559         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1560         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1561         /* dir inode operation */
1562         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1563         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1564         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1565         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1566         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1567         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1568         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1569         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1570         /* special inode operation */
1571         { LPROC_LL_STATFS,         LPROCFS_TYPE_REGS, "statfs" },
1572         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1573         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1574         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1575         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1576         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1577         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1578         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1579 };
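/*
 * This table drives the counter setup loop in ll_debugfs_register_super():
 * the type flags select the units label shown in the "stats" file ("regs",
 * "bytes" or "pages"), and LPROCFS_CNTR_AVGMINMAX additionally keeps
 * min/max/sum for the byte counters (read_bytes, write_bytes) so averages
 * can be reported.
 */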
1580
1581 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1582 {
1583         if (!sbi->ll_stats)
1584                 return;
1585         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1586                 lprocfs_counter_add(sbi->ll_stats, op, count);
1587         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1588                  sbi->ll_stats_track_id == current->pid)
1589                 lprocfs_counter_add(sbi->ll_stats, op, count);
1590         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1591                  sbi->ll_stats_track_id == current->parent->pid)
1592                 lprocfs_counter_add(sbi->ll_stats, op, count);
1593         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1594                  sbi->ll_stats_track_id ==
1595                         from_kgid(&init_user_ns, current_gid()))
1596                 lprocfs_counter_add(sbi->ll_stats, op, count);
1597 }
1598 EXPORT_SYMBOL(ll_stats_ops_tally);
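/*
 * ll_stats_ops_tally() is the entry point the rest of llite uses to bump the
 * per-mount "stats" counters, honouring the stats_track_{pid,ppid,gid}
 * filters configured through sysfs.  A typical call site looks roughly like
 * the following (illustrative only):
 *
 *	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
 */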
1599
1600 static const char *ra_stat_string[] = {
1601         [RA_STAT_HIT] = "hits",
1602         [RA_STAT_MISS] = "misses",
1603         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1604         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1605         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1606         [RA_STAT_FAILED_MATCH] = "failed lock match",
1607         [RA_STAT_DISCARDED] = "read but discarded",
1608         [RA_STAT_ZERO_LEN] = "zero length file",
1609         [RA_STAT_ZERO_WINDOW] = "zero size window",
1610         [RA_STAT_EOF] = "read-ahead to EOF",
1611         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1612         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1613         [RA_STAT_FAILED_REACH_END] = "failed to reach end",
1614         [RA_STAT_ASYNC] = "async readahead",
1615         [RA_STAT_FAILED_FAST_READ] = "failed to fast read",
1616 };
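/*
 * These strings label the read-ahead counters allocated below as
 * sbi->ll_ra_stats and exported as the per-mount "read_ahead_stats" file;
 * each counter is registered with a "pages" units label.
 */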
1617
1618 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1619 {
1620         struct lustre_sb_info *lsi = s2lsi(sb);
1621         struct ll_sb_info *sbi = ll_s2sbi(sb);
1622         int err, id, rc;
1623
1624         ENTRY;
1625         LASSERT(sbi);
1626
1627         if (IS_ERR_OR_NULL(llite_root))
1628                 goto out_ll_kset;
1629
1630         sbi->ll_debugfs_entry = ldebugfs_register(name, llite_root,
1631                                                   lprocfs_llite_obd_vars, sb);
1632         if (IS_ERR_OR_NULL(sbi->ll_debugfs_entry)) {
1633                 err = sbi->ll_debugfs_entry ? PTR_ERR(sbi->ll_debugfs_entry) :
1634                                               -ENOMEM;
1635                 sbi->ll_debugfs_entry = NULL;
1636                 RETURN(err);
1637         }
1638
1639         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache",
1640                                  0444, &vvp_dump_pgcache_file_ops, sbi);
1641         if (rc)
1642                 CWARN("Error adding the dump_page_cache file\n");
1643
1644         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1645                                  &ll_rw_extents_stats_fops, sbi);
1646         if (rc)
1647                 CWARN("Error adding the extents_stats file\n");
1648
1649         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1650                                  "extents_stats_per_process", 0644,
1651                                  &ll_rw_extents_stats_pp_fops, sbi);
1652         if (rc)
1653                 CWARN("Error adding the extents_stats_per_process file\n");
1654
1655         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1656                                  &ll_rw_offset_stats_fops, sbi);
1657         if (rc)
1658                 CWARN("Error adding the offset_stats file\n");
1659
1660         /* File operations stats */
1661         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1662                                             LPROCFS_STATS_FLAG_NONE);
1663         if (sbi->ll_stats == NULL)
1664                 GOTO(out_debugfs, err = -ENOMEM);
1665
1666         /* do counter init */
1667         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1668                 u32 type = llite_opcode_table[id].type;
1669                 void *ptr = NULL;
1670
1671                 if (type & LPROCFS_TYPE_REGS)
1672                         ptr = "regs";
1673                 else if (type & LPROCFS_TYPE_BYTES)
1674                         ptr = "bytes";
1675                 else if (type & LPROCFS_TYPE_PAGES)
1676                         ptr = "pages";
1677                 lprocfs_counter_init(sbi->ll_stats,
1678                                      llite_opcode_table[id].opcode,
1679                                      (type & LPROCFS_CNTR_AVGMINMAX),
1680                                      llite_opcode_table[id].opname, ptr);
1681         }
1682
1683         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1684                                       sbi->ll_stats);
1685         if (err)
1686                 GOTO(out_stats, err);
1687
1688         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1689                                                LPROCFS_STATS_FLAG_NONE);
1690         if (sbi->ll_ra_stats == NULL)
1691                 GOTO(out_stats, err = -ENOMEM);
1692
1693         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1694                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1695                                      ra_stat_string[id], "pages");
1696
1697         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1698                                       sbi->ll_ra_stats);
1699         if (err)
1700                 GOTO(out_ra_stats, err);
1701
1702 out_ll_kset:
1703         /* Register the sysfs kset for this mount even without debugfs */
1704         sbi->ll_kset.kobj.parent = llite_kobj;
1705         sbi->ll_kset.kobj.ktype = &sbi_ktype;
1706         init_completion(&sbi->ll_kobj_unregister);
1707         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1708         if (err)
1709                 GOTO(out_ra_stats, err);
1710
1711         err = kset_register(&sbi->ll_kset);
1712         if (err)
1713                 GOTO(out_ra_stats, err);
1714
1715         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1716
1717         RETURN(0);
1718 out_ra_stats:
1719         lprocfs_free_stats(&sbi->ll_ra_stats);
1720 out_stats:
1721         lprocfs_free_stats(&sbi->ll_stats);
1722 out_debugfs:
1723         ldebugfs_remove(&sbi->ll_debugfs_entry);
1724
1725         RETURN(err);
1726 }
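/*
 * After a successful ll_debugfs_register_super() a client mount ends up with
 * roughly the following entries (paths are illustrative, assuming debugfs is
 * mounted at /sys/kernel/debug and "name" is the string passed in):
 *
 *	/sys/kernel/debug/lustre/llite/<name>/stats
 *	/sys/kernel/debug/lustre/llite/<name>/read_ahead_stats
 *	/sys/kernel/debug/lustre/llite/<name>/extents_stats[_per_process]
 *	/sys/kernel/debug/lustre/llite/<name>/offset_stats
 *	/sys/kernel/debug/lustre/llite/<name>/dump_page_cache
 *	/sys/fs/lustre/llite/<name>/<one file per llite_attrs entry>
 */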
1727
1728 void ll_debugfs_unregister_super(struct super_block *sb)
1729 {
1730         struct lustre_sb_info *lsi = s2lsi(sb);
1731         struct ll_sb_info *sbi = ll_s2sbi(sb);
1732
1733         if (!IS_ERR_OR_NULL(sbi->ll_debugfs_entry))
1734                 ldebugfs_remove(&sbi->ll_debugfs_entry);
1735
1736         if (sbi->ll_dt_obd)
1737                 sysfs_remove_link(&sbi->ll_kset.kobj,
1738                                   sbi->ll_dt_obd->obd_type->typ_name);
1739
1740         if (sbi->ll_md_obd)
1741                 sysfs_remove_link(&sbi->ll_kset.kobj,
1742                                   sbi->ll_md_obd->obd_type->typ_name);
1743
1744         kobject_put(lsi->lsi_kobj);
1745
1746         kset_unregister(&sbi->ll_kset);
1747         wait_for_completion(&sbi->ll_kobj_unregister);
1748
1749         lprocfs_free_stats(&sbi->ll_ra_stats);
1750         lprocfs_free_stats(&sbi->ll_stats);
1751 }
1752 #undef MAX_STRING_SIZE
1753
1754 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1755                                    struct seq_file *seq, int which)
1756 {
1757         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1758         unsigned long start, end, r, w;
1759         char *unitp = "KMGTPEZY";
1760         int i, units = 10;
1761         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1762
1763         read_cum = 0;
1764         write_cum = 0;
1765         start = 0;
1766
1767         for (i = 0; i < LL_HIST_MAX; i++) {
1768                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1769                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1770         }
1771
1772         for (i = 0; i < LL_HIST_MAX; i++) {
1773                 r = pp_info->pp_r_hist.oh_buckets[i];
1774                 w = pp_info->pp_w_hist.oh_buckets[i];
1775                 read_cum += r;
1776                 write_cum += w;
1777                 end = BIT(i + LL_HIST_START - units);
1778                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u  | "
1779                            "%14lu %4u %4u\n", start, *unitp, end, *unitp,
1780                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1781                            r, pct(r, read_tot), pct(read_cum, read_tot),
1782                            w, pct(w, write_tot), pct(write_cum, write_tot));
1783                 start = end;
1784                 if (start == BIT(10)) {
1785                         start = 1;
1786                         units += 10;
1787                         unitp++;
1788                 }
1789                 if (read_cum == read_tot && write_cum == write_tot)
1790                         break;
1791         }
1792 }
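/*
 * Each histogram bucket is printed as a power-of-two size range; once the
 * range reaches 1024 of the current unit the unit letter advances through
 * "KMGTPEZY".  An illustrative row (spacing approximate):
 *
 *	   4K -    8K :             12    3    5  |              7    2    2
 */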
1793
1794 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1795 {
1796         struct timespec64 now;
1797         struct ll_sb_info *sbi = seq->private;
1798         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1799         int k;
1800
1801         ktime_get_real_ts64(&now);
1802
1803         if (!sbi->ll_rw_stats_on) {
1804                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1805                 return 0;
1806         }
1807         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1808                    (s64)now.tv_sec, now.tv_nsec);
1809         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1810         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1811                    "extents", "calls", "%", "cum%",
1812                    "calls", "%", "cum%");
1813         spin_lock(&sbi->ll_pp_extent_lock);
1814         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1815                 if (io_extents->pp_extents[k].pid != 0) {
1816                         seq_printf(seq, "\nPID: %d\n",
1817                                    io_extents->pp_extents[k].pid);
1818                         ll_display_extents_info(io_extents, seq, k);
1819                 }
1820         }
1821         spin_unlock(&sbi->ll_pp_extent_lock);
1822         return 0;
1823 }
1824
1825 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1826                                                 const char __user *buf,
1827                                                 size_t len,
1828                                                 loff_t *off)
1829 {
1830         struct seq_file *seq = file->private_data;
1831         struct ll_sb_info *sbi = seq->private;
1832         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1833         int i;
1834         __s64 value;
1835
1836         if (len == 0)
1837                 return -EINVAL;
1838
1839         value = ll_stats_pid_write(buf, len);
1840
1841         if (value == 0)
1842                 sbi->ll_rw_stats_on = 0;
1843         else
1844                 sbi->ll_rw_stats_on = 1;
1845
1846         spin_lock(&sbi->ll_pp_extent_lock);
1847         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1848                 io_extents->pp_extents[i].pid = 0;
1849                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1850                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1851         }
1852         spin_unlock(&sbi->ll_pp_extent_lock);
1853         return len;
1854 }
1855
1856 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
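/*
 * Usage sketch (path illustrative, assuming debugfs at /sys/kernel/debug):
 * writing a non-zero value enables collection and resets the per-process
 * histograms; writing "0" (or "disable", per the help text above) turns
 * collection back off:
 *
 *	echo 1 > /sys/kernel/debug/lustre/llite/<name>/extents_stats_per_process
 *	cat /sys/kernel/debug/lustre/llite/<name>/extents_stats_per_process
 */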
1857
1858 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1859 {
1860         struct timespec64 now;
1861         struct ll_sb_info *sbi = seq->private;
1862         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1863
1864         ktime_get_real_ts64(&now);
1865
1866         if (!sbi->ll_rw_stats_on) {
1867                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1868                 return 0;
1869         }
1870         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1871                    (s64)now.tv_sec, now.tv_nsec);
1872
1873         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1874         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1875                    "extents", "calls", "%", "cum%",
1876                    "calls", "%", "cum%");
1877         spin_lock(&sbi->ll_lock);
1878         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1879         spin_unlock(&sbi->ll_lock);
1880
1881         return 0;
1882 }
1883
1884 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1885                                              const char __user *buf,
1886                                              size_t len, loff_t *off)
1887 {
1888         struct seq_file *seq = file->private_data;
1889         struct ll_sb_info *sbi = seq->private;
1890         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1891         int i;
1892         __s64 value;
1893
1894         if (len == 0)
1895                 return -EINVAL;
1896
1897         value = ll_stats_pid_write(buf, len);
1898
1899         if (value == 0)
1900                 sbi->ll_rw_stats_on = 0;
1901         else
1902                 sbi->ll_rw_stats_on = 1;
1903
1904         spin_lock(&sbi->ll_pp_extent_lock);
1905         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1906                 io_extents->pp_extents[i].pid = 0;
1907                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1908                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1909         }
1910         spin_unlock(&sbi->ll_pp_extent_lock);
1911
1912         return len;
1913 }
1914
1915 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
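/*
 * Note that both extents_stats write handlers above reset the histograms on
 * every write, regardless of whether the value enables or disables
 * collection, so any write starts a fresh measurement window.
 */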
1916
1917 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1918                        struct ll_file_data *file, loff_t pos,
1919                        size_t count, int rw)
1920 {
1921         int i, cur = -1;
1922         struct ll_rw_process_info *process;
1923         struct ll_rw_process_info *offset;
1924         int *off_count = &sbi->ll_rw_offset_entry_count;
1925         int *process_count = &sbi->ll_offset_process_count;
1926         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1927
1928         if (!sbi->ll_rw_stats_on)
1929                 return;
1930         process = sbi->ll_rw_process_info;
1931         offset = sbi->ll_rw_offset_info;
1932
1933         spin_lock(&sbi->ll_pp_extent_lock);
1934         /* Extent statistics */
1935         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1936                 if (io_extents->pp_extents[i].pid == pid) {
1937                         cur = i;
1938                         break;
1939                 }
1940         }
1941
1942         if (cur == -1) {
1943                 /* new process */
1944                 sbi->ll_extent_process_count =
1945                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1946                 cur = sbi->ll_extent_process_count;
1947                 io_extents->pp_extents[cur].pid = pid;
1948                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1949                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1950         }
1951
1952         for (i = 0; (count >= BIT(LL_HIST_START + i)) &&
1953              (i < (LL_HIST_MAX - 1)); i++);
1954         if (rw == 0) {
1955                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1956                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1957         } else {
1958                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1959                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1960         }
1961         spin_unlock(&sbi->ll_pp_extent_lock);
1962
1963         spin_lock(&sbi->ll_process_lock);
1964         /* Offset statistics */
1965         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1966                 if (process[i].rw_pid == pid) {
1967                         if (process[i].rw_last_file != file) {
1968                                 process[i].rw_range_start = pos;
1969                                 process[i].rw_last_file_pos = pos + count;
1970                                 process[i].rw_smallest_extent = count;
1971                                 process[i].rw_largest_extent = count;
1972                                 process[i].rw_offset = 0;
1973                                 process[i].rw_last_file = file;
1974                                 spin_unlock(&sbi->ll_process_lock);
1975                                 return;
1976                         }
1977                         if (process[i].rw_last_file_pos != pos) {
1978                                 *off_count =
1979                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1980                                 offset[*off_count].rw_op = process[i].rw_op;
1981                                 offset[*off_count].rw_pid = pid;
1982                                 offset[*off_count].rw_range_start =
1983                                         process[i].rw_range_start;
1984                                 offset[*off_count].rw_range_end =
1985                                         process[i].rw_last_file_pos;
1986                                 offset[*off_count].rw_smallest_extent =
1987                                         process[i].rw_smallest_extent;
1988                                 offset[*off_count].rw_largest_extent =
1989                                         process[i].rw_largest_extent;
1990                                 offset[*off_count].rw_offset =
1991                                         process[i].rw_offset;
1992                                 process[i].rw_op = rw;
1993                                 process[i].rw_range_start = pos;
1994                                 process[i].rw_smallest_extent = count;
1995                                 process[i].rw_largest_extent = count;
1996                                 process[i].rw_offset = pos -
1997                                         process[i].rw_last_file_pos;
1998                         }
1999                         if (process[i].rw_smallest_extent > count)
2000                                 process[i].rw_smallest_extent = count;
2001                         if (process[i].rw_largest_extent < count)
2002                                 process[i].rw_largest_extent = count;
2003                         process[i].rw_last_file_pos = pos + count;
2004                         spin_unlock(&sbi->ll_process_lock);
2005                         return;
2006                 }
2007         }
2008         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
2009         process[*process_count].rw_pid = pid;
2010         process[*process_count].rw_op = rw;
2011         process[*process_count].rw_range_start = pos;
2012         process[*process_count].rw_last_file_pos = pos + count;
2013         process[*process_count].rw_smallest_extent = count;
2014         process[*process_count].rw_largest_extent = count;
2015         process[*process_count].rw_offset = 0;
2016         process[*process_count].rw_last_file = file;
2017         spin_unlock(&sbi->ll_process_lock);
2018 }
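/*
 * Worked example of the bucket selection above (illustrative): a 6000 byte
 * read satisfies 6000 >= BIT(LL_HIST_START + 0) (4096) but not
 * 6000 >= BIT(LL_HIST_START + 1) (8192), so it lands in bucket 1, which
 * ll_display_extents_info() reports as the "4K - 8K" range.
 */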
2019
2020 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
2021 {
2022         struct timespec64 now;
2023         struct ll_sb_info *sbi = seq->private;
2024         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
2025         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
2026         int i;
2027
2028         ktime_get_real_ts64(&now);
2029
2030         if (!sbi->ll_rw_stats_on) {
2031                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
2032                 return 0;
2033         }
2034         spin_lock(&sbi->ll_process_lock);
2035
2036         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
2037                    (s64)now.tv_sec, now.tv_nsec);
2038         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
2039                    "R/W", "PID", "RANGE START", "RANGE END",
2040                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
2041
2042         /* We stored the discontiguous offsets here; print them first */
2043         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
2044                 if (offset[i].rw_pid != 0)
2045                         seq_printf(seq,
2046                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
2047                                    offset[i].rw_op == READ ? 'R' : 'W',
2048                                    offset[i].rw_pid,
2049                                    offset[i].rw_range_start,
2050                                    offset[i].rw_range_end,
2051                                    (unsigned long)offset[i].rw_smallest_extent,
2052                                    (unsigned long)offset[i].rw_largest_extent,
2053                                    offset[i].rw_offset);
2054         }
2055
2056         /* Then print the current offsets for each process */
2057         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
2058                 if (process[i].rw_pid != 0)
2059                         seq_printf(seq,
2060                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
2061                                    process[i].rw_op == READ ? 'R' : 'W',
2062                                    process[i].rw_pid,
2063                                    process[i].rw_range_start,
2064                                    process[i].rw_last_file_pos,
2065                                    (unsigned long)process[i].rw_smallest_extent,
2066                                    (unsigned long)process[i].rw_largest_extent,
2067                                    process[i].rw_offset);
2068         }
2069         spin_unlock(&sbi->ll_process_lock);
2070
2071         return 0;
2072 }
2073
2074 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
2075                                             const char __user *buf,
2076                                             size_t len, loff_t *off)
2077 {
2078         struct seq_file *seq = file->private_data;
2079         struct ll_sb_info *sbi = seq->private;
2080         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
2081         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
2082         __s64 value;
2083
2084         if (len == 0)
2085                 return -EINVAL;
2086
2087         value = ll_stats_pid_write(buf, len);
2088
2089         if (value == 0)
2090                 sbi->ll_rw_stats_on = 0;
2091         else
2092                 sbi->ll_rw_stats_on = 1;
2093
2094         spin_lock(&sbi->ll_process_lock);
2095         sbi->ll_offset_process_count = 0;
2096         sbi->ll_rw_offset_entry_count = 0;
2097         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
2098                LL_PROCESS_HIST_MAX);
2099         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
2100                LL_OFFSET_HIST_MAX);
2101         spin_unlock(&sbi->ll_process_lock);
2102
2103         return len;
2104 }
2105
2106 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);
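/*
 * Like the extents files, offset_stats only reports data while
 * ll_rw_stats_on is set; a write toggles collection (non-zero enables, "0"
 * or "disable" stops it) and zeroes both the per-process and the
 * discontiguous-offset tables.  Reading it could look roughly like this
 * (path illustrative):
 *
 *	cat /sys/kernel/debug/lustre/llite/<name>/offset_stats
 */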