LU-8066 llite: don't use class_setup_tunables()
[fs/lustre-release.git] / lustre / llite / lproc_llite.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 static struct kobject *llite_kobj;
47 static struct dentry *llite_root;
48
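/*
 * Release callback for the module-wide "llite" kobject: tear down the
 * companion debugfs directory before freeing the kobject itself.
 */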
49 static void llite_kobj_release(struct kobject *kobj)
50 {
51         if (!IS_ERR_OR_NULL(llite_root)) {
52                 debugfs_remove(llite_root);
53                 llite_root = NULL;
54         }
55
56         kfree(kobj);
57 }
58
59 static struct kobj_type llite_kobj_ktype = {
60         .release        = llite_kobj_release,
61         .sysfs_ops      = &lustre_sysfs_ops,
62 };
63
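/*
 * Create the module-wide tunable roots: an "llite" kobject under the lustre
 * kset (sysfs) and an "llite" directory under debugfs_lustre_root (debugfs).
 * If the debugfs directory cannot be created, the kobject reference is
 * dropped again through the shared error path.
 */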
64 int llite_tunables_register(void)
65 {
66         int rc;
67
68         llite_kobj = kzalloc(sizeof(*llite_kobj), GFP_KERNEL);
69         if (!llite_kobj)
70                 return -ENOMEM;
71
72         llite_kobj->kset = lustre_kset;
73         rc = kobject_init_and_add(llite_kobj, &llite_kobj_ktype,
74                                   &lustre_kset->kobj, "%s", "llite");
75         if (rc)
76                 goto free_kobj;
77
78         llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
79         if (IS_ERR_OR_NULL(llite_root)) {
80                 rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
81                 llite_root = NULL;
82 free_kobj:
83                 kobject_put(llite_kobj);
84                 llite_kobj = NULL;
85         }
86
87         return rc;
88 }
89
90 void llite_tunables_unregister(void)
91 {
92         kobject_put(llite_kobj);
93         llite_kobj = NULL;
94 }
95
96 /* <debugfs>/lustre/llite mount point registration */
97 static const struct file_operations ll_rw_extents_stats_fops;
98 static const struct file_operations ll_rw_extents_stats_pp_fops;
99 static const struct file_operations ll_rw_offset_stats_fops;
100
101 /**
102  * ll_stats_pid_write() - Determine if stats collection should be enabled
103  * @buf: Buffer containing the data written
104  * @len: Number of bytes in the buffer
105  *
106  * Several proc files begin collecting stats when a value is written, and stop
107  * collecting when either '0' or 'disable' is written. This function checks the
108  * written value to see if collection should be enabled or disabled.
109  *
110  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
111  * equivalent of a number is written, that number is returned. Otherwise,
112  * 1 is returned. Non-zero return values indicate collection should be enabled.
113  */
114 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
115 {
116         unsigned long long value = 1;
117         char kernbuf[16];
118         int rc;
119
120         rc = kstrtoull_from_user(buf, len, 0, &value);
121         if (rc < 0 && len > 0 && len < sizeof(kernbuf)) {
122                 if (copy_from_user(kernbuf, buf, len))
123                         return -EFAULT;
124                 kernbuf[len] = 0;
125
126                 if (kernbuf[len - 1] == '\n')
127                         kernbuf[len - 1] = 0;
128
129                 if (strncasecmp(kernbuf, "disable", 7) == 0)
130                         value = 0;
131         }
132
133         return value;
134 }
135
136 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
137                               char *buf)
138 {
139         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
140                                               ll_kset.kobj);
141         struct obd_statfs osfs;
142         int rc;
143
144         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
145         if (rc)
146                 return rc;
147
148         return sprintf(buf, "%u\n", osfs.os_bsize);
149 }
150 LUSTRE_RO_ATTR(blocksize);
151
152 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
153                                    char *buf)
154 {
155         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
156                                               ll_kset.kobj);
157
158         return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
159 }
160
161 static ssize_t stat_blocksize_store(struct kobject *kobj,
162                                     struct attribute *attr,
163                                     const char *buffer,
164                                     size_t count)
165 {
166         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
167                                               ll_kset.kobj);
168         unsigned int val;
169         int rc;
170
171         rc = kstrtouint(buffer, 10, &val);
172         if (rc)
173                 return rc;
174
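        /* 0 clears the setting; any other value must be a power of two
         * no smaller than PAGE_SIZE */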
175         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
176                 return -ERANGE;
177
178         sbi->ll_stat_blksize = val;
179
180         return count;
181 }
182 LUSTRE_RW_ATTR(stat_blocksize);
183
184 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
185                                 char *buf)
186 {
187         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
188                                               ll_kset.kobj);
189         struct obd_statfs osfs;
190         u32 blk_size;
191         u64 result;
192         int rc;
193
194         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
195         if (rc)
196                 return rc;
197
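        /* os_blocks counts blocks of os_bsize bytes; scale it to KiB by
         * shifting left by log2(os_bsize / 1024) */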
198         blk_size = osfs.os_bsize >> 10;
199         result = osfs.os_blocks;
200
201         while (blk_size >>= 1)
202                 result <<= 1;
203
204         return sprintf(buf, "%llu\n", result);
205 }
206 LUSTRE_RO_ATTR(kbytestotal);
207
208 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
209                                char *buf)
210 {
211         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
212                                               ll_kset.kobj);
213         struct obd_statfs osfs;
214         u32 blk_size;
215         u64 result;
216         int rc;
217
218         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
219         if (rc)
220                 return rc;
221
222         blk_size = osfs.os_bsize >> 10;
223         result = osfs.os_bfree;
224
225         while (blk_size >>= 1)
226                 result <<= 1;
227
228         return sprintf(buf, "%llu\n", result);
229 }
230 LUSTRE_RO_ATTR(kbytesfree);
231
232 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
233                                 char *buf)
234 {
235         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
236                                               ll_kset.kobj);
237         struct obd_statfs osfs;
238         u32 blk_size;
239         u64 result;
240         int rc;
241
242         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
243         if (rc)
244                 return rc;
245
246         blk_size = osfs.os_bsize >> 10;
247         result = osfs.os_bavail;
248
249         while (blk_size >>= 1)
250                 result <<= 1;
251
252         return sprintf(buf, "%llu\n", result);
253 }
254 LUSTRE_RO_ATTR(kbytesavail);
255
256 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
257                                char *buf)
258 {
259         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
260                                               ll_kset.kobj);
261         struct obd_statfs osfs;
262         int rc;
263
264         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
265         if (rc)
266                 return rc;
267
268         return sprintf(buf, "%llu\n", osfs.os_files);
269 }
270 LUSTRE_RO_ATTR(filestotal);
271
272 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
273                               char *buf)
274 {
275         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
276                                               ll_kset.kobj);
277         struct obd_statfs osfs;
278         int rc;
279
280         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
281         if (rc)
282                 return rc;
283
284         return sprintf(buf, "%llu\n", osfs.os_ffree);
285 }
286 LUSTRE_RO_ATTR(filesfree);
287
288 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
289                                 char *buf)
290 {
291         return sprintf(buf, "local client\n");
292 }
293 LUSTRE_RO_ATTR(client_type);
294
295 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
296                            char *buf)
297 {
298         return sprintf(buf, "lustre\n");
299 }
300 LUSTRE_RO_ATTR(fstype);
301
302 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
303                          char *buf)
304 {
305         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
306                                               ll_kset.kobj);
307
308         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
309 }
310 LUSTRE_RO_ATTR(uuid);
311
312 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
313 {
314         struct super_block *sb = m->private;
315
316         /*
317          * See description of statistical counters in struct cl_site, and
318          * struct lu_site.
319          */
320         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
321 }
322
323 LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
324
325 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
326 {
327         struct super_block *sb = m->private;
328         struct ll_sb_info *sbi = ll_s2sbi(sb);
329         long pages_number;
330         int mult;
331
332         spin_lock(&sbi->ll_lock);
333         pages_number = sbi->ll_ra_info.ra_max_pages;
334         spin_unlock(&sbi->ll_lock);
335
336         mult = 1 << (20 - PAGE_SHIFT);
337         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
338 }
339
340 static ssize_t
341 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
342                               size_t count, loff_t *off)
343 {
344         struct seq_file *m = file->private_data;
345         struct super_block *sb = m->private;
346         struct ll_sb_info *sbi = ll_s2sbi(sb);
347         __s64 pages_number;
348         int rc;
349
350         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
351         if (rc)
352                 return rc;
353
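        /* the parsed value is a byte count ('M' being the default unit);
         * convert it to a number of pages */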
354         pages_number >>= PAGE_SHIFT;
355
356         if (pages_number < 0 || pages_number > totalram_pages / 2) {
357                 /* 1/2 of RAM */
358                 CERROR("%s: can't set max_read_ahead_mb=%lu > %luMB\n",
359                        ll_get_fsname(sb, NULL, 0),
360                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
361                        totalram_pages >> (20 - PAGE_SHIFT + 1));
362                 return -ERANGE;
363         }
364
365         spin_lock(&sbi->ll_lock);
366         sbi->ll_ra_info.ra_max_pages = pages_number;
367         spin_unlock(&sbi->ll_lock);
368
369         return count;
370 }
371
372 LDEBUGFS_SEQ_FOPS(ll_max_readahead_mb);
373
374 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
375 {
376         struct super_block *sb = m->private;
377         struct ll_sb_info *sbi = ll_s2sbi(sb);
378         long pages_number;
379         int mult;
380
381         spin_lock(&sbi->ll_lock);
382         pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
383         spin_unlock(&sbi->ll_lock);
384
385         mult = 1 << (20 - PAGE_SHIFT);
386         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
387 }
388
389 static ssize_t
390 ll_max_readahead_per_file_mb_seq_write(struct file *file,
391                                        const char __user *buffer,
392                                        size_t count, loff_t *off)
393 {
394         struct seq_file *m = file->private_data;
395         struct super_block *sb = m->private;
396         struct ll_sb_info *sbi = ll_s2sbi(sb);
397         int rc;
398         __s64 pages_number;
399
400         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
401         if (rc)
402                 return rc;
403
404         pages_number >>= PAGE_SHIFT;
405
406         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
407                 CERROR("%s: can't set max_read_ahead_per_file_mb=%lu > "
408                        "max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0),
409                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
410                        sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT));
411                 return -ERANGE;
412         }
413
414         spin_lock(&sbi->ll_lock);
415         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
416         spin_unlock(&sbi->ll_lock);
417
418         return count;
419 }
420
421 LDEBUGFS_SEQ_FOPS(ll_max_readahead_per_file_mb);
422
423 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
424 {
425         struct super_block *sb = m->private;
426         struct ll_sb_info *sbi = ll_s2sbi(sb);
427         long pages_number;
428         int mult;
429
430         spin_lock(&sbi->ll_lock);
431         pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
432         spin_unlock(&sbi->ll_lock);
433
434         mult = 1 << (20 - PAGE_SHIFT);
435         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
436 }
437
438 static ssize_t
439 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
440                                      const char __user *buffer,
441                                      size_t count, loff_t *off)
442 {
443         struct seq_file *m = file->private_data;
444         struct super_block *sb = m->private;
445         struct ll_sb_info *sbi = ll_s2sbi(sb);
446         int rc;
447         __s64 pages_number;
448
449         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
450         if (rc)
451                 return rc;
452
453         pages_number >>= PAGE_SHIFT;
454
455         /* Cap this at the current max readahead window size; the readahead
456          * algorithm does this anyway, so it's pointless to set it larger. */
457         if (pages_number < 0 ||
458             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
459                 int pages_shift = 20 - PAGE_SHIFT;
460                 CERROR("%s: can't set max_read_ahead_whole_mb=%lu > "
461                        "max_read_ahead_per_file_mb=%lu\n",
462                        ll_get_fsname(sb, NULL, 0),
463                        (unsigned long)pages_number >> pages_shift,
464                        sbi->ll_ra_info.ra_max_pages_per_file >> pages_shift);
465                 return -ERANGE;
466         }
467
468         spin_lock(&sbi->ll_lock);
469         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
470         spin_unlock(&sbi->ll_lock);
471
472         return count;
473 }
474
475 LDEBUGFS_SEQ_FOPS(ll_max_read_ahead_whole_mb);
476
477 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
478 {
479         struct super_block     *sb    = m->private;
480         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
481         struct cl_client_cache *cache = sbi->ll_cache;
482         int shift = 20 - PAGE_SHIFT;
483         long max_cached_mb;
484         long unused_mb;
485
486         max_cached_mb = cache->ccc_lru_max >> shift;
487         unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
488         seq_printf(m, "users: %d\n"
489                       "max_cached_mb: %ld\n"
490                       "used_mb: %ld\n"
491                       "unused_mb: %ld\n"
492                       "reclaim_count: %u\n",
493                    atomic_read(&cache->ccc_users),
494                    max_cached_mb,
495                    max_cached_mb - unused_mb,
496                    unused_mb,
497                    cache->ccc_lru_shrinkers);
498         return 0;
499 }
500
501 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
502                                           const char __user *buffer,
503                                           size_t count, loff_t *off)
504 {
505         struct seq_file *m = file->private_data;
506         struct super_block *sb = m->private;
507         struct ll_sb_info *sbi = ll_s2sbi(sb);
508         struct cl_client_cache *cache = sbi->ll_cache;
509         struct lu_env *env;
510         long diff = 0;
511         long nrpages = 0;
512         __u16 refcheck;
513         __s64 pages_number;
514         int rc;
515         char kernbuf[128];
516
517         ENTRY;
518         if (count >= sizeof(kernbuf))
519                 RETURN(-EINVAL);
520
521         if (copy_from_user(kernbuf, buffer, count))
522                 RETURN(-EFAULT);
523         kernbuf[count] = 0;
524
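        /* skip an optional "max_cached_mb:" label so the seq_show output
         * can be written straight back */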
525         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
526                   kernbuf;
527         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
528         if (rc)
529                 RETURN(rc);
530
531         pages_number >>= PAGE_SHIFT;
532
533         if (pages_number < 0 || pages_number > totalram_pages) {
534                 CERROR("%s: can't set max cache more than %lu MB\n",
535                        ll_get_fsname(sb, NULL, 0),
536                        totalram_pages >> (20 - PAGE_SHIFT));
537                 RETURN(-ERANGE);
538         }
539         /* Allow enough cache so clients can make well-formed RPCs */
540         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
541
542         spin_lock(&sbi->ll_lock);
543         diff = pages_number - cache->ccc_lru_max;
544         spin_unlock(&sbi->ll_lock);
545
546         /* easy - add more LRU slots. */
547         if (diff >= 0) {
548                 atomic_long_add(diff, &cache->ccc_lru_left);
549                 GOTO(out, rc = 0);
550         }
551
552         env = cl_env_get(&refcheck);
553         if (IS_ERR(env))
554                 RETURN(PTR_ERR(env));
555
556         diff = -diff;
557         while (diff > 0) {
558                 long tmp;
559
560                 /* reduce LRU budget from free slots. */
561                 do {
562                         long ov, nv;
563
564                         ov = atomic_long_read(&cache->ccc_lru_left);
565                         if (ov == 0)
566                                 break;
567
568                         nv = ov > diff ? ov - diff : 0;
569                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
570                         if (likely(ov == rc)) {
571                                 diff -= ov - nv;
572                                 nrpages += ov - nv;
573                                 break;
574                         }
575                 } while (1);
576
577                 if (diff <= 0)
578                         break;
579
580                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
581                         rc = -ENODEV;
582                         break;
583                 }
584
585                 /* difficult - have to ask OSCs to drop LRU slots. */
586                 tmp = diff << 1;
587                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
588                                 sizeof(KEY_CACHE_LRU_SHRINK),
589                                 KEY_CACHE_LRU_SHRINK,
590                                 sizeof(tmp), &tmp, NULL);
591                 if (rc < 0)
592                         break;
593         }
594         cl_env_put(env, &refcheck);
595
596 out:
597         if (rc >= 0) {
598                 spin_lock(&sbi->ll_lock);
599                 cache->ccc_lru_max = pages_number;
600                 spin_unlock(&sbi->ll_lock);
601                 rc = count;
602         } else {
603                 atomic_long_add(nrpages, &cache->ccc_lru_left);
604         }
605         return rc;
606 }
607
608 LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
609
610 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
611                               char *buf)
612 {
613         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
614                                               ll_kset.kobj);
615
616         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
617 }
618
619 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
620                                const char *buffer, size_t count)
621 {
622         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
623                                               ll_kset.kobj);
624         bool val;
625         int tmp;
626         int rc;
627
628         if (!sbi->ll_dt_exp)
629                 /* Not set up yet */
630                 return -EAGAIN;
631
632         rc = kstrtobool(buffer, &val);
633         if (rc)
634                 return rc;
635         if (val)
636                 sbi->ll_flags |= LL_SBI_CHECKSUM;
637         else
638                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
639         tmp = val;
640
641         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
642                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
643         if (rc)
644                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
645
646         return count;
647 }
648 LUSTRE_RW_ATTR(checksums);
649
650 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
651
652 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
653                               enum stats_track_type type)
654 {
655         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
656                                               ll_kset.kobj);
657
658         if (sbi->ll_stats_track_type == type)
659                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
660         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
661                 return sprintf(buf, "0 (all)\n");
662
663         return sprintf(buf, "untracked\n");
664 }
665
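/*
 * Set the ID tracked for the given stats_track_* type.  Writing 0 switches
 * to tracking all processes; the existing stats are cleared either way.
 */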
666 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
667                               size_t count, enum stats_track_type type)
668 {
669         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
670                                               ll_kset.kobj);
671         unsigned long pid;
672         int rc;
673
674         rc = kstrtoul(buffer, 10, &pid);
675         if (rc)
676                 return rc;
677
678         sbi->ll_stats_track_id = pid;
679         if (pid == 0)
680                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
681         else
682                 sbi->ll_stats_track_type = type;
683         lprocfs_clear_stats(sbi->ll_stats);
684         return count;
685 }
686
687 static ssize_t stats_track_pid_show(struct kobject *kobj,
688                                     struct attribute *attr,
689                                     char *buf)
690 {
691         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
692 }
693
694 static ssize_t stats_track_pid_store(struct kobject *kobj,
695                                      struct attribute *attr,
696                                      const char *buffer,
697                                      size_t count)
698 {
699         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
700 }
701 LUSTRE_RW_ATTR(stats_track_pid);
702
703 static ssize_t stats_track_ppid_show(struct kobject *kobj,
704                                      struct attribute *attr,
705                                      char *buf)
706 {
707         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
708 }
709
710 static ssize_t stats_track_ppid_store(struct kobject *kobj,
711                                       struct attribute *attr,
712                                       const char *buffer,
713                                       size_t count)
714 {
715         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
716 }
717 LUSTRE_RW_ATTR(stats_track_ppid);
718
719 static ssize_t stats_track_gid_show(struct kobject *kobj,
720                                     struct attribute *attr,
721                                     char *buf)
722 {
723         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
724 }
725
726 static ssize_t stats_track_gid_store(struct kobject *kobj,
727                                      struct attribute *attr,
728                                      const char *buffer,
729                                      size_t count)
730 {
731         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
732 }
733 LUSTRE_RW_ATTR(stats_track_gid);
734
735 static ssize_t statahead_running_max_show(struct kobject *kobj,
736                                           struct attribute *attr,
737                                           char *buf)
738 {
739         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
740                                               ll_kset.kobj);
741
742         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
743 }
744
745 static ssize_t statahead_running_max_store(struct kobject *kobj,
746                                            struct attribute *attr,
747                                            const char *buffer,
748                                            size_t count)
749 {
750         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
751                                               ll_kset.kobj);
752         unsigned long val;
753         int rc;
754
755         rc = kstrtoul(buffer, 0, &val);
756         if (rc)
757                 return rc;
758
759         if (val <= LL_SA_RUNNING_MAX) {
760                 sbi->ll_sa_running_max = val;
761                 return count;
762         }
763
764         CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
765                val, LL_SA_RUNNING_MAX);
766
767         return -ERANGE;
768 }
769 LUSTRE_RW_ATTR(statahead_running_max);
770
771 static ssize_t statahead_max_show(struct kobject *kobj,
772                                   struct attribute *attr,
773                                   char *buf)
774 {
775         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
776                                               ll_kset.kobj);
777
778         return sprintf(buf, "%u\n", sbi->ll_sa_max);
779 }
780
781 static ssize_t statahead_max_store(struct kobject *kobj,
782                                    struct attribute *attr,
783                                    const char *buffer,
784                                    size_t count)
785 {
786         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
787                                               ll_kset.kobj);
788         unsigned long val;
789         int rc;
790
791         rc = kstrtoul(buffer, 0, &val);
792         if (rc)
793                 return rc;
794
795         if (val <= LL_SA_RPC_MAX)
796                 sbi->ll_sa_max = val;
797         else
798                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
799                        val, LL_SA_RPC_MAX);
800
801         return count;
802 }
803 LUSTRE_RW_ATTR(statahead_max);
804
805 static ssize_t statahead_agl_show(struct kobject *kobj,
806                                   struct attribute *attr,
807                                   char *buf)
808 {
809         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
810                                               ll_kset.kobj);
811
812         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
813 }
814
815 static ssize_t statahead_agl_store(struct kobject *kobj,
816                                    struct attribute *attr,
817                                    const char *buffer,
818                                    size_t count)
819 {
820         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
821                                               ll_kset.kobj);
822         bool val;
823         int rc;
824
825         rc = kstrtobool(buffer, &val);
826         if (rc)
827                 return rc;
828
829         if (val)
830                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
831         else
832                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
833
834         return count;
835 }
836 LUSTRE_RW_ATTR(statahead_agl);
837
838 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
839 {
840         struct super_block *sb = m->private;
841         struct ll_sb_info *sbi = ll_s2sbi(sb);
842
843         seq_printf(m, "statahead total: %u\n"
844                       "statahead wrong: %u\n"
845                       "agl total: %u\n",
846                    atomic_read(&sbi->ll_sa_total),
847                    atomic_read(&sbi->ll_sa_wrong),
848                    atomic_read(&sbi->ll_agl_total));
849         return 0;
850 }
851
852 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
853
854 static ssize_t lazystatfs_show(struct kobject *kobj,
855                                struct attribute *attr,
856                                char *buf)
857 {
858         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
859                                               ll_kset.kobj);
860
861         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
862 }
863
864 static ssize_t lazystatfs_store(struct kobject *kobj,
865                                 struct attribute *attr,
866                                 const char *buffer,
867                                 size_t count)
868 {
869         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
870                                               ll_kset.kobj);
871         bool val;
872         int rc;
873
874         rc = kstrtobool(buffer, &val);
875         if (rc)
876                 return rc;
877
878         if (val)
879                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
880         else
881                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
882
883         return count;
884 }
885 LUSTRE_RW_ATTR(lazystatfs);
886
887 static ssize_t max_easize_show(struct kobject *kobj,
888                                struct attribute *attr,
889                                char *buf)
890 {
891         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
892                                               ll_kset.kobj);
893         unsigned int ealen;
894         int rc;
895
896         rc = ll_get_max_mdsize(sbi, &ealen);
897         if (rc)
898                 return rc;
899
900         return sprintf(buf, "%u\n", ealen);
901 }
902 LUSTRE_RO_ATTR(max_easize);
903
904 /**
905  * Get default_easize.
906  *
907  * \see client_obd::cl_default_mds_easize
908  *
909  * \param[in] kobj      kobject of the ll_sb_info
910  * \param[in] attr      unused for single entry
911  * \param[out] buf      buffer to hold the printed value
912  * \retval positive     \a buf length on success
913  * \retval negative     negated errno on failure
914  */
915 static ssize_t default_easize_show(struct kobject *kobj,
916                                    struct attribute *attr,
917                                    char *buf)
918 {
919         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
920                                               ll_kset.kobj);
921         unsigned int ealen;
922         int rc;
923
924         rc = ll_get_default_mdsize(sbi, &ealen);
925         if (rc)
926                 return rc;
927
928         return sprintf(buf, "%u\n", ealen);
929 }
930
931 /**
932  * Set default_easize.
933  *
934  * Range checking on the passed value is handled by
935  * ll_set_default_mdsize().
936  *
937  * \see client_obd::cl_default_mds_easize
938  *
939  * \param[in] kobj      kobject of the ll_sb_info
940  * \param[in] attr      unused for single entry
941  * \param[in] buffer    string passed from user space
942  * \param[in] count     \a buffer length
943  *
944  * \retval positive     \a count on success
945  * \retval negative     negated errno on failure
946  */
947 static ssize_t default_easize_store(struct kobject *kobj,
948                                     struct attribute *attr,
949                                     const char *buffer,
950                                     size_t count)
951 {
952         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
953                                               ll_kset.kobj);
954         unsigned int val;
955         int rc;
956
957         if (count == 0)
958                 return 0;
959
960         rc = kstrtouint(buffer, 10, &val);
961         if (rc)
962                 return rc;
963
964         rc = ll_set_default_mdsize(sbi, val);
965         if (rc)
966                 return rc;
967
968         return count;
969 }
970 LUSTRE_RW_ATTR(default_easize);
971
972 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
973 {
974         const char *str[] = LL_SBI_FLAGS;
975         struct super_block *sb = m->private;
976         int flags = ll_s2sbi(sb)->ll_flags;
977         int i = 0;
978
979         while (flags != 0) {
980                 if (ARRAY_SIZE(str) <= i) {
981                         CERROR("%s: LL_SBI_FLAGS needs updating to match the sbi flags\n",
982                                ll_get_fsname(sb, NULL, 0));
983                         return -EINVAL;
984                 }
985
986                 if (flags & 0x1)
987                         seq_printf(m, "%s ", str[i]);
988                 flags >>= 1;
989                 ++i;
990         }
991         seq_printf(m, "\b\n");
992         return 0;
993 }
994
995 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
996
997 static ssize_t xattr_cache_show(struct kobject *kobj,
998                                 struct attribute *attr,
999                                 char *buf)
1000 {
1001         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1002                                               ll_kset.kobj);
1003
1004         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
1005 }
1006
1007 static ssize_t xattr_cache_store(struct kobject *kobj,
1008                                  struct attribute *attr,
1009                                  const char *buffer,
1010                                  size_t count)
1011 {
1012         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1013                                               ll_kset.kobj);
1014         bool val;
1015         int rc;
1016
1017         rc = kstrtobool(buffer, &val);
1018         if (rc)
1019                 return rc;
1020
1021         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
1022                 return -ENOTSUPP;
1023
1024         sbi->ll_xattr_cache_enabled = val;
1025         sbi->ll_xattr_cache_set = 1;
1026
1027         return count;
1028 }
1029 LUSTRE_RW_ATTR(xattr_cache);
1030
1031 static ssize_t tiny_write_show(struct kobject *kobj,
1032                                struct attribute *attr,
1033                                char *buf)
1034 {
1035         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1036                                               ll_kset.kobj);
1037
1038         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
1039 }
1040
1041 static ssize_t tiny_write_store(struct kobject *kobj,
1042                                 struct attribute *attr,
1043                                 const char *buffer,
1044                                 size_t count)
1045 {
1046         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1047                                               ll_kset.kobj);
1048         bool val;
1049         int rc;
1050
1051         rc = kstrtobool(buffer, &val);
1052         if (rc)
1053                 return rc;
1054
1055         spin_lock(&sbi->ll_lock);
1056         if (val)
1057                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
1058         else
1059                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1060         spin_unlock(&sbi->ll_lock);
1061
1062         return count;
1063 }
1064 LUSTRE_RW_ATTR(tiny_write);
1065
1066 static ssize_t fast_read_show(struct kobject *kobj,
1067                               struct attribute *attr,
1068                               char *buf)
1069 {
1070         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1071                                               ll_kset.kobj);
1072
1073         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1074 }
1075
1076 static ssize_t fast_read_store(struct kobject *kobj,
1077                                struct attribute *attr,
1078                                const char *buffer,
1079                                size_t count)
1080 {
1081         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1082                                               ll_kset.kobj);
1083         bool val;
1084         int rc;
1085
1086         rc = kstrtobool(buffer, &val);
1087         if (rc)
1088                 return rc;
1089
1090         spin_lock(&sbi->ll_lock);
1091         if (val)
1092                 sbi->ll_flags |= LL_SBI_FAST_READ;
1093         else
1094                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1095         spin_unlock(&sbi->ll_lock);
1096
1097         return count;
1098 }
1099 LUSTRE_RW_ATTR(fast_read);
1100
1101 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1102 {
1103         struct super_block      *sb    = m->private;
1104         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1105         struct cl_client_cache  *cache = sbi->ll_cache;
1106         long pages;
1107         int mb;
1108
1109         pages = atomic_long_read(&cache->ccc_unstable_nr);
1110         mb    = (pages * PAGE_SIZE) >> 20;
1111
1112         seq_printf(m, "unstable_check:     %8d\n"
1113                       "unstable_pages: %12ld\n"
1114                       "unstable_mb:        %8d\n",
1115                    cache->ccc_unstable_check, pages, mb);
1116         return 0;
1117 }
1118
1119 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1120                                            const char __user *buffer,
1121                                            size_t count, loff_t *unused)
1122 {
1123         struct seq_file *seq = file->private_data;
1124         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1125         char kernbuf[128];
1126         bool val;
1127         int rc;
1128
1129         if (count == 0)
1130                 return 0;
1131         if (count >= sizeof(kernbuf))
1132                 return -EINVAL;
1133
1134         if (copy_from_user(kernbuf, buffer, count))
1135                 return -EFAULT;
1136         kernbuf[count] = 0;
1137
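        /* as with max_cached_mb, an optional "unstable_check:" label from
         * the seq_show output may precede the value */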
1138         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1139                   kernbuf;
1140         rc = kstrtobool_from_user(buffer, count, &val);
1141         if (rc < 0)
1142                 return rc;
1143
1144         /* borrow lru lock to set the value */
1145         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1146         sbi->ll_cache->ccc_unstable_check = val;
1147         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1148
1149         return count;
1150 }
1151
1152 LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
1153
1154 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1155 {
1156         struct super_block *sb = m->private;
1157         struct ll_sb_info *sbi = ll_s2sbi(sb);
1158         struct root_squash_info *squash = &sbi->ll_squash;
1159
1160         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1161         return 0;
1162 }
1163
1164 static ssize_t ll_root_squash_seq_write(struct file *file,
1165                                         const char __user *buffer,
1166                                         size_t count, loff_t *off)
1167 {
1168         struct seq_file *m = file->private_data;
1169         struct super_block *sb = m->private;
1170         struct ll_sb_info *sbi = ll_s2sbi(sb);
1171         struct root_squash_info *squash = &sbi->ll_squash;
1172
1173         return lprocfs_wr_root_squash(buffer, count, squash,
1174                                       ll_get_fsname(sb, NULL, 0));
1175 }
1176
1177 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1178
1179 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1180 {
1181         struct super_block *sb = m->private;
1182         struct ll_sb_info *sbi = ll_s2sbi(sb);
1183         struct root_squash_info *squash = &sbi->ll_squash;
1184         int len;
1185
1186         down_read(&squash->rsi_sem);
1187         if (!list_empty(&squash->rsi_nosquash_nids)) {
1188                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1189                                         &squash->rsi_nosquash_nids);
1190                 m->count += len;
1191                 seq_putc(m, '\n');
1192         } else {
1193                 seq_puts(m, "NONE\n");
1194         }
1195         up_read(&squash->rsi_sem);
1196
1197         return 0;
1198 }
1199
1200 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1201                                           const char __user *buffer,
1202                                           size_t count, loff_t *off)
1203 {
1204         struct seq_file *m = file->private_data;
1205         struct super_block *sb = m->private;
1206         struct ll_sb_info *sbi = ll_s2sbi(sb);
1207         struct root_squash_info *squash = &sbi->ll_squash;
1208         int rc;
1209
1210         rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
1211                                       ll_get_fsname(sb, NULL, 0));
1212         if (rc < 0)
1213                 return rc;
1214
1215         ll_compute_rootsquash_state(sbi);
1216
1217         return rc;
1218 }
1219
1220 LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1221
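/* per-mount debugfs entries, created below the "llite" debugfs root by
 * ll_debugfs_register_super() */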
1222 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1223         { .name =       "site",
1224           .fops =       &ll_site_stats_fops                     },
1225         { .name =       "max_read_ahead_mb",
1226           .fops =       &ll_max_readahead_mb_fops               },
1227         { .name =       "max_read_ahead_per_file_mb",
1228           .fops =       &ll_max_readahead_per_file_mb_fops      },
1229         { .name =       "max_read_ahead_whole_mb",
1230           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1231         { .name =       "max_cached_mb",
1232           .fops =       &ll_max_cached_mb_fops                  },
1233         { .name =       "statahead_stats",
1234           .fops =       &ll_statahead_stats_fops                },
1235         { .name =       "unstable_stats",
1236           .fops =       &ll_unstable_stats_fops                 },
1237         { .name =       "sbi_flags",
1238           .fops =       &ll_sbi_flags_fops                      },
1239         { .name =       "root_squash",
1240           .fops =       &ll_root_squash_fops                    },
1241         { .name =       "nosquash_nids",
1242           .fops =       &ll_nosquash_nids_fops                  },
1243         { NULL }
1244 };
1245
1246 #define MAX_STRING_SIZE 128
1247
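/* per-mount sysfs attributes, published through the ll_kset registered in
 * ll_debugfs_register_super() */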
1248 static struct attribute *llite_attrs[] = {
1249         &lustre_attr_blocksize.attr,
1250         &lustre_attr_stat_blocksize.attr,
1251         &lustre_attr_kbytestotal.attr,
1252         &lustre_attr_kbytesfree.attr,
1253         &lustre_attr_kbytesavail.attr,
1254         &lustre_attr_filestotal.attr,
1255         &lustre_attr_filesfree.attr,
1256         &lustre_attr_client_type.attr,
1257         &lustre_attr_fstype.attr,
1258         &lustre_attr_uuid.attr,
1259         &lustre_attr_checksums.attr,
1260         &lustre_attr_checksum_pages.attr,
1261         &lustre_attr_stats_track_pid.attr,
1262         &lustre_attr_stats_track_ppid.attr,
1263         &lustre_attr_stats_track_gid.attr,
1264         &lustre_attr_statahead_running_max.attr,
1265         &lustre_attr_statahead_max.attr,
1266         &lustre_attr_statahead_agl.attr,
1267         &lustre_attr_lazystatfs.attr,
1268         &lustre_attr_max_easize.attr,
1269         &lustre_attr_default_easize.attr,
1270         &lustre_attr_xattr_cache.attr,
1271         &lustre_attr_fast_read.attr,
1272         &lustre_attr_tiny_write.attr,
1273         NULL,
1274 };
1275
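/*
 * Release callback for the per-mount kset kobject; completes
 * ll_kobj_unregister so ll_debugfs_unregister_super() can wait for the
 * sysfs side to be fully torn down.
 */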
1276 static void sbi_kobj_release(struct kobject *kobj)
1277 {
1278         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1279                                               ll_kset.kobj);
1280         complete(&sbi->ll_kobj_unregister);
1281 }
1282
1283 static struct kobj_type sbi_ktype = {
1284         .default_attrs  = llite_attrs,
1285         .sysfs_ops      = &lustre_sysfs_ops,
1286         .release        = sbi_kobj_release,
1287 };
1288
1289 static const struct llite_file_opcode {
1290         __u32       opcode;
1291         __u32       type;
1292         const char *opname;
1293 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1294         /* file operation */
1295         { LPROC_LL_DIRTY_HITS,     LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1296         { LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1297         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1298                                    "read_bytes" },
1299         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1300                                    "write_bytes" },
1301         { LPROC_LL_BRW_READ,       LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1302                                    "brw_read" },
1303         { LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1304                                    "brw_write" },
1305         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1306         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1307         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1308         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1309         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1310         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1311         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1312         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1313         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1314         /* inode operation */
1315         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1316         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1317         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1318         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1319         /* dir inode operation */
1320         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1321         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1322         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1323         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1324         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1325         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1326         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1327         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1328         /* special inode operation */
1329         { LPROC_LL_STAFS,          LPROCFS_TYPE_REGS, "statfs" },
1330         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1331         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1332         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1333         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1334         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1335         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1336         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1337 };
1338
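/*
 * Add \a count to the per-mount counter for \a op, but only when the current
 * task matches the configured stats tracking filter (all, PID, parent PID
 * or GID).
 */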
1339 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1340 {
1341         if (!sbi->ll_stats)
1342                 return;
1343         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1344                 lprocfs_counter_add(sbi->ll_stats, op, count);
1345         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1346                  sbi->ll_stats_track_id == current->pid)
1347                 lprocfs_counter_add(sbi->ll_stats, op, count);
1348         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1349                  sbi->ll_stats_track_id == current->parent->pid)
1350                 lprocfs_counter_add(sbi->ll_stats, op, count);
1351         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1352                  sbi->ll_stats_track_id ==
1353                         from_kgid(&init_user_ns, current_gid()))
1354                 lprocfs_counter_add(sbi->ll_stats, op, count);
1355 }
1356 EXPORT_SYMBOL(ll_stats_ops_tally);
1357
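/* human-readable names for the RA_STAT_* readahead counters exposed through
 * the "read_ahead_stats" file */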
1358 static const char *ra_stat_string[] = {
1359         [RA_STAT_HIT] = "hits",
1360         [RA_STAT_MISS] = "misses",
1361         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1362         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1363         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1364         [RA_STAT_FAILED_MATCH] = "failed lock match",
1365         [RA_STAT_DISCARDED] = "read but discarded",
1366         [RA_STAT_ZERO_LEN] = "zero length file",
1367         [RA_STAT_ZERO_WINDOW] = "zero size window",
1368         [RA_STAT_EOF] = "read-ahead to EOF",
1369         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1370         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1371         [RA_STAT_FAILED_REACH_END] = "failed to reach end"
1372 };
1373
1374 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1375 {
1376         struct lustre_sb_info *lsi = s2lsi(sb);
1377         struct ll_sb_info *sbi = ll_s2sbi(sb);
1378         int err, id, rc;
1379
1380         ENTRY;
1381         LASSERT(sbi);
1382
1383         if (IS_ERR_OR_NULL(llite_root))
1384                 goto out_ll_kset;
1385
1386         sbi->ll_debugfs_entry = ldebugfs_register(name, llite_root,
1387                                                   lprocfs_llite_obd_vars, sb);
1388         if (IS_ERR_OR_NULL(sbi->ll_debugfs_entry)) {
1389                 err = sbi->ll_debugfs_entry ? PTR_ERR(sbi->ll_debugfs_entry) :
1390                                               -ENOMEM;
1391                 sbi->ll_debugfs_entry = NULL;
1392                 RETURN(err);
1393         }
1394
1395         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache",
1396                                  0444, &vvp_dump_pgcache_file_ops, sbi);
1397         if (rc)
1398                 CWARN("Error adding the dump_page_cache file\n");
1399
1400         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1401                                  &ll_rw_extents_stats_fops, sbi);
1402         if (rc)
1403                 CWARN("Error adding the extents_stats file\n");
1404
1405         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1406                                  "extents_stats_per_process", 0644,
1407                                  &ll_rw_extents_stats_pp_fops, sbi);
1408         if (rc)
1409                 CWARN("Error adding the extents_stats_per_process file\n");
1410
1411         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1412                                  &ll_rw_offset_stats_fops, sbi);
1413         if (rc)
1414                 CWARN("Error adding the offset_stats file\n");
1415
1416         /* File operations stats */
1417         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1418                                             LPROCFS_STATS_FLAG_NONE);
1419         if (sbi->ll_stats == NULL)
1420                 GOTO(out_debugfs, err = -ENOMEM);
1421
1422         /* do counter init */
1423         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1424                 u32 type = llite_opcode_table[id].type;
1425                 void *ptr = NULL;
1426
1427                 if (type & LPROCFS_TYPE_REGS)
1428                         ptr = "regs";
1429                 else if (type & LPROCFS_TYPE_BYTES)
1430                         ptr = "bytes";
1431                 else if (type & LPROCFS_TYPE_PAGES)
1432                         ptr = "pages";
1433                 lprocfs_counter_init(sbi->ll_stats,
1434                                      llite_opcode_table[id].opcode,
1435                                      (type & LPROCFS_CNTR_AVGMINMAX),
1436                                      llite_opcode_table[id].opname, ptr);
1437         }
1438
1439         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1440                                       sbi->ll_stats);
1441         if (err)
1442                 GOTO(out_stats, err);
1443
1444         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1445                                                LPROCFS_STATS_FLAG_NONE);
1446         if (sbi->ll_ra_stats == NULL)
1447                 GOTO(out_stats, err = -ENOMEM);
1448
1449         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1450                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1451                                      ra_stat_string[id], "pages");
1452
1453         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1454                                       sbi->ll_ra_stats);
1455         if (err)
1456                 GOTO(out_ra_stats, err);
1457
1458 out_ll_kset:
1459         /* The per-mount sysfs kset is also registered here */
1460         sbi->ll_kset.kobj.parent = llite_kobj;
1461         sbi->ll_kset.kobj.ktype = &sbi_ktype;
1462         init_completion(&sbi->ll_kobj_unregister);
1463         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1464         if (err)
1465                 GOTO(out_ra_stats, err);
1466
1467         err = kset_register(&sbi->ll_kset);
1468         if (err)
1469                 GOTO(out_ra_stats, err);
1470
1471         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1472
1473         RETURN(0);
1474 out_ra_stats:
1475         lprocfs_free_stats(&sbi->ll_ra_stats);
1476 out_stats:
1477         lprocfs_free_stats(&sbi->ll_stats);
1478 out_debugfs:
1479         ldebugfs_remove(&sbi->ll_debugfs_entry);
1480
1481         RETURN(err);
1482 }
1483
1484 void ll_debugfs_unregister_super(struct super_block *sb)
1485 {
1486         struct lustre_sb_info *lsi = s2lsi(sb);
1487         struct ll_sb_info *sbi = ll_s2sbi(sb);
1488
1489         if (!IS_ERR_OR_NULL(sbi->ll_debugfs_entry))
1490                 ldebugfs_remove(&sbi->ll_debugfs_entry);
1491
1492         if (sbi->ll_dt_obd)
1493                 sysfs_remove_link(&sbi->ll_kset.kobj,
1494                                   sbi->ll_dt_obd->obd_type->typ_name);
1495
1496         if (sbi->ll_md_obd)
1497                 sysfs_remove_link(&sbi->ll_kset.kobj,
1498                                   sbi->ll_md_obd->obd_type->typ_name);
1499
1500         kobject_put(lsi->lsi_kobj);
1501
1502         kset_unregister(&sbi->ll_kset);
1503         wait_for_completion(&sbi->ll_kobj_unregister);
1504
1505         lprocfs_free_stats(&sbi->ll_ra_stats);
1506         lprocfs_free_stats(&sbi->ll_stats);
1507 }
1508 #undef MAX_STRING_SIZE
1509
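/*
 * Print one row per extent-size bucket for the selected pp_extents[] slot:
 * the bucket range (scaled through the K/M/G/... suffixes as sizes grow),
 * read and write counts, and their per-bucket and cumulative percentages.
 * Stops early once both cumulative counts have reached their totals.
 */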
1510 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1511                                    struct seq_file *seq, int which)
1512 {
1513         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1514         unsigned long start, end, r, w;
1515         const char *unitp = "KMGTPEZY";
1516         int i, units = 10;
1517         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1518
1519         read_cum = 0;
1520         write_cum = 0;
1521         start = 0;
1522
1523         for (i = 0; i < LL_HIST_MAX; i++) {
1524                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1525                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1526         }
1527
1528         for (i = 0; i < LL_HIST_MAX; i++) {
1529                 r = pp_info->pp_r_hist.oh_buckets[i];
1530                 w = pp_info->pp_w_hist.oh_buckets[i];
1531                 read_cum += r;
1532                 write_cum += w;
1533                 end = BIT(i + LL_HIST_START - units);
1534                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u  | %14lu %4u %4u\n",
1535                            start, *unitp, end, *unitp,
1536                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1537                            r, pct(r, read_tot), pct(read_cum, read_tot),
1538                            w, pct(w, write_tot), pct(write_cum, write_tot));
1539                 start = end;
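                /* crossed 1024 in the current unit: switch to the next size suffix */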
1540                 if (start == BIT(10)) {
1541                         start = 1;
1542                         units += 10;
1543                         unitp++;
1544                 }
1545                 if (read_cum == read_tot && write_cum == write_tot)
1546                         break;
1547         }
1548 }
1549
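/*
 * seq_file show handler for the per-process extents histograms: prints a
 * header followed by one histogram per tracked PID, or "disabled" when
 * stats collection is off.
 */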
1550 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1551 {
1552         struct timespec64 now;
1553         struct ll_sb_info *sbi = seq->private;
1554         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1555         int k;
1556
1557         ktime_get_real_ts64(&now);
1558
1559         if (!sbi->ll_rw_stats_on) {
1560                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1561                 return 0;
1562         }
1563         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1564                    (s64)now.tv_sec, now.tv_nsec);
1565         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1566         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1567                    "extents", "calls", "%", "cum%",
1568                    "calls", "%", "cum%");
1569         spin_lock(&sbi->ll_pp_extent_lock);
1570         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1571                 if (io_extents->pp_extents[k].pid != 0) {
1572                         seq_printf(seq, "\nPID: %d\n",
1573                                    io_extents->pp_extents[k].pid);
1574                         ll_display_extents_info(io_extents, seq, k);
1575                 }
1576         }
1577         spin_unlock(&sbi->ll_pp_extent_lock);
1578         return 0;
1579 }
1580
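/*
 * Write handler for the per-process extents histograms: the parsed value
 * enables or disables collection ('0' or 'disable' turns it off, anything
 * else turns it on), and the per-PID histogram slots are cleared either way.
 */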
1581 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1582                                                 const char __user *buf,
1583                                                 size_t len,
1584                                                 loff_t *off)
1585 {
1586         struct seq_file *seq = file->private_data;
1587         struct ll_sb_info *sbi = seq->private;
1588         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1589         int i;
1590         __s64 value;
1591
1592         if (len == 0)
1593                 return -EINVAL;
1594
1595         value = ll_stats_pid_write(buf, len);
1596
1597         if (value == 0)
1598                 sbi->ll_rw_stats_on = 0;
1599         else
1600                 sbi->ll_rw_stats_on = 1;
1601
1602         spin_lock(&sbi->ll_pp_extent_lock);
1603         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1604                 io_extents->pp_extents[i].pid = 0;
1605                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1606                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1607         }
1608         spin_unlock(&sbi->ll_pp_extent_lock);
1609         return len;
1610 }
1611
1612 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
1613
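/*
 * seq_file show handler for the aggregate extents histogram, i.e. the
 * summary slot kept at pp_extents[LL_PROCESS_HIST_MAX] that accumulates
 * all processes.
 */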
1614 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1615 {
1616         struct timespec64 now;
1617         struct ll_sb_info *sbi = seq->private;
1618         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1619
1620         ktime_get_real_ts64(&now);
1621
1622         if (!sbi->ll_rw_stats_on) {
1623                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1624                 return 0;
1625         }
1626         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1627                    (s64)now.tv_sec, now.tv_nsec);
1628
1629         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1630         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1631                    "extents", "calls", "%", "cum%",
1632                    "calls", "%", "cum%");
1633         spin_lock(&sbi->ll_lock);
1634         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1635         spin_unlock(&sbi->ll_lock);
1636
1637         return 0;
1638 }
1639
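/*
 * Write handler for the aggregate extents histogram: enables or disables
 * collection based on the parsed value and clears the histograms.  Note the
 * loop below runs to i <= LL_PROCESS_HIST_MAX so the aggregate slot is
 * reset along with the per-PID slots.
 */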
1640 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1641                                              const char __user *buf,
1642                                              size_t len, loff_t *off)
1643 {
1644         struct seq_file *seq = file->private_data;
1645         struct ll_sb_info *sbi = seq->private;
1646         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1647         int i;
1648         __s64 value;
1649
1650         if (len == 0)
1651                 return -EINVAL;
1652
1653         value = ll_stats_pid_write(buf, len);
1654
1655         if (value == 0)
1656                 sbi->ll_rw_stats_on = 0;
1657         else
1658                 sbi->ll_rw_stats_on = 1;
1659
1660         spin_lock(&sbi->ll_pp_extent_lock);
1661         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1662                 io_extents->pp_extents[i].pid = 0;
1663                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1664                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1665         }
1666         spin_unlock(&sbi->ll_pp_extent_lock);
1667
1668         return len;
1669 }
1670
1671 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
1672
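/*
 * Record one read or write in the extent and offset statistics.  The I/O
 * size is binned into the calling PID's extent histogram and into the
 * aggregate slot; the per-PID offset state is then updated, and when a
 * process issues a discontiguous I/O (pos does not follow its last file
 * position) the completed range is saved in the offset table and a new
 * range is started.
 */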
1673 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1674                        struct ll_file_data *file, loff_t pos,
1675                        size_t count, int rw)
1676 {
1677         int i, cur = -1;
1678         struct ll_rw_process_info *process;
1679         struct ll_rw_process_info *offset;
1680         int *off_count = &sbi->ll_rw_offset_entry_count;
1681         int *process_count = &sbi->ll_offset_process_count;
1682         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1683
1684         if (!sbi->ll_rw_stats_on)
1685                 return;
1686         process = sbi->ll_rw_process_info;
1687         offset = sbi->ll_rw_offset_info;
1688
1689         spin_lock(&sbi->ll_pp_extent_lock);
1690         /* Extent statistics */
1691         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1692                 if (io_extents->pp_extents[i].pid == pid) {
1693                         cur = i;
1694                         break;
1695                 }
1696         }
1697
1698         if (cur == -1) {
1699                 /* new process */
1700                 sbi->ll_extent_process_count =
1701                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1702                 cur = sbi->ll_extent_process_count;
1703                 io_extents->pp_extents[cur].pid = pid;
1704                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1705                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1706         }
1707
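        /*
         * Find the smallest bucket i with count < 2^(LL_HIST_START + i),
         * capped at the last bucket.
         */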
1708         for (i = 0; (count >= BIT(LL_HIST_START + i)) &&
1709              (i < (LL_HIST_MAX - 1)); i++);
1710         if (rw == 0) {
1711                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1712                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1713         } else {
1714                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1715                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1716         }
1717         spin_unlock(&sbi->ll_pp_extent_lock);
1718
1719         spin_lock(&sbi->ll_process_lock);
1720         /* Offset statistics */
1721         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1722                 if (process[i].rw_pid == pid) {
1723                         if (process[i].rw_last_file != file) {
1724                                 process[i].rw_range_start = pos;
1725                                 process[i].rw_last_file_pos = pos + count;
1726                                 process[i].rw_smallest_extent = count;
1727                                 process[i].rw_largest_extent = count;
1728                                 process[i].rw_offset = 0;
1729                                 process[i].rw_last_file = file;
1730                                 spin_unlock(&sbi->ll_process_lock);
1731                                 return;
1732                         }
1733                         if (process[i].rw_last_file_pos != pos) {
1734                                 *off_count =
1735                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1736                                 offset[*off_count].rw_op = process[i].rw_op;
1737                                 offset[*off_count].rw_pid = pid;
1738                                 offset[*off_count].rw_range_start =
1739                                         process[i].rw_range_start;
1740                                 offset[*off_count].rw_range_end =
1741                                         process[i].rw_last_file_pos;
1742                                 offset[*off_count].rw_smallest_extent =
1743                                         process[i].rw_smallest_extent;
1744                                 offset[*off_count].rw_largest_extent =
1745                                         process[i].rw_largest_extent;
1746                                 offset[*off_count].rw_offset =
1747                                         process[i].rw_offset;
1748                                 process[i].rw_op = rw;
1749                                 process[i].rw_range_start = pos;
1750                                 process[i].rw_smallest_extent = count;
1751                                 process[i].rw_largest_extent = count;
1752                                 process[i].rw_offset = pos -
1753                                         process[i].rw_last_file_pos;
1754                         }
1755                         if (process[i].rw_smallest_extent > count)
1756                                 process[i].rw_smallest_extent = count;
1757                         if (process[i].rw_largest_extent < count)
1758                                 process[i].rw_largest_extent = count;
1759                         process[i].rw_last_file_pos = pos + count;
1760                         spin_unlock(&sbi->ll_process_lock);
1761                         return;
1762                 }
1763         }
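        /* pid not seen before: claim the next slot round-robin and start a new range */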
1764         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1765         process[*process_count].rw_pid = pid;
1766         process[*process_count].rw_op = rw;
1767         process[*process_count].rw_range_start = pos;
1768         process[*process_count].rw_last_file_pos = pos + count;
1769         process[*process_count].rw_smallest_extent = count;
1770         process[*process_count].rw_largest_extent = count;
1771         process[*process_count].rw_offset = 0;
1772         process[*process_count].rw_last_file = file;
1773         spin_unlock(&sbi->ll_process_lock);
1774 }
1775
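/*
 * seq_file show handler for the offset statistics: prints the saved
 * discontiguous-offset records first, then the range currently being
 * tracked for each active process.
 */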
1776 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1777 {
1778         struct timespec64 now;
1779         struct ll_sb_info *sbi = seq->private;
1780         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1781         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1782         int i;
1783
1784         ktime_get_real_ts64(&now);
1785
1786         if (!sbi->ll_rw_stats_on) {
1787                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1788                 return 0;
1789         }
1790         spin_lock(&sbi->ll_process_lock);
1791
1792         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1793                    (s64)now.tv_sec, now.tv_nsec);
1794         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1795                    "R/W", "PID", "RANGE START", "RANGE END",
1796                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1797
1798         /* We stored the discontiguous offsets here; print them first */
1799         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1800                 if (offset[i].rw_pid != 0)
1801                         seq_printf(seq,
1802                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1803                                    offset[i].rw_op == READ ? 'R' : 'W',
1804                                    offset[i].rw_pid,
1805                                    offset[i].rw_range_start,
1806                                    offset[i].rw_range_end,
1807                                    (unsigned long)offset[i].rw_smallest_extent,
1808                                    (unsigned long)offset[i].rw_largest_extent,
1809                                    offset[i].rw_offset);
1810         }
1811
1812         /* Then print the current offsets for each process */
1813         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1814                 if (process[i].rw_pid != 0)
1815                         seq_printf(seq,
1816                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1817                                    process[i].rw_op == READ ? 'R' : 'W',
1818                                    process[i].rw_pid,
1819                                    process[i].rw_range_start,
1820                                    process[i].rw_last_file_pos,
1821                                    (unsigned long)process[i].rw_smallest_extent,
1822                                    (unsigned long)process[i].rw_largest_extent,
1823                                    process[i].rw_offset);
1824         }
1825         spin_unlock(&sbi->ll_process_lock);
1826
1827         return 0;
1828 }
1829
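/*
 * Write handler for the offset statistics: enables or disables collection
 * based on the parsed value and resets both offset-tracking tables and
 * their counters.
 */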
1830 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1831                                             const char __user *buf,
1832                                             size_t len, loff_t *off)
1833 {
1834         struct seq_file *seq = file->private_data;
1835         struct ll_sb_info *sbi = seq->private;
1836         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1837         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1838         __s64 value;
1839
1840         if (len == 0)
1841                 return -EINVAL;
1842
1843         value = ll_stats_pid_write(buf, len);
1844
1845         if (value == 0)
1846                 sbi->ll_rw_stats_on = 0;
1847         else
1848                 sbi->ll_rw_stats_on = 1;
1849
1850         spin_lock(&sbi->ll_process_lock);
1851         sbi->ll_offset_process_count = 0;
1852         sbi->ll_rw_offset_entry_count = 0;
1853         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1854                LL_PROCESS_HIST_MAX);
1855         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1856                LL_OFFSET_HIST_MAX);
1857         spin_unlock(&sbi->ll_process_lock);
1858
1859         return len;
1860 }
1861
1862 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);