LU-8066 llite: move remaining single item proc files to sysfs
[fs/lustre-release.git] lustre/llite/lproc_llite.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 struct proc_dir_entry *proc_lustre_fs_root;
47 static struct kobject *llite_kobj;
48
49 int llite_tunables_register(void)
50 {
51         int rc = 0;
52
53         proc_lustre_fs_root = lprocfs_register("llite", proc_lustre_root,
54                                                NULL, NULL);
55         if (IS_ERR(proc_lustre_fs_root)) {
56                 rc = PTR_ERR(proc_lustre_fs_root);
57                 CERROR("cannot register '/proc/fs/lustre/llite': rc = %d\n",
58                        rc);
59                 proc_lustre_fs_root = NULL;
60                 return rc;
61         }
62
63         llite_kobj = class_setup_tunables("llite");
64         if (IS_ERR(llite_kobj)) {
65                 rc = PTR_ERR(llite_kobj);
66                 llite_kobj = NULL;
67         }
68
69         return rc;
70 }
71
72 void llite_tunables_unregister(void)
73 {
74         if (llite_kobj)
75                 kobject_put(llite_kobj);
76
77         lprocfs_remove(&proc_lustre_fs_root);
78 }
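/*
 * Illustration only, not part of this file: the two entry points above are
 * expected to be called as a pair by the llite module setup code, roughly
 * (hypothetical call sites):
 *
 *	rc = llite_tunables_register();
 *	if (rc)
 *		return rc;
 *	...
 *	llite_tunables_unregister();
 *
 * so that the "llite" procfs directory and its sysfs counterpart exist for
 * the whole lifetime of the module, independent of individual mounts.
 */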
79
80 #ifdef CONFIG_PROC_FS
81 /* /proc/lustre/llite mount point registration */
82 static const struct file_operations ll_rw_extents_stats_fops;
83 static const struct file_operations ll_rw_extents_stats_pp_fops;
84 static const struct file_operations ll_rw_offset_stats_fops;
85 static __s64 ll_stats_pid_write(const char __user *buf, size_t len);
86
87 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
88                               char *buf)
89 {
90         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
91                                               ll_kset.kobj);
92         struct obd_statfs osfs;
93         int rc;
94
95         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
96         if (rc)
97                 return rc;
98
99         return sprintf(buf, "%u\n", osfs.os_bsize);
100 }
101 LUSTRE_RO_ATTR(blocksize);
102
103 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
104                                    char *buf)
105 {
106         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
107                                               ll_kset.kobj);
108
109         return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
110 }
111
112 static ssize_t stat_blocksize_store(struct kobject *kobj,
113                                     struct attribute *attr,
114                                     const char *buffer,
115                                     size_t count)
116 {
117         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
118                                               ll_kset.kobj);
119         unsigned int val;
120         int rc;
121
122         rc = kstrtouint(buffer, 10, &val);
123         if (rc)
124                 return rc;
125
126         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
127                 return -ERANGE;
128
129         sbi->ll_stat_blksize = val;
130
131         return count;
132 }
133 LUSTRE_RW_ATTR(stat_blocksize);
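/*
 * Illustration only, not part of this file: stat_blocksize_store() above
 * accepts 0 (no override) or any power of two that is at least PAGE_SIZE.
 * A standalone userspace check of the same rule, assuming a 4096-byte page
 * size for the sake of the example:
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096U		/* assumption for this example only */

static bool stat_blocksize_valid(unsigned int val)
{
	return val == 0 ||
	       (val >= EXAMPLE_PAGE_SIZE && (val & (val - 1)) == 0);
}

int main(void)
{
	unsigned int samples[] = { 0, 512, 4096, 6144, 1048576 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%7u -> %s\n", samples[i],
		       stat_blocksize_valid(samples[i]) ? "accepted" : "-ERANGE");
	return 0;	/* 0, 4096 and 1048576 pass; 512 and 6144 are rejected */
}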
134
135 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
136                                 char *buf)
137 {
138         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
139                                               ll_kset.kobj);
140         struct obd_statfs osfs;
141         u32 blk_size;
142         u64 result;
143         int rc;
144
145         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
146         if (rc)
147                 return rc;
148
149         blk_size = osfs.os_bsize >> 10;
150         result = osfs.os_blocks;
151
152         while (blk_size >>= 1)
153                 result <<= 1;
154
155         return sprintf(buf, "%llu\n", result);
156 }
157 LUSTRE_RO_ATTR(kbytestotal);
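/*
 * Illustration only, not part of this file: the shift loop above converts a
 * block count into KiB without a 64-bit multiply. For a power-of-two
 * os_bsize it yields os_blocks * (os_bsize / 1024). A standalone equivalent:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t blocks_to_kib(uint64_t blocks, uint32_t bsize)
{
	uint32_t blk_size = bsize >> 10;	/* block size in KiB */
	uint64_t result = blocks;

	while (blk_size >>= 1)			/* double once per factor of two left */
		result <<= 1;
	return result;
}

int main(void)
{
	/* 1000 blocks of 4096 bytes -> 4000 KiB */
	printf("%llu\n", (unsigned long long)blocks_to_kib(1000, 4096));
	return 0;
}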
158
159 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
160                                char *buf)
161 {
162         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
163                                               ll_kset.kobj);
164         struct obd_statfs osfs;
165         u32 blk_size;
166         u64 result;
167         int rc;
168
169         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
170         if (rc)
171                 return rc;
172
173         blk_size = osfs.os_bsize >> 10;
174         result = osfs.os_bfree;
175
176         while (blk_size >>= 1)
177                 result <<= 1;
178
179         return sprintf(buf, "%llu\n", result);
180 }
181 LUSTRE_RO_ATTR(kbytesfree);
182
183 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
184                                 char *buf)
185 {
186         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
187                                               ll_kset.kobj);
188         struct obd_statfs osfs;
189         u32 blk_size;
190         u64 result;
191         int rc;
192
193         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
194         if (rc)
195                 return rc;
196
197         blk_size = osfs.os_bsize >> 10;
198         result = osfs.os_bavail;
199
200         while (blk_size >>= 1)
201                 result <<= 1;
202
203         return sprintf(buf, "%llu\n", result);
204 }
205 LUSTRE_RO_ATTR(kbytesavail);
206
207 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
208                                char *buf)
209 {
210         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
211                                               ll_kset.kobj);
212         struct obd_statfs osfs;
213         int rc;
214
215         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
216         if (rc)
217                 return rc;
218
219         return sprintf(buf, "%llu\n", osfs.os_files);
220 }
221 LUSTRE_RO_ATTR(filestotal);
222
223 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
224                               char *buf)
225 {
226         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
227                                               ll_kset.kobj);
228         struct obd_statfs osfs;
229         int rc;
230
231         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
232         if (rc)
233                 return rc;
234
235         return sprintf(buf, "%llu\n", osfs.os_ffree);
236 }
237 LUSTRE_RO_ATTR(filesfree);
238
239 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
240                                 char *buf)
241 {
242         return sprintf(buf, "local client\n");
243 }
244 LUSTRE_RO_ATTR(client_type);
245
246 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
247                            char *buf)
248 {
249         return sprintf(buf, "lustre\n");
250 }
251 LUSTRE_RO_ATTR(fstype);
252
253 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
254                          char *buf)
255 {
256         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
257                                               ll_kset.kobj);
258
259         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
260 }
261 LUSTRE_RO_ATTR(uuid);
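/*
 * Illustration only, not part of this file: each *_show handler above backs a
 * single sysfs file in the per-mount kset registered later in this file. A
 * minimal userspace reader; the instance directory name is a made-up example
 * (in practice it is "<fsname>-<superblock id>"):
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/fs/lustre/llite/lustrefs-ffff8803f5642000/kbytesfree";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("kbytesfree: %s", buf);	/* the value already ends in '\n' */
	fclose(f);
	return 0;
}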
262
263 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
264 {
265         struct super_block *sb = m->private;
266
267         /*
268          * See description of statistical counters in struct cl_site, and
269          * struct lu_site.
270          */
271         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
272 }
273 LPROC_SEQ_FOPS_RO(ll_site_stats);
274
275 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
276 {
277         struct super_block *sb = m->private;
278         struct ll_sb_info *sbi = ll_s2sbi(sb);
279         long pages_number;
280         int mult;
281
282         spin_lock(&sbi->ll_lock);
283         pages_number = sbi->ll_ra_info.ra_max_pages;
284         spin_unlock(&sbi->ll_lock);
285
286         mult = 1 << (20 - PAGE_SHIFT);
287         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
288 }
289
290 static ssize_t
291 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
292                               size_t count, loff_t *off)
293 {
294         struct seq_file *m = file->private_data;
295         struct super_block *sb = m->private;
296         struct ll_sb_info *sbi = ll_s2sbi(sb);
297         __s64 pages_number;
298         int rc;
299
300         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
301         if (rc)
302                 return rc;
303
304         pages_number >>= PAGE_SHIFT;
305
306         if (pages_number < 0 || pages_number > totalram_pages / 2) {
307                 /* 1/2 of RAM */
305                 CERROR("%s: can't set max_read_ahead_mb=%lu > %luMB\n",
309                        ll_get_fsname(sb, NULL, 0),
310                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
311                        totalram_pages >> (20 - PAGE_SHIFT + 1));
312                 return -ERANGE;
313         }
314
315         spin_lock(&sbi->ll_lock);
316         sbi->ll_ra_info.ra_max_pages = pages_number;
317         spin_unlock(&sbi->ll_lock);
318         return count;
319 }
320 LPROC_SEQ_FOPS(ll_max_readahead_mb);
321
322 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
323 {
324         struct super_block *sb = m->private;
325         struct ll_sb_info *sbi = ll_s2sbi(sb);
326         long pages_number;
327         int mult;
328
329         spin_lock(&sbi->ll_lock);
330         pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
331         spin_unlock(&sbi->ll_lock);
332
333         mult = 1 << (20 - PAGE_SHIFT);
334         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
335 }
336
337 static ssize_t
338 ll_max_readahead_per_file_mb_seq_write(struct file *file,
339                                        const char __user *buffer,
340                                        size_t count, loff_t *off)
341 {
342         struct seq_file *m = file->private_data;
343         struct super_block *sb = m->private;
344         struct ll_sb_info *sbi = ll_s2sbi(sb);
345         int rc;
346         __s64 pages_number;
347
348         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
349         if (rc)
350                 return rc;
351
352         pages_number >>= PAGE_SHIFT;
353
354         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
355                 CERROR("%s: can't set max_read_ahead_per_file_mb=%lu > "
356                        "max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0),
357                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
358                        sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT));
359                 return -ERANGE;
360         }
361
362         spin_lock(&sbi->ll_lock);
363         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
364         spin_unlock(&sbi->ll_lock);
365         return count;
366 }
367 LPROC_SEQ_FOPS(ll_max_readahead_per_file_mb);
368
369 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
370 {
371         struct super_block *sb = m->private;
372         struct ll_sb_info *sbi = ll_s2sbi(sb);
373         long pages_number;
374         int mult;
375
376         spin_lock(&sbi->ll_lock);
377         pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
378         spin_unlock(&sbi->ll_lock);
379
380         mult = 1 << (20 - PAGE_SHIFT);
381         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
382 }
383
384 static ssize_t
385 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
386                                      const char __user *buffer,
387                                      size_t count, loff_t *off)
388 {
389         struct seq_file *m = file->private_data;
390         struct super_block *sb = m->private;
391         struct ll_sb_info *sbi = ll_s2sbi(sb);
392         int rc;
393         __s64 pages_number;
394
395         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
396         if (rc)
397                 return rc;
398
399         pages_number >>= PAGE_SHIFT;
400
401         /* Cap this at the current max readahead window size, the readahead
402          * algorithm does this anyway so it's pointless to set it larger. */
403         if (pages_number < 0 ||
404             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
405                 int pages_shift = 20 - PAGE_SHIFT;
406                 CERROR("%s: can't set max_read_ahead_whole_mb=%lu > "
407                        "max_read_ahead_per_file_mb=%lu\n",
408                        ll_get_fsname(sb, NULL, 0),
409                        (unsigned long)pages_number >> pages_shift,
410                        sbi->ll_ra_info.ra_max_pages_per_file >> pages_shift);
411                 return -ERANGE;
412         }
413
414         spin_lock(&sbi->ll_lock);
415         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
416         spin_unlock(&sbi->ll_lock);
417         return count;
418 }
419 LPROC_SEQ_FOPS(ll_max_read_ahead_whole_mb);
420
421 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
422 {
423         struct super_block     *sb    = m->private;
424         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
425         struct cl_client_cache *cache = sbi->ll_cache;
426         int shift = 20 - PAGE_SHIFT;
427         long max_cached_mb;
428         long unused_mb;
429
430         max_cached_mb = cache->ccc_lru_max >> shift;
431         unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
432         seq_printf(m, "users: %d\n"
433                    "max_cached_mb: %ld\n"
434                    "used_mb: %ld\n"
435                    "unused_mb: %ld\n"
436                    "reclaim_count: %u\n",
437                    atomic_read(&cache->ccc_users),
438                    max_cached_mb,
439                    max_cached_mb - unused_mb,
440                    unused_mb,
441                    cache->ccc_lru_shrinkers);
442         return 0;
443 }
444
445 static ssize_t
446 ll_max_cached_mb_seq_write(struct file *file, const char __user *buffer,
447                            size_t count, loff_t *off)
448 {
449         struct seq_file *m = file->private_data;
450         struct super_block *sb = m->private;
451         struct ll_sb_info *sbi = ll_s2sbi(sb);
452         struct cl_client_cache *cache = sbi->ll_cache;
453         struct lu_env *env;
454         long diff = 0;
455         long nrpages = 0;
456         __u16 refcheck;
457         __s64 pages_number;
458         long rc;
459         char kernbuf[128];
460         ENTRY;
461
462         if (count >= sizeof(kernbuf))
463                 RETURN(-EINVAL);
464
465         if (copy_from_user(kernbuf, buffer, count))
466                 RETURN(-EFAULT);
467         kernbuf[count] = 0;
468
469         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
470                   kernbuf;
471         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
472         if (rc)
473                 RETURN(rc);
474
475         pages_number >>= PAGE_SHIFT;
476
477         if (pages_number < 0 || pages_number > totalram_pages) {
478                 CERROR("%s: can't set max cache more than %lu MB\n",
479                        ll_get_fsname(sb, NULL, 0),
480                        totalram_pages >> (20 - PAGE_SHIFT));
481                 RETURN(-ERANGE);
482         }
483         /* Allow enough cache so clients can make well-formed RPCs */
484         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
485
486         spin_lock(&sbi->ll_lock);
487         diff = pages_number - cache->ccc_lru_max;
488         spin_unlock(&sbi->ll_lock);
489
490         /* easy - add more LRU slots. */
491         if (diff >= 0) {
492                 atomic_long_add(diff, &cache->ccc_lru_left);
493                 GOTO(out, rc = 0);
494         }
495
496         env = cl_env_get(&refcheck);
497         if (IS_ERR(env))
498                 RETURN(PTR_ERR(env));
499
500         diff = -diff;
501         while (diff > 0) {
502                 long tmp;
503
504                 /* reduce LRU budget from free slots. */
505                 do {
506                         long ov, nv;
507
508                         ov = atomic_long_read(&cache->ccc_lru_left);
509                         if (ov == 0)
510                                 break;
511
512                         nv = ov > diff ? ov - diff : 0;
513                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
514                         if (likely(ov == rc)) {
515                                 diff -= ov - nv;
516                                 nrpages += ov - nv;
517                                 break;
518                         }
519                 } while (1);
520
521                 if (diff <= 0)
522                         break;
523
524                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
525                         rc = -ENODEV;
526                         break;
527                 }
528
529                 /* difficult - have to ask OSCs to drop LRU slots. */
530                 tmp = diff << 1;
531                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
532                                 sizeof(KEY_CACHE_LRU_SHRINK),
533                                 KEY_CACHE_LRU_SHRINK,
534                                 sizeof(tmp), &tmp, NULL);
535                 if (rc < 0)
536                         break;
537         }
538         cl_env_put(env, &refcheck);
539
540 out:
541         if (rc >= 0) {
542                 spin_lock(&sbi->ll_lock);
543                 cache->ccc_lru_max = pages_number;
544                 spin_unlock(&sbi->ll_lock);
545                 rc = count;
546         } else {
547                 atomic_long_add(nrpages, &cache->ccc_lru_left);
548         }
549         return rc;
550 }
551 LPROC_SEQ_FOPS(ll_max_cached_mb);
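/*
 * Illustration only, not part of this file: ll_max_cached_mb_seq_write()
 * accepts either a bare value (interpreted in MiB by default, unit suffixes
 * allowed) or the "max_cached_mb:" labelled form that the read side prints.
 * A minimal userspace writer; the instance directory name is a made-up
 * example:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path =
		"/proc/fs/lustre/llite/lustrefs-ffff8803f5642000/max_cached_mb";
	const char *val = "max_cached_mb: 512\n";	/* "512\n" would work too */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fwrite(val, 1, strlen(val), f) != strlen(val))
		perror("fwrite");
	fclose(f);
	return 0;
}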
552
553 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
554                               char *buf)
555 {
556         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
557                                               ll_kset.kobj);
558
559         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
560 }
561
562 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
563                                const char *buffer, size_t count)
564 {
565         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
566                                               ll_kset.kobj);
567         bool val;
568         int tmp;
569         int rc;
570
571         if (!sbi->ll_dt_exp)
572                 /* Not set up yet */
573                 return -EAGAIN;
574
575         rc = kstrtobool(buffer, &val);
576         if (rc)
577                 return rc;
578         if (val)
579                 sbi->ll_flags |= LL_SBI_CHECKSUM;
580         else
581                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
582         tmp = val;
583
584         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
585                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
586         if (rc)
587                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
588
589         return count;
590 }
591 LUSTRE_RW_ATTR(checksums);
592
593 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
594
595 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
596                               enum stats_track_type type)
597 {
598         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
599                                               ll_kset.kobj);
600
601         if (sbi->ll_stats_track_type == type)
602                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
603         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
604                 return sprintf(buf, "0 (all)\n");
605
606         return sprintf(buf, "untracked\n");
607 }
608
609 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
610                               size_t count, enum stats_track_type type)
611 {
612         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
613                                               ll_kset.kobj);
614         unsigned long pid;
615         int rc;
616
617         rc = kstrtoul(buffer, 10, &pid);
618         if (rc)
619                 return rc;
620
621         sbi->ll_stats_track_id = pid;
622         if (pid == 0)
623                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
624         else
625                 sbi->ll_stats_track_type = type;
626         lprocfs_clear_stats(sbi->ll_stats);
627         return count;
628 }
629
630 static ssize_t stats_track_pid_show(struct kobject *kobj,
631                                     struct attribute *attr,
632                                     char *buf)
633 {
634         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
635 }
636
637 static ssize_t stats_track_pid_store(struct kobject *kobj,
638                                      struct attribute *attr,
639                                      const char *buffer,
640                                      size_t count)
641 {
642         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
643 }
644 LUSTRE_RW_ATTR(stats_track_pid);
645
646 static ssize_t stats_track_ppid_show(struct kobject *kobj,
647                                      struct attribute *attr,
648                                      char *buf)
649 {
650         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
651 }
652
653 static ssize_t stats_track_ppid_store(struct kobject *kobj,
654                                       struct attribute *attr,
655                                       const char *buffer,
656                                       size_t count)
657 {
658         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
659 }
660 LUSTRE_RW_ATTR(stats_track_ppid);
661
662 static ssize_t stats_track_gid_show(struct kobject *kobj,
663                                     struct attribute *attr,
664                                     char *buf)
665 {
666         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
667 }
668
669 static ssize_t stats_track_gid_store(struct kobject *kobj,
670                                      struct attribute *attr,
671                                      const char *buffer,
672                                      size_t count)
673 {
674         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
675 }
676 LUSTRE_RW_ATTR(stats_track_gid);
677
678 static ssize_t statahead_running_max_show(struct kobject *kobj,
679                                           struct attribute *attr,
680                                           char *buf)
681 {
682         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
683                                               ll_kset.kobj);
684
685         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
686 }
687
688 static ssize_t statahead_running_max_store(struct kobject *kobj,
689                                            struct attribute *attr,
690                                            const char *buffer,
691                                            size_t count)
692 {
693         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
694                                               ll_kset.kobj);
695         unsigned long val;
696         int rc;
697
698         rc = kstrtoul(buffer, 0, &val);
699         if (rc)
700                 return rc;
701
702         if (val <= LL_SA_RUNNING_MAX) {
703                 sbi->ll_sa_running_max = val;
704                 return count;
705         }
706
707         CERROR("Bad statahead_running_max value %lu. Valid values "
708                "are in the range [0, %d]\n", val, LL_SA_RUNNING_MAX);
709
710         return -ERANGE;
711 }
712 LUSTRE_RW_ATTR(statahead_running_max);
713
714 static ssize_t statahead_max_show(struct kobject *kobj,
715                                   struct attribute *attr,
716                                   char *buf)
717 {
718         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
719                                               ll_kset.kobj);
720
721         return sprintf(buf, "%u\n", sbi->ll_sa_max);
722 }
723
724 static ssize_t statahead_max_store(struct kobject *kobj,
725                                    struct attribute *attr,
726                                    const char *buffer,
727                                    size_t count)
728 {
729         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
730                                               ll_kset.kobj);
731         unsigned long val;
732         int rc;
733
734         rc = kstrtoul(buffer, 0, &val);
735         if (rc)
736                 return rc;
737
738         if (val <= LL_SA_RPC_MAX)
739                 sbi->ll_sa_max = val;
740         else
741                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
742                        val, LL_SA_RPC_MAX);
743
744         return count;
745 }
746 LUSTRE_RW_ATTR(statahead_max);
747
748 static ssize_t statahead_agl_show(struct kobject *kobj,
749                                   struct attribute *attr,
750                                   char *buf)
751 {
752         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
753                                               ll_kset.kobj);
754
755         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
756 }
757
758 static ssize_t statahead_agl_store(struct kobject *kobj,
759                                    struct attribute *attr,
760                                    const char *buffer,
761                                    size_t count)
762 {
763         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
764                                               ll_kset.kobj);
765         bool val;
766         int rc;
767
768         rc = kstrtobool(buffer, &val);
769         if (rc)
770                 return rc;
771
772         if (val)
773                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
774         else
775                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
776
777         return count;
778 }
779 LUSTRE_RW_ATTR(statahead_agl);
780
781 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
782 {
783         struct super_block *sb = m->private;
784         struct ll_sb_info *sbi = ll_s2sbi(sb);
785
786         seq_printf(m, "statahead total: %u\n"
787                     "statahead wrong: %u\n"
788                     "agl total: %u\n",
789                     atomic_read(&sbi->ll_sa_total),
790                     atomic_read(&sbi->ll_sa_wrong),
791                     atomic_read(&sbi->ll_agl_total));
792         return 0;
793 }
794 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
795
796 static ssize_t lazystatfs_show(struct kobject *kobj,
797                                struct attribute *attr,
798                                char *buf)
799 {
800         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
801                                               ll_kset.kobj);
802
803         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
804 }
805
806 static ssize_t lazystatfs_store(struct kobject *kobj,
807                                 struct attribute *attr,
808                                 const char *buffer,
809                                 size_t count)
810 {
811         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
812                                               ll_kset.kobj);
813         bool val;
814         int rc;
815
816         rc = kstrtobool(buffer, &val);
817         if (rc)
818                 return rc;
819
820         if (val)
821                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
822         else
823                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
824
825         return count;
826 }
827 LUSTRE_RW_ATTR(lazystatfs);
828
829 static ssize_t max_easize_show(struct kobject *kobj,
830                                struct attribute *attr,
831                                char *buf)
832 {
833         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
834                                               ll_kset.kobj);
835         unsigned int ealen;
836         int rc;
837
838         rc = ll_get_max_mdsize(sbi, &ealen);
839         if (rc)
840                 return rc;
841
842         return sprintf(buf, "%u\n", ealen);
843 }
844 LUSTRE_RO_ATTR(max_easize);
845
846 /**
847  * Get default_easize.
848  *
849  * \see client_obd::cl_default_mds_easize
850  *
851  * \param[in] kobj      kobject of the ll_sb_info (\a attr is unused)
852  * \param[in] buf       buffer to hold the string form of the value
853  *
854  * \retval positive     bytes written to \a buf on success
855  * \retval negative     negated errno on failure
856  */
857 static ssize_t default_easize_show(struct kobject *kobj,
858                                    struct attribute *attr,
859                                    char *buf)
860 {
861         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
862                                               ll_kset.kobj);
863         unsigned int ealen;
864         int rc;
865
866         rc = ll_get_default_mdsize(sbi, &ealen);
867         if (rc)
868                 return rc;
869
870         return sprintf(buf, "%u\n", ealen);
871 }
872
873 /**
874  * Set default_easize.
875  *
876  * Range checking on the passed value is handled by
877  * ll_set_default_mdsize().
878  *
879  * \see client_obd::cl_default_mds_easize
880  *
881  * \param[in] kobj      kobject of the ll_sb_info
882  * \param[in] attr      unused sysfs attribute
883  * \param[in] buffer    string passed from user space
884  * \param[in] count     \a buffer length
885  *
886  * \retval positive     \a count on success
887  * \retval negative     negated errno on failure
888  */
889 static ssize_t default_easize_store(struct kobject *kobj,
890                                     struct attribute *attr,
891                                     const char *buffer,
892                                     size_t count)
893 {
894         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
895                                               ll_kset.kobj);
896         unsigned int val;
897         int rc;
898
899         if (count == 0)
900                 return 0;
901
902         rc = kstrtouint(buffer, 10, &val);
903         if (rc)
904                 return rc;
905
906         rc = ll_set_default_mdsize(sbi, val);
907         if (rc)
908                 return rc;
909
910         return count;
911 }
912 LUSTRE_RW_ATTR(default_easize);
913
914 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
915 {
916         const char *str[] = LL_SBI_FLAGS;
917         struct super_block *sb = m->private;
918         int flags = ll_s2sbi(sb)->ll_flags;
919         int i = 0;
920
921         while (flags != 0) {
922                 if (ARRAY_SIZE(str) <= i) {
923                         CERROR("%s: Revise array LL_SBI_FLAGS to match sbi "
924                                 "flags please.\n", ll_get_fsname(sb, NULL, 0));
925                         return -EINVAL;
926                 }
927
928                 if (flags & 0x1)
929                         seq_printf(m, "%s ", str[i]);
930                 flags >>= 1;
931                 ++i;
932         }
933         seq_printf(m, "\b\n");
934         return 0;
935 }
936 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
937
938 static ssize_t xattr_cache_show(struct kobject *kobj,
939                                 struct attribute *attr,
940                                 char *buf)
941 {
942         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
943                                               ll_kset.kobj);
944
945         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
946 }
947
948 static ssize_t xattr_cache_store(struct kobject *kobj,
949                                  struct attribute *attr,
950                                  const char *buffer,
951                                  size_t count)
952 {
953         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
954                                               ll_kset.kobj);
955         bool val;
956         int rc;
957
958         rc = kstrtobool(buffer, &val);
959         if (rc)
960                 return rc;
961
962         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
963                 return -ENOTSUPP;
964
965         sbi->ll_xattr_cache_enabled = val;
966         sbi->ll_xattr_cache_set = 1;
967
968         return count;
969 }
970 LUSTRE_RW_ATTR(xattr_cache);
971
972 static ssize_t tiny_write_show(struct kobject *kobj,
973                                struct attribute *attr,
974                                char *buf)
975 {
976         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
977                                               ll_kset.kobj);
978
979         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
980 }
981
982 static ssize_t tiny_write_store(struct kobject *kobj,
983                                 struct attribute *attr,
984                                 const char *buffer,
985                                 size_t count)
986 {
987         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
988                                               ll_kset.kobj);
989         bool val;
990         int rc;
991
992         rc = kstrtobool(buffer, &val);
993         if (rc)
994                 return rc;
995
996         spin_lock(&sbi->ll_lock);
997         if (val)
998                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
999         else
1000                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1001         spin_unlock(&sbi->ll_lock);
1002
1003         return count;
1004 }
1005 LUSTRE_RW_ATTR(tiny_write);
1006
1007 static ssize_t fast_read_show(struct kobject *kobj,
1008                               struct attribute *attr,
1009                               char *buf)
1010 {
1011         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1012                                               ll_kset.kobj);
1013
1014         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1015 }
1016
1017 static ssize_t fast_read_store(struct kobject *kobj,
1018                                struct attribute *attr,
1019                                const char *buffer,
1020                                size_t count)
1021 {
1022         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1023                                               ll_kset.kobj);
1024         bool val;
1025         int rc;
1026
1027         rc = kstrtobool(buffer, &val);
1028         if (rc)
1029                 return rc;
1030
1031         spin_lock(&sbi->ll_lock);
1032         if (val)
1033                 sbi->ll_flags |= LL_SBI_FAST_READ;
1034         else
1035                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1036         spin_unlock(&sbi->ll_lock);
1037
1038         return count;
1039 }
1040 LUSTRE_RW_ATTR(fast_read);
1041
1042 static ssize_t pio_show(struct kobject *kobj,
1043                         struct attribute *attr,
1044                         char *buf)
1045 {
1046         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1047                                               ll_kset.kobj);
1048
1049         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_PIO));
1050 }
1051
1052 static ssize_t pio_store(struct kobject *kobj,
1053                          struct attribute *attr,
1054                          const char *buffer,
1055                          size_t count)
1056 {
1057         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1058                                               ll_kset.kobj);
1059         bool val;
1060         int rc;
1061
1062         rc = kstrtobool(buffer, &val);
1063         if (rc)
1064                 return rc;
1065
1066         spin_lock(&sbi->ll_lock);
1067         if (val)
1068                 sbi->ll_flags |= LL_SBI_PIO;
1069         else
1070                 sbi->ll_flags &= ~LL_SBI_PIO;
1071         spin_unlock(&sbi->ll_lock);
1072
1073         return count;
1074 }
1075 LUSTRE_RW_ATTR(pio);
1076
1077 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1078 {
1079         struct super_block      *sb    = m->private;
1080         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1081         struct cl_client_cache  *cache = sbi->ll_cache;
1082         long pages;
1083         int mb;
1084
1085         pages = atomic_long_read(&cache->ccc_unstable_nr);
1086         mb    = (pages * PAGE_SIZE) >> 20;
1087
1088         seq_printf(m, "unstable_check:     %8d\n"
1089                    "unstable_pages: %12ld\n"
1090                    "unstable_mb:        %8d\n",
1091                    cache->ccc_unstable_check, pages, mb);
1092         return 0;
1093 }
1094
1095 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1096                                            const char __user *buffer,
1097                                            size_t count, loff_t *unused)
1098 {
1099         struct seq_file *seq = file->private_data;
1100         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1101         char kernbuf[128];
1102         bool val;
1103         int rc;
1104
1105         if (count == 0)
1106                 return 0;
1107         if (count >= sizeof(kernbuf))
1108                 return -EINVAL;
1109
1110         if (copy_from_user(kernbuf, buffer, count))
1111                 return -EFAULT;
1112         kernbuf[count] = 0;
1113
1114         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1115                   kernbuf;
1116         rc = kstrtobool_from_user(buffer, count, &val);
1117         if (rc < 0)
1118                 return rc;
1119
1120         /* borrow lru lock to set the value */
1121         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1122         sbi->ll_cache->ccc_unstable_check = val;
1123         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1124
1125         return count;
1126 }
1127 LPROC_SEQ_FOPS(ll_unstable_stats);
1128
1129 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1130 {
1131         struct super_block *sb = m->private;
1132         struct ll_sb_info *sbi = ll_s2sbi(sb);
1133         struct root_squash_info *squash = &sbi->ll_squash;
1134
1135         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1136         return 0;
1137 }
1138
1139 static ssize_t ll_root_squash_seq_write(struct file *file,
1140                                         const char __user *buffer,
1141                                         size_t count, loff_t *off)
1142 {
1143         struct seq_file *m = file->private_data;
1144         struct super_block *sb = m->private;
1145         struct ll_sb_info *sbi = ll_s2sbi(sb);
1146         struct root_squash_info *squash = &sbi->ll_squash;
1147
1148         return lprocfs_wr_root_squash(buffer, count, squash,
1149                                       ll_get_fsname(sb, NULL, 0));
1150 }
1151 LPROC_SEQ_FOPS(ll_root_squash);
1152
1153 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1154 {
1155         struct super_block *sb = m->private;
1156         struct ll_sb_info *sbi = ll_s2sbi(sb);
1157         struct root_squash_info *squash = &sbi->ll_squash;
1158         int len;
1159
1160         down_read(&squash->rsi_sem);
1161         if (!list_empty(&squash->rsi_nosquash_nids)) {
1162                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1163                                         &squash->rsi_nosquash_nids);
1164                 m->count += len;
1165                 seq_putc(m, '\n');
1166         } else {
1167                 seq_puts(m, "NONE\n");
1168         }
1169         up_read(&squash->rsi_sem);
1170
1171         return 0;
1172 }
1173
1174 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1175                                           const char __user *buffer,
1176                                           size_t count, loff_t *off)
1177 {
1178         struct seq_file *m = file->private_data;
1179         struct super_block *sb = m->private;
1180         struct ll_sb_info *sbi = ll_s2sbi(sb);
1181         struct root_squash_info *squash = &sbi->ll_squash;
1182         int rc;
1183
1184         rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
1185                                       ll_get_fsname(sb, NULL, 0));
1186         if (rc < 0)
1187                 return rc;
1188
1189         ll_compute_rootsquash_state(sbi);
1190
1191         return rc;
1192 }
1193 LPROC_SEQ_FOPS(ll_nosquash_nids);
1194
1195 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1196         { .name =       "site",
1197           .fops =       &ll_site_stats_fops                     },
1198         { .name =       "max_read_ahead_mb",
1199           .fops =       &ll_max_readahead_mb_fops               },
1200         { .name =       "max_read_ahead_per_file_mb",
1201           .fops =       &ll_max_readahead_per_file_mb_fops      },
1202         { .name =       "max_read_ahead_whole_mb",
1203           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1204         { .name =       "max_cached_mb",
1205           .fops =       &ll_max_cached_mb_fops                  },
1206         { .name =       "statahead_stats",
1207           .fops =       &ll_statahead_stats_fops                },
1208         { .name =       "sbi_flags",
1209           .fops =       &ll_sbi_flags_fops                      },
1210         { .name =       "unstable_stats",
1211           .fops =       &ll_unstable_stats_fops                 },
1212         { .name =       "root_squash",
1213           .fops =       &ll_root_squash_fops                    },
1214         { .name =       "nosquash_nids",
1215           .fops =       &ll_nosquash_nids_fops                  },
1216         { NULL }
1217 };
1218
1219 #define MAX_STRING_SIZE 128
1220
1221 static struct attribute *llite_attrs[] = {
1222         &lustre_attr_blocksize.attr,
1223         &lustre_attr_stat_blocksize.attr,
1224         &lustre_attr_kbytestotal.attr,
1225         &lustre_attr_kbytesfree.attr,
1226         &lustre_attr_kbytesavail.attr,
1227         &lustre_attr_filestotal.attr,
1228         &lustre_attr_filesfree.attr,
1229         &lustre_attr_client_type.attr,
1230         &lustre_attr_fstype.attr,
1231         &lustre_attr_uuid.attr,
1232         &lustre_attr_checksums.attr,
1233         &lustre_attr_checksum_pages.attr,
1234         &lustre_attr_stats_track_pid.attr,
1235         &lustre_attr_stats_track_ppid.attr,
1236         &lustre_attr_stats_track_gid.attr,
1237         &lustre_attr_statahead_running_max.attr,
1238         &lustre_attr_statahead_max.attr,
1239         &lustre_attr_statahead_agl.attr,
1240         &lustre_attr_lazystatfs.attr,
1241         &lustre_attr_max_easize.attr,
1242         &lustre_attr_default_easize.attr,
1243         &lustre_attr_xattr_cache.attr,
1244         &lustre_attr_fast_read.attr,
1245         &lustre_attr_pio.attr,
1246         &lustre_attr_tiny_write.attr,
1247         NULL,
1248 };
1249
1250 static void llite_kobj_release(struct kobject *kobj)
1251 {
1252         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1253                                               ll_kset.kobj);
1254         complete(&sbi->ll_kobj_unregister);
1255 }
1256
1257 static struct kobj_type llite_ktype = {
1258         .default_attrs  = llite_attrs,
1259         .sysfs_ops      = &lustre_sysfs_ops,
1260         .release        = llite_kobj_release,
1261 };
1262
1263 static const struct llite_file_opcode {
1264         __u32       opcode;
1265         __u32       type;
1266         const char *opname;
1267 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1268         /* file operation */
1269         { LPROC_LL_DIRTY_HITS,     LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1270         { LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1271         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1272                                    "read_bytes" },
1273         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1274                                    "write_bytes" },
1275         { LPROC_LL_BRW_READ,       LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1276                                    "brw_read" },
1277         { LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1278                                    "brw_write" },
1279         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1280         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1281         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1282         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1283         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1284         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1285         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1286         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1287         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1288         /* inode operation */
1289         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1290         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1291         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1292         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1293         /* dir inode operation */
1294         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1295         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1296         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1297         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1298         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1299         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1300         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1301         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1302         /* special inode operation */
1303         { LPROC_LL_STAFS,          LPROCFS_TYPE_REGS, "statfs" },
1304         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1305         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1306         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1307         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1308         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1309         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1310         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1311 };
1312
1313 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1314 {
1315         if (!sbi->ll_stats)
1316                 return;
1317         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1318                 lprocfs_counter_add(sbi->ll_stats, op, count);
1319         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1320                  sbi->ll_stats_track_id == current->pid)
1321                 lprocfs_counter_add(sbi->ll_stats, op, count);
1322         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1323                  sbi->ll_stats_track_id == current->parent->pid)
1324                 lprocfs_counter_add(sbi->ll_stats, op, count);
1325         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1326                  sbi->ll_stats_track_id ==
1327                         from_kgid(&init_user_ns, current_gid()))
1328                 lprocfs_counter_add(sbi->ll_stats, op, count);
1329 }
1330 EXPORT_SYMBOL(ll_stats_ops_tally);
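/*
 * Illustration only, not part of this file: callers elsewhere in llite bump
 * these counters with one call per VFS operation, for example (hypothetical
 * call site):
 *
 *	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
 *
 * The stats_track_{pid,ppid,gid} filters above then decide whether the
 * sample is actually recorded.
 */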
1331
1332 static const char *ra_stat_string[] = {
1333         [RA_STAT_HIT] = "hits",
1334         [RA_STAT_MISS] = "misses",
1335         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1336         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1337         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1338         [RA_STAT_FAILED_MATCH] = "failed lock match",
1339         [RA_STAT_DISCARDED] = "read but discarded",
1340         [RA_STAT_ZERO_LEN] = "zero length file",
1341         [RA_STAT_ZERO_WINDOW] = "zero size window",
1342         [RA_STAT_EOF] = "read-ahead to EOF",
1343         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1344         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1345         [RA_STAT_FAILED_REACH_END] = "failed to reach end"
1346 };
1347
1348 LPROC_SEQ_FOPS_RO_TYPE(llite, name);
1349 LPROC_SEQ_FOPS_RO_TYPE(llite, uuid);
1350
1351 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1352 {
1353         struct lustre_sb_info *lsi = s2lsi(sb);
1354         struct ll_sb_info *sbi = ll_s2sbi(sb);
1355         struct lprocfs_vars lvars[2];
1356         int err, id, rc;
1357
1358         ENTRY;
1359         memset(lvars, 0, sizeof(lvars));
1360         lvars[0].name = name;
1361
1362         LASSERT(sbi != NULL);
1363
1364         sbi->ll_proc_root = lprocfs_register(name, proc_lustre_fs_root,
1365                                              NULL, NULL);
1366         if (IS_ERR(sbi->ll_proc_root)) {
1367                 err = PTR_ERR(sbi->ll_proc_root);
1368                 sbi->ll_proc_root = NULL;
1369                 RETURN(err);
1370         }
1371
1372         rc = lprocfs_seq_create(sbi->ll_proc_root, "dump_page_cache", 0444,
1373                                 &vvp_dump_pgcache_file_ops, sbi);
1374         if (rc)
1375                 CWARN("Error adding the dump_page_cache file\n");
1376
1377         rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats", 0644,
1378                                 &ll_rw_extents_stats_fops, sbi);
1379         if (rc)
1380                 CWARN("Error adding the extent_stats file\n");
1381
1382         rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats_per_process",
1383                                 0644, &ll_rw_extents_stats_pp_fops, sbi);
1384         if (rc)
1385                 CWARN("Error adding the extents_stats_per_process file\n");
1386
1387         rc = lprocfs_seq_create(sbi->ll_proc_root, "offset_stats", 0644,
1388                                 &ll_rw_offset_stats_fops, sbi);
1389         if (rc)
1390                 CWARN("Error adding the offset_stats file\n");
1391
1392         /* File operations stats */
1393         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1394                                             LPROCFS_STATS_FLAG_NONE);
1395         if (sbi->ll_stats == NULL)
1396                 GOTO(out_proc, err = -ENOMEM);
1397
1398         /* do counter init */
1399         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1400                 __u32 type = llite_opcode_table[id].type;
1401                 void *ptr = NULL;
1402                 if (type & LPROCFS_TYPE_REGS)
1403                         ptr = "regs";
1404                 else if (type & LPROCFS_TYPE_BYTES)
1405                         ptr = "bytes";
1406                 else if (type & LPROCFS_TYPE_PAGES)
1407                         ptr = "pages";
1408                 lprocfs_counter_init(sbi->ll_stats,
1409                                      llite_opcode_table[id].opcode,
1410                                      (type & LPROCFS_CNTR_AVGMINMAX),
1411                                      llite_opcode_table[id].opname, ptr);
1412         }
1413
1414         err = lprocfs_register_stats(sbi->ll_proc_root, "stats", sbi->ll_stats);
1415         if (err)
1416                 GOTO(out_stats, err);
1417
1418         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1419                                                LPROCFS_STATS_FLAG_NONE);
1420         if (sbi->ll_ra_stats == NULL)
1421                 GOTO(out_stats, err = -ENOMEM);
1422
1423         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1424                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1425                                      ra_stat_string[id], "pages");
1426         err = lprocfs_register_stats(sbi->ll_proc_root, "read_ahead_stats",
1427                                      sbi->ll_ra_stats);
1428         if (err)
1429                 GOTO(out_ra_stats, err);
1430
1431         err = lprocfs_add_vars(sbi->ll_proc_root, lprocfs_llite_obd_vars, sb);
1432         if (err)
1433                 GOTO(out_ra_stats, err);
1434
1435         /* Register the per-mount sysfs kset here as well */
1436         sbi->ll_kset.kobj.parent = llite_kobj;
1437         sbi->ll_kset.kobj.ktype = &llite_ktype;
1438         init_completion(&sbi->ll_kobj_unregister);
1439         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1440         if (err)
1441                 GOTO(out_ra_stats, err);
1442
1443         err = kset_register(&sbi->ll_kset);
1444         if (err)
1445                 GOTO(out_ra_stats, err);
1446
1447         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1448
1449         RETURN(0);
1450 out_ra_stats:
1451         lprocfs_free_stats(&sbi->ll_ra_stats);
1452 out_stats:
1453         lprocfs_free_stats(&sbi->ll_stats);
1454 out_proc:
1455         lprocfs_remove(&sbi->ll_proc_root);
1456
1457         RETURN(err);
1458 }
1459
1460 int lprocfs_ll_register_obd(struct super_block *sb, const char *obdname)
1461 {
1462         struct lprocfs_vars lvars[2];
1463         struct ll_sb_info *sbi = ll_s2sbi(sb);
1464         struct obd_device *obd;
1465         struct proc_dir_entry *dir;
1466         char name[MAX_STRING_SIZE + 1];
1467         int err;
1468         ENTRY;
1469
1470         memset(lvars, 0, sizeof(lvars));
1471
1472         name[MAX_STRING_SIZE] = '\0';
1473         lvars[0].name = name;
1474
1475         LASSERT(sbi != NULL);
1476         LASSERT(obdname != NULL);
1477
1478         obd = class_name2obd(obdname);
1479
1480         LASSERT(obd != NULL);
1481         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
1482         LASSERT(obd->obd_type->typ_name != NULL);
1483
1484         dir = proc_mkdir(obd->obd_type->typ_name, sbi->ll_proc_root);
1485         if (dir == NULL)
1486                 GOTO(out, err = -ENOMEM);
1487
1488         snprintf(name, MAX_STRING_SIZE, "common_name");
1489         lvars[0].fops = &llite_name_fops;
1490         err = lprocfs_add_vars(dir, lvars, obd);
1491         if (err)
1492                 GOTO(out, err);
1493
1494         snprintf(name, MAX_STRING_SIZE, "uuid");
1495         lvars[0].fops = &llite_uuid_fops;
1496         err = lprocfs_add_vars(dir, lvars, obd);
1497         if (err)
1498                 GOTO(out, err);
1499
1500 out:
1501         if (err) {
1502                 lprocfs_remove(&sbi->ll_proc_root);
1503                 lprocfs_free_stats(&sbi->ll_ra_stats);
1504                 lprocfs_free_stats(&sbi->ll_stats);
1505         }
1506         RETURN(err);
1507 }
1508
1509 void ll_debugfs_unregister_super(struct super_block *sb)
1510 {
1511         struct lustre_sb_info *lsi = s2lsi(sb);
1512         struct ll_sb_info *sbi = ll_s2sbi(sb);
1513
1514         kobject_put(lsi->lsi_kobj);
1515
1516         kset_unregister(&sbi->ll_kset);
1517         wait_for_completion(&sbi->ll_kobj_unregister);
1518
1519         if (sbi->ll_proc_root) {
1520                 lprocfs_remove(&sbi->ll_proc_root);
1521                 lprocfs_free_stats(&sbi->ll_ra_stats);
1522                 lprocfs_free_stats(&sbi->ll_stats);
1523         }
1524 }
1525 #undef MAX_STRING_SIZE
1526
1527 #define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
1528
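/*
 * Print one histogram row per power-of-two extent-size bucket of the given
 * pp_extents[] slot: bucket i holds I/O of up to 1 << (i + LL_HIST_START)
 * bytes, and the size label rolls over from K to M, G, ... once a row
 * reaches 1024 of the current unit.
 */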
1529 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1530                                    struct seq_file *seq, int which)
1531 {
1532         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1533         unsigned long start, end, r, w;
1534         const char *unitp = "KMGTPEZY";
1535         int i, units = 10;
1536         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1537
1538         read_cum = 0;
1539         write_cum = 0;
1540         start = 0;
1541
1542         for (i = 0; i < LL_HIST_MAX; i++) {
1543                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1544                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1545         }
1546
1547         for (i = 0; i < LL_HIST_MAX; i++) {
1548                 r = pp_info->pp_r_hist.oh_buckets[i];
1549                 w = pp_info->pp_w_hist.oh_buckets[i];
1550                 read_cum += r;
1551                 write_cum += w;
1552                 end = 1 << (i + LL_HIST_START - units);
1553                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu  | "
1554                            "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
1555                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1556                            r, pct(r, read_tot), pct(read_cum, read_tot),
1557                            w, pct(w, write_tot), pct(write_cum, write_tot));
1558                 start = end;
1559                 if (start == 1 << 10) {
1560                         start = 1;
1561                         units += 10;
1562                         unitp++;
1563                 }
1564                 if (read_cum == read_tot && write_cum == write_tot)
1565                         break;
1566         }
1567 }
1568
1569 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1570 {
1571         struct timespec64 now;
1572         struct ll_sb_info *sbi = seq->private;
1573         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1574         int k;
1575
1576         ktime_get_real_ts64(&now);
1577
1578         if (!sbi->ll_rw_stats_on) {
1579                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1580                 return 0;
1581         }
1582         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1583                    (s64)now.tv_sec, now.tv_nsec);
1584         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1585         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1586                    "extents", "calls", "%", "cum%",
1587                    "calls", "%", "cum%");
1588         spin_lock(&sbi->ll_pp_extent_lock);
1589         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1590                 if (io_extents->pp_extents[k].pid != 0) {
1591                         seq_printf(seq, "\nPID: %d\n",
1592                                    io_extents->pp_extents[k].pid);
1593                         ll_display_extents_info(io_extents, seq, k);
1594                 }
1595         }
1596         spin_unlock(&sbi->ll_pp_extent_lock);
1597         return 0;
1598 }
1599
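/*
 * Any write enables collection ('0' or 'disable' turns it off, see
 * ll_stats_pid_write()) and always clears the per-process extent
 * histograms.
 */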
1600 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1601                                                 const char __user *buf,
1602                                                 size_t len,
1603                                                 loff_t *off)
1604 {
1605         struct seq_file *seq = file->private_data;
1606         struct ll_sb_info *sbi = seq->private;
1607         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1608         int i;
1609         __s64 value;
1610
1611         if (len == 0)
1612                 return -EINVAL;
1613
1614         value = ll_stats_pid_write(buf, len);
1615
1616         if (value == 0)
1617                 sbi->ll_rw_stats_on = 0;
1618         else
1619                 sbi->ll_rw_stats_on = 1;
1620
1621         spin_lock(&sbi->ll_pp_extent_lock);
1622         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1623                 io_extents->pp_extents[i].pid = 0;
1624                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1625                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1626         }
1627         spin_unlock(&sbi->ll_pp_extent_lock);
1628         return len;
1629 }
1630
1631 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
1632
1633 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1634 {
1635         struct timespec64 now;
1636         struct ll_sb_info *sbi = seq->private;
1637         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1638
1639         ktime_get_real_ts64(&now);
1640
1641         if (!sbi->ll_rw_stats_on) {
1642                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1643                 return 0;
1644         }
1645         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1646                    (s64)now.tv_sec, now.tv_nsec);
1647
1648         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1649         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1650                    "extents", "calls", "%", "cum%",
1651                    "calls", "%", "cum%");
1652         spin_lock(&sbi->ll_lock);
1653         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1654         spin_unlock(&sbi->ll_lock);
1655
1656         return 0;
1657 }
1658
1659 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1660                                              const char __user *buf,
1661                                              size_t len, loff_t *off)
1662 {
1663         struct seq_file *seq = file->private_data;
1664         struct ll_sb_info *sbi = seq->private;
1665         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1666         int i;
1667         __s64 value;
1668
1669         if (len == 0)
1670                 return -EINVAL;
1671
1672         value = ll_stats_pid_write(buf, len);
1673
1674         if (value == 0)
1675                 sbi->ll_rw_stats_on = 0;
1676         else
1677                 sbi->ll_rw_stats_on = 1;
1678
1679         spin_lock(&sbi->ll_pp_extent_lock);
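        /*
         * Clear every per-process slot plus the aggregate slot stored at
         * index LL_PROCESS_HIST_MAX, hence the inclusive bound below.
         */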
1680         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1681                 io_extents->pp_extents[i].pid = 0;
1682                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1683                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1684         }
1685         spin_unlock(&sbi->ll_pp_extent_lock);
1686
1687         return len;
1688 }
1689 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1690
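/*
 * Called on each read/write to account the I/O in two places: the
 * per-process (plus aggregate) extent-size histograms shown by
 * ll_rw_extents_stats_*_seq_show(), and the per-process offset records
 * shown by ll_rw_offset_stats_seq_show().  Slots are recycled round-robin
 * once LL_PROCESS_HIST_MAX distinct PIDs have been seen.
 */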
1691 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1692                        struct ll_file_data *file, loff_t pos,
1693                        size_t count, int rw)
1694 {
1695         int i, cur = -1;
1696         struct ll_rw_process_info *process;
1697         struct ll_rw_process_info *offset;
1698         int *off_count = &sbi->ll_rw_offset_entry_count;
1699         int *process_count = &sbi->ll_offset_process_count;
1700         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1701
1702         if (!sbi->ll_rw_stats_on)
1703                 return;
1704         process = sbi->ll_rw_process_info;
1705         offset = sbi->ll_rw_offset_info;
1706
1707         spin_lock(&sbi->ll_pp_extent_lock);
1708         /* Extent statistics */
1709         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1710                 if (io_extents->pp_extents[i].pid == pid) {
1711                         cur = i;
1712                         break;
1713                 }
1714         }
1715
1716         if (cur == -1) {
1717                 /* new process: recycle the next slot round-robin and reset it */
1718                 sbi->ll_extent_process_count =
1719                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1720                 cur = sbi->ll_extent_process_count;
1721                 io_extents->pp_extents[cur].pid = pid;
1722                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1723                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1724         }
1725
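        /*
         * Pick the extent-size histogram bucket: the smallest i for which
         * count < (1 << LL_HIST_START << i), capped at the last bucket.
         * The loop body is intentionally empty.
         */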
1726         for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1727              (i < (LL_HIST_MAX - 1)); i++);
1728         if (rw == 0) {
1729                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1730                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1731         } else {
1732                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1733                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1734         }
1735         spin_unlock(&sbi->ll_pp_extent_lock);
1736
1737         spin_lock(&sbi->ll_process_lock);
1738         /* Offset statistics */
1739         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1740                 if (process[i].rw_pid == pid) {
1741                         if (process[i].rw_last_file != file) {
1742                                 process[i].rw_range_start = pos;
1743                                 process[i].rw_last_file_pos = pos + count;
1744                                 process[i].rw_smallest_extent = count;
1745                                 process[i].rw_largest_extent = count;
1746                                 process[i].rw_offset = 0;
1747                                 process[i].rw_last_file = file;
1748                                 spin_unlock(&sbi->ll_process_lock);
1749                                 return;
1750                         }
1751                         if (process[i].rw_last_file_pos != pos) {
1752                                 *off_count =
1753                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1754                                 offset[*off_count].rw_op = process[i].rw_op;
1755                                 offset[*off_count].rw_pid = pid;
1756                                 offset[*off_count].rw_range_start =
1757                                         process[i].rw_range_start;
1758                                 offset[*off_count].rw_range_end =
1759                                         process[i].rw_last_file_pos;
1760                                 offset[*off_count].rw_smallest_extent =
1761                                         process[i].rw_smallest_extent;
1762                                 offset[*off_count].rw_largest_extent =
1763                                         process[i].rw_largest_extent;
1764                                 offset[*off_count].rw_offset =
1765                                         process[i].rw_offset;
1766                                 process[i].rw_op = rw;
1767                                 process[i].rw_range_start = pos;
1768                                 process[i].rw_smallest_extent = count;
1769                                 process[i].rw_largest_extent = count;
1770                                 process[i].rw_offset = pos -
1771                                         process[i].rw_last_file_pos;
1772                         }
1773                         if (process[i].rw_smallest_extent > count)
1774                                 process[i].rw_smallest_extent = count;
1775                         if (process[i].rw_largest_extent < count)
1776                                 process[i].rw_largest_extent = count;
1777                         process[i].rw_last_file_pos = pos + count;
1778                         spin_unlock(&sbi->ll_process_lock);
1779                         return;
1780                 }
1781         }
1782         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1783         process[*process_count].rw_pid = pid;
1784         process[*process_count].rw_op = rw;
1785         process[*process_count].rw_range_start = pos;
1786         process[*process_count].rw_last_file_pos = pos + count;
1787         process[*process_count].rw_smallest_extent = count;
1788         process[*process_count].rw_largest_extent = count;
1789         process[*process_count].rw_offset = 0;
1790         process[*process_count].rw_last_file = file;
1791         spin_unlock(&sbi->ll_process_lock);
1792 }
1793
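/*
 * Dump the saved "discontiguous" offset records first (entries pushed by
 * ll_rw_stats_tally() whenever a process jumped away from its previous
 * file position), then the live per-process ranges.
 */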
1794 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1795 {
1796         struct timespec64 now;
1797         struct ll_sb_info *sbi = seq->private;
1798         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1799         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1800         int i;
1801
1802         ktime_get_real_ts64(&now);
1803
1804         if (!sbi->ll_rw_stats_on) {
1805                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1806                 return 0;
1807         }
1808         spin_lock(&sbi->ll_process_lock);
1809
1810         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1811                    (s64)now.tv_sec, now.tv_nsec);
1812         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1813                    "R/W", "PID", "RANGE START", "RANGE END",
1814                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1815
1816         /* We stored the discontiguous offsets here; print them first */
1817         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1818                 if (offset[i].rw_pid != 0)
1819                         seq_printf(seq,
1820                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1821                                    offset[i].rw_op == READ ? 'R' : 'W',
1822                                    offset[i].rw_pid,
1823                                    offset[i].rw_range_start,
1824                                    offset[i].rw_range_end,
1825                                    (unsigned long)offset[i].rw_smallest_extent,
1826                                    (unsigned long)offset[i].rw_largest_extent,
1827                                    offset[i].rw_offset);
1828         }
1829
1830         /* Then print the current offsets for each process */
1831         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1832                 if (process[i].rw_pid != 0)
1833                         seq_printf(seq,
1834                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1835                                    process[i].rw_op == READ ? 'R' : 'W',
1836                                    process[i].rw_pid,
1837                                    process[i].rw_range_start,
1838                                    process[i].rw_last_file_pos,
1839                                    (unsigned long)process[i].rw_smallest_extent,
1840                                    (unsigned long)process[i].rw_largest_extent,
1841                                    process[i].rw_offset);
1842         }
1843         spin_unlock(&sbi->ll_process_lock);
1844
1845         return 0;
1846 }
1847
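/*
 * As with the extents files, any write toggles collection via
 * ll_stats_pid_write() and discards all saved offset and per-process
 * records.
 */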
1848 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1849                                             const char __user *buf,
1850                                             size_t len, loff_t *off)
1851 {
1852         struct seq_file *seq = file->private_data;
1853         struct ll_sb_info *sbi = seq->private;
1854         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1855         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1856         __s64 value;
1857
1858         if (len == 0)
1859                 return -EINVAL;
1860
1861         value = ll_stats_pid_write(buf, len);
1862
1863         if (value == 0)
1864                 sbi->ll_rw_stats_on = 0;
1865         else
1866                 sbi->ll_rw_stats_on = 1;
1867
1868         spin_lock(&sbi->ll_process_lock);
1869         sbi->ll_offset_process_count = 0;
1870         sbi->ll_rw_offset_entry_count = 0;
1871         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1872                LL_PROCESS_HIST_MAX);
1873         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1874                LL_OFFSET_HIST_MAX);
1875         spin_unlock(&sbi->ll_process_lock);
1876
1877         return len;
1878 }
1879
1880 /**
1881  * ll_stats_pid_write() - Determine if stats collection should be enabled
1882  * @buf: Buffer containing the data written
1883  * @len: Number of bytes in the buffer
1884  *
1885  * Several proc files begin collecting stats when a value is written, and stop
1886  * collecting when either '0' or 'disable' is written. This function checks the
1887  * written value to see if collection should be enabled or disabled.
1888  *
1889  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
1890  * equivalent of a number is written, that number is returned. Otherwise,
1891  * 1 is returned. Non-zero return values indicate collection should be enabled.
1892  */
1893 static __s64 ll_stats_pid_write(const char __user *buf, size_t len)
1894 {
1895         unsigned long long value = 1;
1896         int rc;
1897         char kernbuf[16];
1898
1899         rc = kstrtoull_from_user(buf, len, 0, &value);
1900         if (rc < 0 && len < sizeof(kernbuf)) {
1901
1902                 if (copy_from_user(kernbuf, buf, len))
1903                         return -EFAULT;
1904                 kernbuf[len] = 0;
1905
1906                 if (kernbuf[len - 1] == '\n')
1907                         kernbuf[len - 1] = 0;
1908
1909                 if (strncasecmp(kernbuf, "disable", 7) == 0)
1910                         value = 0;
1911         }
1912
1913         return value;
1914 }
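/*
 * Example interaction with the files above (the file name used here is
 * only illustrative; the actual proc path depends on the mount):
 *
 *   echo 1 > extents_stats        # any non-"0"/"disable" text: stats on
 *   echo disable > extents_stats  # or "0": stats off, counters cleared
 */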
1915
1916 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1917 #endif /* CONFIG_PROC_FS */