1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 struct proc_dir_entry *proc_lustre_fs_root;
47 static struct kobject *llite_kobj;
48
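/*
 * Register the global "llite" procfs root (/proc/fs/lustre/llite) and the
 * parent sysfs kobject that the per-mount ksets attach to.  Called once at
 * module init; llite_tunables_unregister() below tears both down again.
 */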
49 int llite_tunables_register(void)
50 {
51         int rc = 0;
52
53         proc_lustre_fs_root = lprocfs_register("llite", proc_lustre_root,
54                                                NULL, NULL);
55         if (IS_ERR(proc_lustre_fs_root)) {
56                 rc = PTR_ERR(proc_lustre_fs_root);
57                 CERROR("cannot register '/proc/fs/lustre/llite': rc = %d\n",
58                        rc);
59                 proc_lustre_fs_root = NULL;
60                 return rc;
61         }
62
63         llite_kobj = class_setup_tunables("llite");
64         if (IS_ERR(llite_kobj)) {
65                 rc = PTR_ERR(llite_kobj);
66                 llite_kobj = NULL;
67         }
68
69         return rc;
70 }
71
72 void llite_tunables_unregister(void)
73 {
74         if (llite_kobj)
75                 kobject_put(llite_kobj);
76
77         lprocfs_remove(&proc_lustre_fs_root);
78 }
79
80 #ifdef CONFIG_PROC_FS
81 /* /proc/fs/lustre/llite mount point registration */
82 static const struct file_operations ll_rw_extents_stats_fops;
83 static const struct file_operations ll_rw_extents_stats_pp_fops;
84 static const struct file_operations ll_rw_offset_stats_fops;
85 static __s64 ll_stats_pid_write(const char __user *buf, size_t len);
86
87 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
88                               char *buf)
89 {
90         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
91                                               ll_kset.kobj);
92         struct obd_statfs osfs;
93         int rc;
94
95         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
96         if (rc)
97                 return rc;
98
99         return sprintf(buf, "%u\n", osfs.os_bsize);
100 }
101 LUSTRE_RO_ATTR(blocksize);
102
103 static int ll_stat_blksize_seq_show(struct seq_file *m, void *v)
104 {
105         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
106
107         seq_printf(m, "%u\n", sbi->ll_stat_blksize);
108
109         return 0;
110 }
111
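/*
 * Set ll_stat_blksize which, when non-zero, is used as the st_blksize
 * reported for files on this mount.  Zero restores the default; any other
 * value must be a power of two no smaller than PAGE_SIZE (e.g. 65536 or
 * 4194304), as enforced by the check below.
 */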
112 static ssize_t ll_stat_blksize_seq_write(struct file *file,
113                                          const char __user *buffer,
114                                          size_t count, loff_t *off)
115 {
116         struct seq_file *m = file->private_data;
117         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
118         unsigned int val;
119         int rc;
120
121         rc = kstrtouint_from_user(buffer, count, 0, &val);
122         if (rc)
123                 return rc;
124
125         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
126                 return -ERANGE;
127
128         sbi->ll_stat_blksize = val;
129
130         return count;
131 }
132 LPROC_SEQ_FOPS(ll_stat_blksize);
133
134 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
135                                 char *buf)
136 {
137         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
138                                               ll_kset.kobj);
139         struct obd_statfs osfs;
140         u32 blk_size;
141         u64 result;
142         int rc;
143
144         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
145         if (rc)
146                 return rc;
147
148         blk_size = osfs.os_bsize >> 10;
149         result = osfs.os_blocks;
150
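        /*
         * os_bsize is a power of two, so multiplying the block count by
         * (os_bsize >> 10) can be done with shifts: each halving of
         * blk_size doubles result, yielding the total size in kibibytes.
         * kbytesfree_show() and kbytesavail_show() below use the same
         * conversion.
         */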
151         while (blk_size >>= 1)
152                 result <<= 1;
153
154         return sprintf(buf, "%llu\n", result);
155 }
156 LUSTRE_RO_ATTR(kbytestotal);
157
158 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
159                                char *buf)
160 {
161         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
162                                               ll_kset.kobj);
163         struct obd_statfs osfs;
164         u32 blk_size;
165         u64 result;
166         int rc;
167
168         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
169         if (rc)
170                 return rc;
171
172         blk_size = osfs.os_bsize >> 10;
173         result = osfs.os_bfree;
174
175         while (blk_size >>= 1)
176                 result <<= 1;
177
178         return sprintf(buf, "%llu\n", result);
179 }
180 LUSTRE_RO_ATTR(kbytesfree);
181
182 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
183                                 char *buf)
184 {
185         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
186                                               ll_kset.kobj);
187         struct obd_statfs osfs;
188         u32 blk_size;
189         u64 result;
190         int rc;
191
192         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
193         if (rc)
194                 return rc;
195
196         blk_size = osfs.os_bsize >> 10;
197         result = osfs.os_bavail;
198
199         while (blk_size >>= 1)
200                 result <<= 1;
201
202         return sprintf(buf, "%llu\n", result);
203 }
204 LUSTRE_RO_ATTR(kbytesavail);
205
206 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
207                                char *buf)
208 {
209         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
210                                               ll_kset.kobj);
211         struct obd_statfs osfs;
212         int rc;
213
214         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
215         if (rc)
216                 return rc;
217
218         return sprintf(buf, "%llu\n", osfs.os_files);
219 }
220 LUSTRE_RO_ATTR(filestotal);
221
222 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
223                               char *buf)
224 {
225         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
226                                               ll_kset.kobj);
227         struct obd_statfs osfs;
228         int rc;
229
230         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
231         if (rc)
232                 return rc;
233
234         return sprintf(buf, "%llu\n", osfs.os_ffree);
235 }
236 LUSTRE_RO_ATTR(filesfree);
237
238 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
239                                 char *buf)
240 {
241         return sprintf(buf, "local client\n");
242 }
243 LUSTRE_RO_ATTR(client_type);
244
245 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
246                            char *buf)
247 {
248         return sprintf(buf, "lustre\n");
249 }
250 LUSTRE_RO_ATTR(fstype);
251
252 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
253                          char *buf)
254 {
255         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
256                                               ll_kset.kobj);
257
258         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
259 }
260 LUSTRE_RO_ATTR(uuid);
261
262 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
263 {
264         struct super_block *sb = m->private;
265
266         /*
267          * See description of statistical counters in struct cl_site, and
268          * struct lu_site.
269          */
270         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
271 }
272 LPROC_SEQ_FOPS_RO(ll_site_stats);
273
274 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
275 {
276         struct super_block *sb = m->private;
277         struct ll_sb_info *sbi = ll_s2sbi(sb);
278         long pages_number;
279         int mult;
280
281         spin_lock(&sbi->ll_lock);
282         pages_number = sbi->ll_ra_info.ra_max_pages;
283         spin_unlock(&sbi->ll_lock);
284
285         mult = 1 << (20 - PAGE_SHIFT);
286         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
287 }
288
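/*
 * Set the filesystem-wide readahead limit.  The input is parsed with an
 * optional size suffix (megabytes if none is given), converted from bytes
 * to pages, and rejected if it is negative or exceeds half of total RAM.
 */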
289 static ssize_t
290 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
291                               size_t count, loff_t *off)
292 {
293         struct seq_file *m = file->private_data;
294         struct super_block *sb = m->private;
295         struct ll_sb_info *sbi = ll_s2sbi(sb);
296         __s64 pages_number;
297         int rc;
298
299         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
300         if (rc)
301                 return rc;
302
303         pages_number >>= PAGE_SHIFT;
304
305         if (pages_number < 0 || pages_number > totalram_pages / 2) {
306                 /* 1/2 of RAM */
307                 CERROR("%s: can't set max_read_ahead_mb=%lu > %luMB\n",
308                        ll_get_fsname(sb, NULL, 0),
309                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
310                        totalram_pages >> (20 - PAGE_SHIFT + 1));
311                 return -ERANGE;
312         }
313
314         spin_lock(&sbi->ll_lock);
315         sbi->ll_ra_info.ra_max_pages = pages_number;
316         spin_unlock(&sbi->ll_lock);
317         return count;
318 }
319 LPROC_SEQ_FOPS(ll_max_readahead_mb);
320
321 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
322 {
323         struct super_block *sb = m->private;
324         struct ll_sb_info *sbi = ll_s2sbi(sb);
325         long pages_number;
326         int mult;
327
328         spin_lock(&sbi->ll_lock);
329         pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
330         spin_unlock(&sbi->ll_lock);
331
332         mult = 1 << (20 - PAGE_SHIFT);
333         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
334 }
335
336 static ssize_t
337 ll_max_readahead_per_file_mb_seq_write(struct file *file,
338                                        const char __user *buffer,
339                                        size_t count, loff_t *off)
340 {
341         struct seq_file *m = file->private_data;
342         struct super_block *sb = m->private;
343         struct ll_sb_info *sbi = ll_s2sbi(sb);
344         int rc;
345         __s64 pages_number;
346
347         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
348         if (rc)
349                 return rc;
350
351         pages_number >>= PAGE_SHIFT;
352
353         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
354                 CERROR("%s: can't set max_read_ahead_per_file_mb=%lu > "
355                        "max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0),
356                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
357                        sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT));
358                 return -ERANGE;
359         }
360
361         spin_lock(&sbi->ll_lock);
362         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
363         spin_unlock(&sbi->ll_lock);
364         return count;
365 }
366 LPROC_SEQ_FOPS(ll_max_readahead_per_file_mb);
367
368 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
369 {
370         struct super_block *sb = m->private;
371         struct ll_sb_info *sbi = ll_s2sbi(sb);
372         long pages_number;
373         int mult;
374
375         spin_lock(&sbi->ll_lock);
376         pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
377         spin_unlock(&sbi->ll_lock);
378
379         mult = 1 << (20 - PAGE_SHIFT);
380         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
381 }
382
383 static ssize_t
384 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
385                                      const char __user *buffer,
386                                      size_t count, loff_t *off)
387 {
388         struct seq_file *m = file->private_data;
389         struct super_block *sb = m->private;
390         struct ll_sb_info *sbi = ll_s2sbi(sb);
391         int rc;
392         __s64 pages_number;
393
394         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
395         if (rc)
396                 return rc;
397
398         pages_number >>= PAGE_SHIFT;
399
400         /* Cap this at the current max readahead window size, the readahead
401          * algorithm does this anyway so it's pointless to set it larger. */
402         if (pages_number < 0 ||
403             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
404                 int pages_shift = 20 - PAGE_SHIFT;
405                 CERROR("%s: can't set max_read_ahead_whole_mb=%lu > "
406                        "max_read_ahead_per_file_mb=%lu\n",
407                        ll_get_fsname(sb, NULL, 0),
408                        (unsigned long)pages_number >> pages_shift,
409                        sbi->ll_ra_info.ra_max_pages_per_file >> pages_shift);
410                 return -ERANGE;
411         }
412
413         spin_lock(&sbi->ll_lock);
414         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
415         spin_unlock(&sbi->ll_lock);
416         return count;
417 }
418 LPROC_SEQ_FOPS(ll_max_read_ahead_whole_mb);
419
420 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
421 {
422         struct super_block     *sb    = m->private;
423         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
424         struct cl_client_cache *cache = sbi->ll_cache;
425         int shift = 20 - PAGE_SHIFT;
426         long max_cached_mb;
427         long unused_mb;
428
429         max_cached_mb = cache->ccc_lru_max >> shift;
430         unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
431         seq_printf(m, "users: %d\n"
432                    "max_cached_mb: %ld\n"
433                    "used_mb: %ld\n"
434                    "unused_mb: %ld\n"
435                    "reclaim_count: %u\n",
436                    atomic_read(&cache->ccc_users),
437                    max_cached_mb,
438                    max_cached_mb - unused_mb,
439                    unused_mb,
440                    cache->ccc_lru_shrinkers);
441         return 0;
442 }
443
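/*
 * Resize the per-filesystem client page cache LRU.  Growing is cheap: the
 * extra slots are simply added to ccc_lru_left.  Shrinking first reclaims
 * from the unused pool with a cmpxchg loop and, if a deficit remains, asks
 * the OSCs via KEY_CACHE_LRU_SHRINK to drop roughly twice that many slots.
 */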
444 static ssize_t
445 ll_max_cached_mb_seq_write(struct file *file, const char __user *buffer,
446                            size_t count, loff_t *off)
447 {
448         struct seq_file *m = file->private_data;
449         struct super_block *sb = m->private;
450         struct ll_sb_info *sbi = ll_s2sbi(sb);
451         struct cl_client_cache *cache = sbi->ll_cache;
452         struct lu_env *env;
453         long diff = 0;
454         long nrpages = 0;
455         __u16 refcheck;
456         __s64 pages_number;
457         long rc;
458         char kernbuf[128];
459         ENTRY;
460
461         if (count >= sizeof(kernbuf))
462                 RETURN(-EINVAL);
463
464         if (copy_from_user(kernbuf, buffer, count))
465                 RETURN(-EFAULT);
466         kernbuf[count] = 0;
467
468         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
469                   kernbuf;
470         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
471         if (rc)
472                 RETURN(rc);
473
474         pages_number >>= PAGE_SHIFT;
475
476         if (pages_number < 0 || pages_number > totalram_pages) {
477                 CERROR("%s: can't set max cache more than %lu MB\n",
478                        ll_get_fsname(sb, NULL, 0),
479                        totalram_pages >> (20 - PAGE_SHIFT));
480                 RETURN(-ERANGE);
481         }
482         /* Allow enough cache so clients can make well-formed RPCs */
483         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
484
485         spin_lock(&sbi->ll_lock);
486         diff = pages_number - cache->ccc_lru_max;
487         spin_unlock(&sbi->ll_lock);
488
489         /* easy - add more LRU slots. */
490         if (diff >= 0) {
491                 atomic_long_add(diff, &cache->ccc_lru_left);
492                 GOTO(out, rc = 0);
493         }
494
495         env = cl_env_get(&refcheck);
496         if (IS_ERR(env))
497                 RETURN(PTR_ERR(env));
498
499         diff = -diff;
500         while (diff > 0) {
501                 long tmp;
502
503                 /* reduce LRU budget from free slots. */
504                 do {
505                         long ov, nv;
506
507                         ov = atomic_long_read(&cache->ccc_lru_left);
508                         if (ov == 0)
509                                 break;
510
511                         nv = ov > diff ? ov - diff : 0;
512                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
513                         if (likely(ov == rc)) {
514                                 diff -= ov - nv;
515                                 nrpages += ov - nv;
516                                 break;
517                         }
518                 } while (1);
519
520                 if (diff <= 0)
521                         break;
522
523                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
524                         rc = -ENODEV;
525                         break;
526                 }
527
528                 /* difficult - have to ask OSCs to drop LRU slots. */
529                 tmp = diff << 1;
530                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
531                                 sizeof(KEY_CACHE_LRU_SHRINK),
532                                 KEY_CACHE_LRU_SHRINK,
533                                 sizeof(tmp), &tmp, NULL);
534                 if (rc < 0)
535                         break;
536         }
537         cl_env_put(env, &refcheck);
538
539 out:
540         if (rc >= 0) {
541                 spin_lock(&sbi->ll_lock);
542                 cache->ccc_lru_max = pages_number;
543                 spin_unlock(&sbi->ll_lock);
544                 rc = count;
545         } else {
546                 atomic_long_add(nrpages, &cache->ccc_lru_left);
547         }
548         return rc;
549 }
550 LPROC_SEQ_FOPS(ll_max_cached_mb);
551
552 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
553                               char *buf)
554 {
555         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
556                                               ll_kset.kobj);
557
558         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
559 }
560
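/*
 * Toggle LL_SBI_CHECKSUM and push the new setting to the data export via
 * KEY_CHECKSUM.  A failure to update the OSCs only logs a warning; the
 * local flag keeps its new value either way.
 */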
561 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
562                                const char *buffer, size_t count)
563 {
564         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
565                                               ll_kset.kobj);
566         bool val;
567         int tmp;
568         int rc;
569
570         if (!sbi->ll_dt_exp)
571                 /* Not set up yet */
572                 return -EAGAIN;
573
574         rc = kstrtobool(buffer, &val);
575         if (rc)
576                 return rc;
577         if (val)
578                 sbi->ll_flags |= LL_SBI_CHECKSUM;
579         else
580                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
581         tmp = val;
582
583         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
584                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
585         if (rc)
586                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
587
588         return count;
589 }
590 LUSTRE_RW_ATTR(checksums);
591
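/*
 * "checksum_pages" exposes the same handlers as "checksums", presumably
 * kept as an alias for backward compatibility.
 */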
592 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
593
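/*
 * stats_track_{pid,ppid,gid}: show the id currently being tracked,
 * "0 (all)" when every process is tracked, or "untracked" when a
 * different track type is active.  Writing a non-zero id restricts the
 * "stats" counters to that id and clears them; writing 0 reverts to
 * tracking all processes.
 */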
594 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
595                               enum stats_track_type type)
596 {
597         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
598                                               ll_kset.kobj);
599
600         if (sbi->ll_stats_track_type == type)
601                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
602         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
603                 return sprintf(buf, "0 (all)\n");
604
605         return sprintf(buf, "untracked\n");
606 }
607
608 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
609                               size_t count, enum stats_track_type type)
610 {
611         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
612                                               ll_kset.kobj);
613         unsigned long pid;
614         int rc;
615
616         rc = kstrtoul(buffer, 10, &pid);
617         if (rc)
618                 return rc;
619
620         sbi->ll_stats_track_id = pid;
621         if (pid == 0)
622                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
623         else
624                 sbi->ll_stats_track_type = type;
625         lprocfs_clear_stats(sbi->ll_stats);
626         return count;
627 }
628
629 static ssize_t stats_track_pid_show(struct kobject *kobj,
630                                     struct attribute *attr,
631                                     char *buf)
632 {
633         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
634 }
635
636 static ssize_t stats_track_pid_store(struct kobject *kobj,
637                                      struct attribute *attr,
638                                      const char *buffer,
639                                      size_t count)
640 {
641         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
642 }
643 LUSTRE_RW_ATTR(stats_track_pid);
644
645 static ssize_t stats_track_ppid_show(struct kobject *kobj,
646                                      struct attribute *attr,
647                                      char *buf)
648 {
649         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
650 }
651
652 static ssize_t stats_track_ppid_store(struct kobject *kobj,
653                                       struct attribute *attr,
654                                       const char *buffer,
655                                       size_t count)
656 {
657         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
658 }
659 LUSTRE_RW_ATTR(stats_track_ppid);
660
661 static ssize_t stats_track_gid_show(struct kobject *kobj,
662                                     struct attribute *attr,
663                                     char *buf)
664 {
665         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
666 }
667
668 static ssize_t stats_track_gid_store(struct kobject *kobj,
669                                      struct attribute *attr,
670                                      const char *buffer,
671                                      size_t count)
672 {
673         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
674 }
675 LUSTRE_RW_ATTR(stats_track_gid);
676
677 static ssize_t statahead_running_max_show(struct kobject *kobj,
678                                           struct attribute *attr,
679                                           char *buf)
680 {
681         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
682                                               ll_kset.kobj);
683
684         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
685 }
686
687 static ssize_t statahead_running_max_store(struct kobject *kobj,
688                                            struct attribute *attr,
689                                            const char *buffer,
690                                            size_t count)
691 {
692         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
693                                               ll_kset.kobj);
694         unsigned long val;
695         int rc;
696
697         rc = kstrtoul(buffer, 0, &val);
698         if (rc)
699                 return rc;
700
701         if (val <= LL_SA_RUNNING_MAX) {
702                 sbi->ll_sa_running_max = val;
703                 return count;
704         }
705
706         CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
707                val, LL_SA_RUNNING_MAX);
708
709         return -ERANGE;
710 }
711 LUSTRE_RW_ATTR(statahead_running_max);
712
713 static ssize_t statahead_max_show(struct kobject *kobj,
714                                   struct attribute *attr,
715                                   char *buf)
716 {
717         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
718                                               ll_kset.kobj);
719
720         return sprintf(buf, "%u\n", sbi->ll_sa_max);
721 }
722
723 static ssize_t statahead_max_store(struct kobject *kobj,
724                                    struct attribute *attr,
725                                    const char *buffer,
726                                    size_t count)
727 {
728         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
729                                               ll_kset.kobj);
730         unsigned long val;
731         int rc;
732
733         rc = kstrtoul(buffer, 0, &val);
734         if (rc)
735                 return rc;
736
737         if (val <= LL_SA_RPC_MAX)
738                 sbi->ll_sa_max = val;
739         else
740                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
741                        val, LL_SA_RPC_MAX);
742
743         return count;
744 }
745 LUSTRE_RW_ATTR(statahead_max);
746
747 static ssize_t statahead_agl_show(struct kobject *kobj,
748                                   struct attribute *attr,
749                                   char *buf)
750 {
751         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
752                                               ll_kset.kobj);
753
754         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
755 }
756
757 static ssize_t statahead_agl_store(struct kobject *kobj,
758                                    struct attribute *attr,
759                                    const char *buffer,
760                                    size_t count)
761 {
762         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
763                                               ll_kset.kobj);
764         bool val;
765         int rc;
766
767         rc = kstrtobool(buffer, &val);
768         if (rc)
769                 return rc;
770
771         if (val)
772                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
773         else
774                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
775
776         return count;
777 }
778 LUSTRE_RW_ATTR(statahead_agl);
779
780 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
781 {
782         struct super_block *sb = m->private;
783         struct ll_sb_info *sbi = ll_s2sbi(sb);
784
785         seq_printf(m, "statahead total: %u\n"
786                     "statahead wrong: %u\n"
787                     "agl total: %u\n",
788                     atomic_read(&sbi->ll_sa_total),
789                     atomic_read(&sbi->ll_sa_wrong),
790                     atomic_read(&sbi->ll_agl_total));
791         return 0;
792 }
793 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
794
795 static ssize_t lazystatfs_show(struct kobject *kobj,
796                                struct attribute *attr,
797                                char *buf)
798 {
799         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
800                                               ll_kset.kobj);
801
802         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
803 }
804
805 static ssize_t lazystatfs_store(struct kobject *kobj,
806                                 struct attribute *attr,
807                                 const char *buffer,
808                                 size_t count)
809 {
810         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
811                                               ll_kset.kobj);
812         bool val;
813         int rc;
814
815         rc = kstrtobool(buffer, &val);
816         if (rc)
817                 return rc;
818
819         if (val)
820                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
821         else
822                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
823
824         return count;
825 }
826 LUSTRE_RW_ATTR(lazystatfs);
827
828 static ssize_t max_easize_show(struct kobject *kobj,
829                                struct attribute *attr,
830                                char *buf)
831 {
832         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
833                                               ll_kset.kobj);
834         unsigned int ealen;
835         int rc;
836
837         rc = ll_get_max_mdsize(sbi, &ealen);
838         if (rc)
839                 return rc;
840
841         return sprintf(buf, "%u\n", ealen);
842 }
843 LUSTRE_RO_ATTR(max_easize);
844
845 /**
846  * Get default_easize.
847  *
848  * \see client_obd::cl_default_mds_easize
849  *
850  * \param[in] m         seq_file handle
851  * \param[in] v         unused for single entry
852  *
853  * \retval 0            on success
854  * \retval negative     negated errno on failure
855  */
856 static ssize_t default_easize_show(struct kobject *kobj,
857                                    struct attribute *attr,
858                                    char *buf)
859 {
860         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
861                                               ll_kset.kobj);
862         unsigned int ealen;
863         int rc;
864
865         rc = ll_get_default_mdsize(sbi, &ealen);
866         if (rc)
867                 return rc;
868
869         return sprintf(buf, "%u\n", ealen);
870 }
871
872 /**
873  * Set default_easize.
874  *
875  * Range checking on the passed value is handled by
876  * ll_set_default_mdsize().
877  *
878  * \see client_obd::cl_default_mds_easize
879  *
880  * \param[in] file      proc file
881  * \param[in] buffer    string passed from user space
882  * \param[in] count     \a buffer length
883  * \param[in] off       unused for single entry
884  *
885  * \retval positive     \a count on success
886  * \retval negative     negated errno on failure
887  */
888 static ssize_t default_easize_store(struct kobject *kobj,
889                                     struct attribute *attr,
890                                     const char *buffer,
891                                     size_t count)
892 {
893         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
894                                               ll_kset.kobj);
895         unsigned int val;
896         int rc;
897
898         if (count == 0)
899                 return 0;
900
901         rc = kstrtouint(buffer, 10, &val);
902         if (rc)
903                 return rc;
904
905         rc = ll_set_default_mdsize(sbi, val);
906         if (rc)
907                 return rc;
908
909         return count;
910 }
911 LUSTRE_RW_ATTR(default_easize);
912
913 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
914 {
915         const char *str[] = LL_SBI_FLAGS;
916         struct super_block *sb = m->private;
917         int flags = ll_s2sbi(sb)->ll_flags;
918         int i = 0;
919
920         while (flags != 0) {
921                 if (ARRAY_SIZE(str) <= i) {
922                         CERROR("%s: LL_SBI_FLAGS must be updated to match the sbi flags\n",
923                                ll_get_fsname(sb, NULL, 0));
924                         return -EINVAL;
925                 }
926
927                 if (flags & 0x1)
928                         seq_printf(m, "%s ", str[i]);
929                 flags >>= 1;
930                 ++i;
931         }
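        /*
         * The '\b' is apparently meant to visually cancel the trailing
         * separator space emitted by the loop above when the file is
         * displayed on a terminal.
         */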
932         seq_printf(m, "\b\n");
933         return 0;
934 }
935 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
936
937 static ssize_t xattr_cache_show(struct kobject *kobj,
938                                 struct attribute *attr,
939                                 char *buf)
940 {
941         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
942                                               ll_kset.kobj);
943
944         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
945 }
946
947 static ssize_t xattr_cache_store(struct kobject *kobj,
948                                  struct attribute *attr,
949                                  const char *buffer,
950                                  size_t count)
951 {
952         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
953                                               ll_kset.kobj);
954         bool val;
955         int rc;
956
957         rc = kstrtobool(buffer, &val);
958         if (rc)
959                 return rc;
960
961         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
962                 return -ENOTSUPP;
963
964         sbi->ll_xattr_cache_enabled = val;
965         sbi->ll_xattr_cache_set = 1;
966
967         return count;
968 }
969 LUSTRE_RW_ATTR(xattr_cache);
970
971 static int ll_tiny_write_seq_show(struct seq_file *m, void *v)
972 {
973         struct super_block *sb = m->private;
974         struct ll_sb_info *sbi = ll_s2sbi(sb);
975
976         seq_printf(m, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
977         return 0;
978 }
979
980 static ssize_t ll_tiny_write_seq_write(
981         struct file *file, const char __user *buffer, size_t count, loff_t *off)
982 {
983         struct seq_file *m = file->private_data;
984         struct super_block *sb = m->private;
985         struct ll_sb_info *sbi = ll_s2sbi(sb);
986         bool val;
987         int rc;
988
989         rc = kstrtobool_from_user(buffer, count, &val);
990         if (rc)
991                 return rc;
992
993         spin_lock(&sbi->ll_lock);
994         if (val)
995                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
996         else
997                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
998         spin_unlock(&sbi->ll_lock);
999
1000         return count;
1001 }
1002 LPROC_SEQ_FOPS(ll_tiny_write);
1003
1004 static int ll_fast_read_seq_show(struct seq_file *m, void *v)
1005 {
1006         struct super_block *sb = m->private;
1007         struct ll_sb_info *sbi = ll_s2sbi(sb);
1008
1009         seq_printf(m, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1010         return 0;
1011 }
1012
1013 static ssize_t
1014 ll_fast_read_seq_write(struct file *file, const char __user *buffer,
1015                        size_t count, loff_t *off)
1016 {
1017         struct seq_file *m = file->private_data;
1018         struct super_block *sb = m->private;
1019         struct ll_sb_info *sbi = ll_s2sbi(sb);
1020         bool val;
1021         int rc;
1022
1023         rc = kstrtobool_from_user(buffer, count, &val);
1024         if (rc)
1025                 return rc;
1026
1027         spin_lock(&sbi->ll_lock);
1028         if (val)
1029                 sbi->ll_flags |= LL_SBI_FAST_READ;
1030         else
1031                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1032         spin_unlock(&sbi->ll_lock);
1033
1034         return count;
1035 }
1036 LPROC_SEQ_FOPS(ll_fast_read);
1037
1038 static int ll_pio_seq_show(struct seq_file *m, void *v)
1039 {
1040         struct super_block *sb = m->private;
1041         struct ll_sb_info *sbi = ll_s2sbi(sb);
1042
1043         seq_printf(m, "%u\n", !!(sbi->ll_flags & LL_SBI_PIO));
1044         return 0;
1045 }
1046
1047 static ssize_t ll_pio_seq_write(struct file *file, const char __user *buffer,
1048                                 size_t count, loff_t *off)
1049 {
1050         struct seq_file *m = file->private_data;
1051         struct super_block *sb = m->private;
1052         struct ll_sb_info *sbi = ll_s2sbi(sb);
1053         bool val;
1054         int rc;
1055
1056         rc = kstrtobool_from_user(buffer, count, &val);
1057         if (rc)
1058                 return rc;
1059
1060         spin_lock(&sbi->ll_lock);
1061         if (val)
1062                 sbi->ll_flags |= LL_SBI_PIO;
1063         else
1064                 sbi->ll_flags &= ~LL_SBI_PIO;
1065         spin_unlock(&sbi->ll_lock);
1066
1067         return count;
1068 }
1069 LPROC_SEQ_FOPS(ll_pio);
1070
1071 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1072 {
1073         struct super_block      *sb    = m->private;
1074         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1075         struct cl_client_cache  *cache = sbi->ll_cache;
1076         long pages;
1077         int mb;
1078
1079         pages = atomic_long_read(&cache->ccc_unstable_nr);
1080         mb    = (pages * PAGE_SIZE) >> 20;
1081
1082         seq_printf(m, "unstable_check:     %8d\n"
1083                    "unstable_pages: %12ld\n"
1084                    "unstable_mb:        %8d\n",
1085                    cache->ccc_unstable_check, pages, mb);
1086         return 0;
1087 }
1088
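/*
 * Only the "unstable_check:" field is settable.  The value may be written
 * either as a bare boolean or prefixed with "unstable_check:", mirroring
 * the format printed by the read side above.
 */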
1089 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1090                                            const char __user *buffer,
1091                                            size_t count, loff_t *unused)
1092 {
1093         struct seq_file *seq = file->private_data;
1094         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1095         char kernbuf[128];
1096         bool val;
1097         int rc;
1098
1099         if (count == 0)
1100                 return 0;
1101         if (count >= sizeof(kernbuf))
1102                 return -EINVAL;
1103
1104         if (copy_from_user(kernbuf, buffer, count))
1105                 return -EFAULT;
1106         kernbuf[count] = 0;
1107
1108         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1109                   kernbuf;
1110         rc = kstrtobool_from_user(buffer, count, &val);
1111         if (rc < 0)
1112                 return rc;
1113
1114         /* borrow lru lock to set the value */
1115         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1116         sbi->ll_cache->ccc_unstable_check = val;
1117         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1118
1119         return count;
1120 }
1121 LPROC_SEQ_FOPS(ll_unstable_stats);
1122
1123 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1124 {
1125         struct super_block *sb = m->private;
1126         struct ll_sb_info *sbi = ll_s2sbi(sb);
1127         struct root_squash_info *squash = &sbi->ll_squash;
1128
1129         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1130         return 0;
1131 }
1132
1133 static ssize_t ll_root_squash_seq_write(struct file *file,
1134                                         const char __user *buffer,
1135                                         size_t count, loff_t *off)
1136 {
1137         struct seq_file *m = file->private_data;
1138         struct super_block *sb = m->private;
1139         struct ll_sb_info *sbi = ll_s2sbi(sb);
1140         struct root_squash_info *squash = &sbi->ll_squash;
1141
1142         return lprocfs_wr_root_squash(buffer, count, squash,
1143                                       ll_get_fsname(sb, NULL, 0));
1144 }
1145 LPROC_SEQ_FOPS(ll_root_squash);
1146
1147 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1148 {
1149         struct super_block *sb = m->private;
1150         struct ll_sb_info *sbi = ll_s2sbi(sb);
1151         struct root_squash_info *squash = &sbi->ll_squash;
1152         int len;
1153
1154         down_read(&squash->rsi_sem);
1155         if (!list_empty(&squash->rsi_nosquash_nids)) {
1156                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1157                                         &squash->rsi_nosquash_nids);
1158                 m->count += len;
1159                 seq_putc(m, '\n');
1160         } else {
1161                 seq_puts(m, "NONE\n");
1162         }
1163         up_read(&squash->rsi_sem);
1164
1165         return 0;
1166 }
1167
1168 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1169                                           const char __user *buffer,
1170                                           size_t count, loff_t *off)
1171 {
1172         struct seq_file *m = file->private_data;
1173         struct super_block *sb = m->private;
1174         struct ll_sb_info *sbi = ll_s2sbi(sb);
1175         struct root_squash_info *squash = &sbi->ll_squash;
1176         int rc;
1177
1178         rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
1179                                       ll_get_fsname(sb, NULL, 0));
1180         if (rc < 0)
1181                 return rc;
1182
1183         ll_compute_rootsquash_state(sbi);
1184
1185         return rc;
1186 }
1187 LPROC_SEQ_FOPS(ll_nosquash_nids);
1188
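/*
 * Entries that remain under /proc/fs/lustre/llite/<mount>/.  The simple
 * scalar tunables are instead exported through sysfs via the llite_attrs[]
 * attribute list below.
 */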
1189 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1190         { .name =       "site",
1191           .fops =       &ll_site_stats_fops                     },
1192         { .name =       "stat_blocksize",
1193           .fops =       &ll_stat_blksize_fops                   },
1194         { .name =       "max_read_ahead_mb",
1195           .fops =       &ll_max_readahead_mb_fops               },
1196         { .name =       "max_read_ahead_per_file_mb",
1197           .fops =       &ll_max_readahead_per_file_mb_fops      },
1198         { .name =       "max_read_ahead_whole_mb",
1199           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1200         { .name =       "max_cached_mb",
1201           .fops =       &ll_max_cached_mb_fops                  },
1202         { .name =       "statahead_stats",
1203           .fops =       &ll_statahead_stats_fops                },
1204         { .name =       "sbi_flags",
1205           .fops =       &ll_sbi_flags_fops                      },
1206         { .name =       "unstable_stats",
1207           .fops =       &ll_unstable_stats_fops                 },
1208         { .name =       "root_squash",
1209           .fops =       &ll_root_squash_fops                    },
1210         { .name =       "nosquash_nids",
1211           .fops =       &ll_nosquash_nids_fops                  },
1212         { .name =       "fast_read",
1213           .fops =       &ll_fast_read_fops,                     },
1214         { .name =       "pio",
1215           .fops =       &ll_pio_fops,                           },
1216         { .name =       "tiny_write",
1217           .fops =       &ll_tiny_write_fops,                    },
1218         { NULL }
1219 };
1220
1221 #define MAX_STRING_SIZE 128
1222
1223 static struct attribute *llite_attrs[] = {
1224         &lustre_attr_blocksize.attr,
1225         &lustre_attr_kbytestotal.attr,
1226         &lustre_attr_kbytesfree.attr,
1227         &lustre_attr_kbytesavail.attr,
1228         &lustre_attr_filestotal.attr,
1229         &lustre_attr_filesfree.attr,
1230         &lustre_attr_client_type.attr,
1231         &lustre_attr_fstype.attr,
1232         &lustre_attr_uuid.attr,
1233         &lustre_attr_checksums.attr,
1234         &lustre_attr_checksum_pages.attr,
1235         &lustre_attr_stats_track_pid.attr,
1236         &lustre_attr_stats_track_ppid.attr,
1237         &lustre_attr_stats_track_gid.attr,
1238         &lustre_attr_statahead_running_max.attr,
1239         &lustre_attr_statahead_max.attr,
1240         &lustre_attr_statahead_agl.attr,
1241         &lustre_attr_lazystatfs.attr,
1242         &lustre_attr_max_easize.attr,
1243         &lustre_attr_default_easize.attr,
1244         &lustre_attr_xattr_cache.attr,
1245         NULL,
1246 };
1247
1248 static void llite_kobj_release(struct kobject *kobj)
1249 {
1250         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1251                                               ll_kset.kobj);
1252         complete(&sbi->ll_kobj_unregister);
1253 }
1254
1255 static struct kobj_type llite_ktype = {
1256         .default_attrs  = llite_attrs,
1257         .sysfs_ops      = &lustre_sysfs_ops,
1258         .release        = llite_kobj_release,
1259 };
1260
1261 static const struct llite_file_opcode {
1262         __u32       opcode;
1263         __u32       type;
1264         const char *opname;
1265 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1266         /* file operation */
1267         { LPROC_LL_DIRTY_HITS,     LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1268         { LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1269         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1270                                    "read_bytes" },
1271         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1272                                    "write_bytes" },
1273         { LPROC_LL_BRW_READ,       LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1274                                    "brw_read" },
1275         { LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1276                                    "brw_write" },
1277         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1278         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1279         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1280         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1281         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1282         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1283         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1284         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1285         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1286         /* inode operation */
1287         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1288         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1289         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1290         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1291         /* dir inode operation */
1292         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1293         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1294         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1295         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1296         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1297         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1298         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1299         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1300         /* special inode operation */
1301         { LPROC_LL_STAFS,          LPROCFS_TYPE_REGS, "statfs" },
1302         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1303         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1304         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1305         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1306         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1307         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1308         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1309 };
1310
1311 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1312 {
1313         if (!sbi->ll_stats)
1314                 return;
1315         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1316                 lprocfs_counter_add(sbi->ll_stats, op, count);
1317         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1318                  sbi->ll_stats_track_id == current->pid)
1319                 lprocfs_counter_add(sbi->ll_stats, op, count);
1320         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1321                  sbi->ll_stats_track_id == current->parent->pid)
1322                 lprocfs_counter_add(sbi->ll_stats, op, count);
1323         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1324                  sbi->ll_stats_track_id ==
1325                         from_kgid(&init_user_ns, current_gid()))
1326                 lprocfs_counter_add(sbi->ll_stats, op, count);
1327 }
1328 EXPORT_SYMBOL(ll_stats_ops_tally);
1329
1330 static const char *ra_stat_string[] = {
1331         [RA_STAT_HIT] = "hits",
1332         [RA_STAT_MISS] = "misses",
1333         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1334         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1335         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1336         [RA_STAT_FAILED_MATCH] = "failed lock match",
1337         [RA_STAT_DISCARDED] = "read but discarded",
1338         [RA_STAT_ZERO_LEN] = "zero length file",
1339         [RA_STAT_ZERO_WINDOW] = "zero size window",
1340         [RA_STAT_EOF] = "read-ahead to EOF",
1341         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1342         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1343         [RA_STAT_FAILED_REACH_END] = "failed to reach end"
1344 };
1345
1346 LPROC_SEQ_FOPS_RO_TYPE(llite, name);
1347 LPROC_SEQ_FOPS_RO_TYPE(llite, uuid);
1348
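/*
 * Per-mount setup: create the /proc/fs/lustre/llite/<name> directory,
 * attach the stats, read_ahead_stats and tunable files, then register the
 * sysfs kset backing llite_attrs[].  Errors unwind in reverse order
 * through the out_* labels.
 */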
1349 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1350 {
1351         struct lustre_sb_info *lsi = s2lsi(sb);
1352         struct ll_sb_info *sbi = ll_s2sbi(sb);
1353         struct lprocfs_vars lvars[2];
1354         int err, id, rc;
1355
1356         ENTRY;
1357         memset(lvars, 0, sizeof(lvars));
1358         lvars[0].name = name;
1359
1360         LASSERT(sbi != NULL);
1361
1362         sbi->ll_proc_root = lprocfs_register(name, proc_lustre_fs_root,
1363                                              NULL, NULL);
1364         if (IS_ERR(sbi->ll_proc_root)) {
1365                 err = PTR_ERR(sbi->ll_proc_root);
1366                 sbi->ll_proc_root = NULL;
1367                 RETURN(err);
1368         }
1369
1370         rc = lprocfs_seq_create(sbi->ll_proc_root, "dump_page_cache", 0444,
1371                                 &vvp_dump_pgcache_file_ops, sbi);
1372         if (rc)
1373                 CWARN("Error adding the dump_page_cache file\n");
1374
1375         rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats", 0644,
1376                                 &ll_rw_extents_stats_fops, sbi);
1377         if (rc)
1378                 CWARN("Error adding the extent_stats file\n");
1379
1380         rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats_per_process",
1381                                 0644, &ll_rw_extents_stats_pp_fops, sbi);
1382         if (rc)
1383                 CWARN("Error adding the extents_stats_per_process file\n");
1384
1385         rc = lprocfs_seq_create(sbi->ll_proc_root, "offset_stats", 0644,
1386                                 &ll_rw_offset_stats_fops, sbi);
1387         if (rc)
1388                 CWARN("Error adding the offset_stats file\n");
1389
1390         /* File operations stats */
1391         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1392                                             LPROCFS_STATS_FLAG_NONE);
1393         if (sbi->ll_stats == NULL)
1394                 GOTO(out_proc, err = -ENOMEM);
1395
1396         /* do counter init */
1397         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1398                 __u32 type = llite_opcode_table[id].type;
1399                 void *ptr = NULL;
1400                 if (type & LPROCFS_TYPE_REGS)
1401                         ptr = "regs";
1402                 else if (type & LPROCFS_TYPE_BYTES)
1403                         ptr = "bytes";
1404                 else if (type & LPROCFS_TYPE_PAGES)
1405                         ptr = "pages";
1406                 lprocfs_counter_init(sbi->ll_stats,
1407                                      llite_opcode_table[id].opcode,
1408                                      (type & LPROCFS_CNTR_AVGMINMAX),
1409                                      llite_opcode_table[id].opname, ptr);
1410         }
1411
1412         err = lprocfs_register_stats(sbi->ll_proc_root, "stats", sbi->ll_stats);
1413         if (err)
1414                 GOTO(out_stats, err);
1415
1416         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1417                                                LPROCFS_STATS_FLAG_NONE);
1418         if (sbi->ll_ra_stats == NULL)
1419                 GOTO(out_stats, err = -ENOMEM);
1420
1421         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1422                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1423                                      ra_stat_string[id], "pages");
1424         err = lprocfs_register_stats(sbi->ll_proc_root, "read_ahead_stats",
1425                                      sbi->ll_ra_stats);
1426         if (err)
1427                 GOTO(out_ra_stats, err);
1428
1429         err = lprocfs_add_vars(sbi->ll_proc_root, lprocfs_llite_obd_vars, sb);
1430         if (err)
1431                 GOTO(out_ra_stats, err);
1432
1433         /* The per-mount sysfs kset is registered here alongside the procfs entries */
1434         sbi->ll_kset.kobj.parent = llite_kobj;
1435         sbi->ll_kset.kobj.ktype = &llite_ktype;
1436         init_completion(&sbi->ll_kobj_unregister);
1437         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1438         if (err)
1439                 GOTO(out_ra_stats, err);
1440
1441         err = kset_register(&sbi->ll_kset);
1442         if (err)
1443                 GOTO(out_ra_stats, err);
1444
1445         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1446
1447         RETURN(0);
1448 out_ra_stats:
1449         lprocfs_free_stats(&sbi->ll_ra_stats);
1450 out_stats:
1451         lprocfs_free_stats(&sbi->ll_stats);
1452 out_proc:
1453         lprocfs_remove(&sbi->ll_proc_root);
1454
1455         RETURN(err);
1456 }
1457
1458 int lprocfs_ll_register_obd(struct super_block *sb, const char *obdname)
1459 {
1460         struct lprocfs_vars lvars[2];
1461         struct ll_sb_info *sbi = ll_s2sbi(sb);
1462         struct obd_device *obd;
1463         struct proc_dir_entry *dir;
1464         char name[MAX_STRING_SIZE + 1];
1465         int err;
1466         ENTRY;
1467
1468         memset(lvars, 0, sizeof(lvars));
1469
1470         name[MAX_STRING_SIZE] = '\0';
1471         lvars[0].name = name;
1472
1473         LASSERT(sbi != NULL);
1474         LASSERT(obdname != NULL);
1475
1476         obd = class_name2obd(obdname);
1477
1478         LASSERT(obd != NULL);
1479         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
1480         LASSERT(obd->obd_type->typ_name != NULL);
1481
1482         dir = proc_mkdir(obd->obd_type->typ_name, sbi->ll_proc_root);
1483         if (dir == NULL)
1484                 GOTO(out, err = -ENOMEM);
1485
1486         snprintf(name, MAX_STRING_SIZE, "common_name");
1487         lvars[0].fops = &llite_name_fops;
1488         err = lprocfs_add_vars(dir, lvars, obd);
1489         if (err)
1490                 GOTO(out, err);
1491
1492         snprintf(name, MAX_STRING_SIZE, "uuid");
1493         lvars[0].fops = &llite_uuid_fops;
1494         err = lprocfs_add_vars(dir, lvars, obd);
1495         if (err)
1496                 GOTO(out, err);
1497
1498 out:
1499         if (err) {
1500                 lprocfs_remove(&sbi->ll_proc_root);
1501                 lprocfs_free_stats(&sbi->ll_ra_stats);
1502                 lprocfs_free_stats(&sbi->ll_stats);
1503         }
1504         RETURN(err);
1505 }
1506
1507 void ll_debugfs_unregister_super(struct super_block *sb)
1508 {
1509         struct lustre_sb_info *lsi = s2lsi(sb);
1510         struct ll_sb_info *sbi = ll_s2sbi(sb);
1511
1512         kobject_put(lsi->lsi_kobj);
1513
1514         kset_unregister(&sbi->ll_kset);
1515         wait_for_completion(&sbi->ll_kobj_unregister);
1516
1517         if (sbi->ll_proc_root) {
1518                 lprocfs_remove(&sbi->ll_proc_root);
1519                 lprocfs_free_stats(&sbi->ll_ra_stats);
1520                 lprocfs_free_stats(&sbi->ll_stats);
1521         }
1522 }
1523 #undef MAX_STRING_SIZE
1524
1525 #define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
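/* Integer percentage helper, e.g. pct(3, 8) == 37 and pct(5, 0) == 0. */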
1526
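/*
 * Print one read/write I/O size histogram from @io_extents to @seq.  @which
 * selects the pp_extents[] slot: indices below LL_PROCESS_HIST_MAX hold
 * per-PID histograms, while the slot at LL_PROCESS_HIST_MAX accumulates the
 * per-mount totals (see ll_rw_stats_tally() below).
 */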
1527 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1528                                    struct seq_file *seq, int which)
1529 {
1530         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1531         unsigned long start, end, r, w;
1532         const char *unitp = "KMGTPEZY";
1533         int i, units = 10;
1534         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1535
1536         read_cum = 0;
1537         write_cum = 0;
1538         start = 0;
1539
1540         for (i = 0; i < LL_HIST_MAX; i++) {
1541                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1542                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1543         }
1544
1545         for (i = 0; i < LL_HIST_MAX; i++) {
1546                 r = pp_info->pp_r_hist.oh_buckets[i];
1547                 w = pp_info->pp_w_hist.oh_buckets[i];
1548                 read_cum += r;
1549                 write_cum += w;
1550                 end = 1 << (i + LL_HIST_START - units);
1551                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu  | "
1552                            "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
1553                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1554                            r, pct(r, read_tot), pct(read_cum, read_tot),
1555                            w, pct(w, write_tot), pct(write_cum, write_tot));
1556                 start = end;
1557                 if (start == 1 << 10) {
1558                         start = 1;
1559                         units += 10;
1560                         unitp++;
1561                 }
1562                 if (read_cum == read_tot && write_cum == write_tot)
1563                         break;
1564         }
1565 }
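
/*
 * Each output row covers a power-of-two I/O size range; the unit letter
 * rolls over through "KMGTPEZY" once the boundary reaches 1024 of the
 * current unit.  A row might look roughly like (numbers invented):
 *
 *     4K -    8K :             12   25   40  |              3   10   15
 *
 * i.e. read calls, % and cumulative % in that bucket, then the same three
 * columns for writes.
 */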
1566
1567 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1568 {
1569         struct timespec64 now;
1570         struct ll_sb_info *sbi = seq->private;
1571         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1572         int k;
1573
1574         ktime_get_real_ts64(&now);
1575
1576         if (!sbi->ll_rw_stats_on) {
1577                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1578                 return 0;
1579         }
1580         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1581                    (s64)now.tv_sec, now.tv_nsec);
1582         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1583         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1584                    "extents", "calls", "%", "cum%",
1585                    "calls", "%", "cum%");
1586         spin_lock(&sbi->ll_pp_extent_lock);
1587         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1588                 if (io_extents->pp_extents[k].pid != 0) {
1589                         seq_printf(seq, "\nPID: %d\n",
1590                                    io_extents->pp_extents[k].pid);
1591                         ll_display_extents_info(io_extents, seq, k);
1592                 }
1593         }
1594         spin_unlock(&sbi->ll_pp_extent_lock);
1595         return 0;
1596 }
1597
1598 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1599                                                 const char __user *buf,
1600                                                 size_t len,
1601                                                 loff_t *off)
1602 {
1603         struct seq_file *seq = file->private_data;
1604         struct ll_sb_info *sbi = seq->private;
1605         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1606         int i;
1607         __s64 value;
1608
1609         if (len == 0)
1610                 return -EINVAL;
1611
1612         value = ll_stats_pid_write(buf, len);
1613
1614         if (value == 0)
1615                 sbi->ll_rw_stats_on = 0;
1616         else
1617                 sbi->ll_rw_stats_on = 1;
1618
1619         spin_lock(&sbi->ll_pp_extent_lock);
1620         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1621                 io_extents->pp_extents[i].pid = 0;
1622                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1623                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1624         }
1625         spin_unlock(&sbi->ll_pp_extent_lock);
1626         return len;
1627 }
1628
1629 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
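
/*
 * Typical use from user space, assuming this fops is exported as the
 * "extents_stats_pp" entry of lprocfs_llite_obd_vars as in stock Lustre
 * (the entry name is defined elsewhere, so treat the path as illustrative):
 *
 *   # echo 1 > /proc/fs/lustre/llite/<name>/extents_stats_pp   # enable
 *   # cat /proc/fs/lustre/llite/<name>/extents_stats_pp        # per-PID view
 *   # echo 0 > /proc/fs/lustre/llite/<name>/extents_stats_pp   # disable
 *
 * Note that any write, whether enabling or disabling, also clears the
 * accumulated per-PID histograms (see the write handler above).
 */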
1630
1631 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1632 {
1633         struct timespec64 now;
1634         struct ll_sb_info *sbi = seq->private;
1635         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1636
1637         ktime_get_real_ts64(&now);
1638
1639         if (!sbi->ll_rw_stats_on) {
1640                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1641                 return 0;
1642         }
1643         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1644                    (s64)now.tv_sec, now.tv_nsec);
1645
1646         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1647         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1648                    "extents", "calls", "%", "cum%",
1649                    "calls", "%", "cum%");
1650         spin_lock(&sbi->ll_lock);
1651         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1652         spin_unlock(&sbi->ll_lock);
1653
1654         return 0;
1655 }
1656
1657 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1658                                              const char __user *buf,
1659                                              size_t len, loff_t *off)
1660 {
1661         struct seq_file *seq = file->private_data;
1662         struct ll_sb_info *sbi = seq->private;
1663         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1664         int i;
1665         __s64 value;
1666
1667         if (len == 0)
1668                 return -EINVAL;
1669
1670         value = ll_stats_pid_write(buf, len);
1671
1672         if (value == 0)
1673                 sbi->ll_rw_stats_on = 0;
1674         else
1675                 sbi->ll_rw_stats_on = 1;
1676
1677         spin_lock(&sbi->ll_pp_extent_lock);
1678         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1679                 io_extents->pp_extents[i].pid = 0;
1680                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1681                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1682         }
1683         spin_unlock(&sbi->ll_pp_extent_lock);
1684
1685         return len;
1686 }
1687 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1688
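/**
 * ll_rw_stats_tally() - Account one file read or write in the I/O statistics
 * @sbi:   superblock info holding the statistics tables
 * @pid:   PID that issued the I/O
 * @file:  file (ll_file_data) the I/O was issued against
 * @pos:   starting file offset of the I/O
 * @count: number of bytes transferred
 * @rw:    0 for a read, non-zero for a write
 *
 * Updates the per-PID and per-mount extent-size histograms and the per-PID
 * offset tracking reported by the *_stats files above.  Table slots are
 * reused round-robin once LL_PROCESS_HIST_MAX (or LL_OFFSET_HIST_MAX)
 * entries are in use.  Does nothing while ll_rw_stats_on is clear.
 */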
1689 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1690                        struct ll_file_data *file, loff_t pos,
1691                        size_t count, int rw)
1692 {
1693         int i, cur = -1;
1694         struct ll_rw_process_info *process;
1695         struct ll_rw_process_info *offset;
1696         int *off_count = &sbi->ll_rw_offset_entry_count;
1697         int *process_count = &sbi->ll_offset_process_count;
1698         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1699
1700         if (!sbi->ll_rw_stats_on)
1701                 return;
1702         process = sbi->ll_rw_process_info;
1703         offset = sbi->ll_rw_offset_info;
1704
1705         spin_lock(&sbi->ll_pp_extent_lock);
1706         /* Extent statistics */
1707         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1708                 if (io_extents->pp_extents[i].pid == pid) {
1709                         cur = i;
1710                         break;
1711                 }
1712         }
1713
1714         if (cur == -1) {
1715                 /* new process */
1716                 sbi->ll_extent_process_count =
1717                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1718                 cur = sbi->ll_extent_process_count;
1719                 io_extents->pp_extents[cur].pid = pid;
1720                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1721                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1722         }
1723
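        /*
         * Pick the histogram bucket: the smallest i for which count is less
         * than (1 << (LL_HIST_START + i)), capped at the last bucket.
         * Assuming LL_HIST_START is 12 (4 KiB), as in llite_internal.h, a
         * 6000-byte I/O stops at i = 1, i.e. the 4K-8K row of the report.
         */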
1724         for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1725              (i < (LL_HIST_MAX - 1)); i++);
1726         if (rw == 0) {
1727                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1728                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1729         } else {
1730                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1731                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1732         }
1733         spin_unlock(&sbi->ll_pp_extent_lock);
1734
1735         spin_lock(&sbi->ll_process_lock);
1736         /* Offset statistics */
1737         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1738                 if (process[i].rw_pid == pid) {
1739                         if (process[i].rw_last_file != file) {
1740                                 process[i].rw_range_start = pos;
1741                                 process[i].rw_last_file_pos = pos + count;
1742                                 process[i].rw_smallest_extent = count;
1743                                 process[i].rw_largest_extent = count;
1744                                 process[i].rw_offset = 0;
1745                                 process[i].rw_last_file = file;
1746                                 spin_unlock(&sbi->ll_process_lock);
1747                                 return;
1748                         }
1749                         if (process[i].rw_last_file_pos != pos) {
1750                                 *off_count =
1751                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1752                                 offset[*off_count].rw_op = process[i].rw_op;
1753                                 offset[*off_count].rw_pid = pid;
1754                                 offset[*off_count].rw_range_start =
1755                                         process[i].rw_range_start;
1756                                 offset[*off_count].rw_range_end =
1757                                         process[i].rw_last_file_pos;
1758                                 offset[*off_count].rw_smallest_extent =
1759                                         process[i].rw_smallest_extent;
1760                                 offset[*off_count].rw_largest_extent =
1761                                         process[i].rw_largest_extent;
1762                                 offset[*off_count].rw_offset =
1763                                         process[i].rw_offset;
1764                                 process[i].rw_op = rw;
1765                                 process[i].rw_range_start = pos;
1766                                 process[i].rw_smallest_extent = count;
1767                                 process[i].rw_largest_extent = count;
1768                                 process[i].rw_offset = pos -
1769                                         process[i].rw_last_file_pos;
1770                         }
1771                         if (process[i].rw_smallest_extent > count)
1772                                 process[i].rw_smallest_extent = count;
1773                         if (process[i].rw_largest_extent < count)
1774                                 process[i].rw_largest_extent = count;
1775                         process[i].rw_last_file_pos = pos + count;
1776                         spin_unlock(&sbi->ll_process_lock);
1777                         return;
1778                 }
1779         }
1780         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1781         process[*process_count].rw_pid = pid;
1782         process[*process_count].rw_op = rw;
1783         process[*process_count].rw_range_start = pos;
1784         process[*process_count].rw_last_file_pos = pos + count;
1785         process[*process_count].rw_smallest_extent = count;
1786         process[*process_count].rw_largest_extent = count;
1787         process[*process_count].rw_offset = 0;
1788         process[*process_count].rw_last_file = file;
1789         spin_unlock(&sbi->ll_process_lock);
1790 }
1791
1792 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1793 {
1794         struct timespec64 now;
1795         struct ll_sb_info *sbi = seq->private;
1796         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1797         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1798         int i;
1799
1800         ktime_get_real_ts64(&now);
1801
1802         if (!sbi->ll_rw_stats_on) {
1803                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1804                 return 0;
1805         }
1806         spin_lock(&sbi->ll_process_lock);
1807
1808         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1809                    (s64)now.tv_sec, now.tv_nsec);
1810         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1811                    "R/W", "PID", "RANGE START", "RANGE END",
1812                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1813
1814         /* We stored the discontiguous offsets here; print them first */
1815         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1816                 if (offset[i].rw_pid != 0)
1817                         seq_printf(seq,
1818                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1819                                    offset[i].rw_op == READ ? 'R' : 'W',
1820                                    offset[i].rw_pid,
1821                                    offset[i].rw_range_start,
1822                                    offset[i].rw_range_end,
1823                                    (unsigned long)offset[i].rw_smallest_extent,
1824                                    (unsigned long)offset[i].rw_largest_extent,
1825                                    offset[i].rw_offset);
1826         }
1827
1828         /* Then print the current offsets for each process */
1829         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1830                 if (process[i].rw_pid != 0)
1831                         seq_printf(seq,
1832                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1833                                    process[i].rw_op == READ ? 'R' : 'W',
1834                                    process[i].rw_pid,
1835                                    process[i].rw_range_start,
1836                                    process[i].rw_last_file_pos,
1837                                    (unsigned long)process[i].rw_smallest_extent,
1838                                    (unsigned long)process[i].rw_largest_extent,
1839                                    process[i].rw_offset);
1840         }
1841         spin_unlock(&sbi->ll_process_lock);
1842
1843         return 0;
1844 }
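
/*
 * A row of the offset report might look roughly like (values invented):
 *
 *   R       4521              0        1048576            4096          65536              0
 *
 * i.e. PID 4521 read the range [0, 1 MiB) in extents between 4 KiB and
 * 64 KiB with no discontiguous seek recorded (OFFSET 0).
 */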
1845
1846 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1847                                             const char __user *buf,
1848                                             size_t len, loff_t *off)
1849 {
1850         struct seq_file *seq = file->private_data;
1851         struct ll_sb_info *sbi = seq->private;
1852         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1853         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1854         __s64 value;
1855
1856         if (len == 0)
1857                 return -EINVAL;
1858
1859         value = ll_stats_pid_write(buf, len);
1860
1861         if (value == 0)
1862                 sbi->ll_rw_stats_on = 0;
1863         else
1864                 sbi->ll_rw_stats_on = 1;
1865
1866         spin_lock(&sbi->ll_process_lock);
1867         sbi->ll_offset_process_count = 0;
1868         sbi->ll_rw_offset_entry_count = 0;
1869         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1870                LL_PROCESS_HIST_MAX);
1871         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1872                LL_OFFSET_HIST_MAX);
1873         spin_unlock(&sbi->ll_process_lock);
1874
1875         return len;
1876 }
1877
1878 /**
1879  * ll_stats_pid_write() - Determine if stats collection should be enabled
1880  * @buf: Buffer containing the data written
1881  * @len: Number of bytes in the buffer
1882  *
1883  * Several proc files begin collecting stats when a value is written, and stop
1884  * collecting when either '0' or 'disable' is written. This function checks the
1885  * written value to see if collection should be enabled or disabled.
1886  *
1887  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
1888  * equivalent of a number is written, that number is returned. Otherwise,
1889  * 1 is returned. Non-zero return values indicate collection should be enabled.
1890  */
1891 static __s64 ll_stats_pid_write(const char __user *buf, size_t len)
1892 {
1893         unsigned long long value = 1;
1894         int rc;
1895         char kernbuf[16];
1896
1897         rc = kstrtoull_from_user(buf, len, 0, &value);
1898         if (rc < 0 && len < sizeof(kernbuf)) {
1899
1900                 if (copy_from_user(kernbuf, buf, len))
1901                         return -EFAULT;
1902                 kernbuf[len] = 0;
1903
1904                 if (kernbuf[len - 1] == '\n')
1905                         kernbuf[len - 1] = 0;
1906
1907                 if (strncasecmp(kernbuf, "disable", 7) == 0)
1908                         value = 0;
1909         }
1910
1911         return value;
1912 }
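
/*
 * Examples of the mapping implemented above (illustrative): writing "0" or
 * "disable" returns 0; writing "42" returns 42; writing other non-numeric
 * text such as "on" returns 1, so collection is enabled.
 */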
1913
1914 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1915 #endif /* CONFIG_PROC_FS */