LU-8066 llite: move /proc/fs/lustre/llite/statahead_{max, agl} to sysfs
lustre/llite/lproc_llite.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 struct proc_dir_entry *proc_lustre_fs_root;
47 static struct kobject *llite_kobj;
48
49 int llite_tunables_register(void)
50 {
51         int rc = 0;
52
53         proc_lustre_fs_root = lprocfs_register("llite", proc_lustre_root,
54                                                NULL, NULL);
55         if (IS_ERR(proc_lustre_fs_root)) {
56                 rc = PTR_ERR(proc_lustre_fs_root);
57                 CERROR("cannot register '/proc/fs/lustre/llite': rc = %d\n",
58                        rc);
59                 proc_lustre_fs_root = NULL;
60                 return rc;
61         }
62
63         llite_kobj = class_setup_tunables("llite");
64         if (IS_ERR(llite_kobj)) {
65                 rc = PTR_ERR(llite_kobj);
66                 llite_kobj = NULL;
67         }
68
69         return rc;
70 }
71
72 void llite_tunables_unregister(void)
73 {
74         if (llite_kobj)
75                 kobject_put(llite_kobj);
76
77         lprocfs_remove(&proc_lustre_fs_root);
78 }
79
80 #ifdef CONFIG_PROC_FS
81 /* /proc/fs/lustre/llite mount point registration */
82 static const struct file_operations ll_rw_extents_stats_fops;
83 static const struct file_operations ll_rw_extents_stats_pp_fops;
84 static const struct file_operations ll_rw_offset_stats_fops;
85 static __s64 ll_stats_pid_write(const char __user *buf, size_t len);
86
87 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
88                               char *buf)
89 {
90         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
91                                               ll_kset.kobj);
92         struct obd_statfs osfs;
93         int rc;
94
95         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
96         if (rc)
97                 return rc;
98
99         return sprintf(buf, "%u\n", osfs.os_bsize);
100 }
101 LUSTRE_RO_ATTR(blocksize);
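/*
 * Illustrative sketch (not part of the original file): any additional
 * read-only sysfs tunable follows the same recipe as blocksize_show() above:
 * recover the ll_sb_info from the embedded kset kobject with container_of(),
 * fetch fresh statfs data, and print a single value.  The "maxnamelen"
 * attribute and the use of osfs.os_namelen below are assumptions made only
 * for illustration.
 */
#if 0	/* example only, not built */
static ssize_t maxnamelen_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kset.kobj);
	struct obd_statfs osfs;
	int rc;

	rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
	if (rc)
		return rc;

	return sprintf(buf, "%u\n", osfs.os_namelen);
}
LUSTRE_RO_ATTR(maxnamelen);
#endif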
102
103 static int ll_stat_blksize_seq_show(struct seq_file *m, void *v)
104 {
105         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
106
107         seq_printf(m, "%u\n", sbi->ll_stat_blksize);
108
109         return 0;
110 }
111
112 static ssize_t ll_stat_blksize_seq_write(struct file *file,
113                                          const char __user *buffer,
114                                          size_t count, loff_t *off)
115 {
116         struct seq_file *m = file->private_data;
117         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
118         unsigned int val;
119         int rc;
120
121         rc = kstrtouint_from_user(buffer, count, 0, &val);
122         if (rc)
123                 return rc;
124
125         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
126                 return -ERANGE;
127
128         sbi->ll_stat_blksize = val;
129
130         return count;
131 }
132 LPROC_SEQ_FOPS(ll_stat_blksize);
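/*
 * Illustrative sketch (not part of the original file): the predicate used by
 * ll_stat_blksize_seq_write() above accepts either 0 (report the real
 * blocksize) or a power of two no smaller than PAGE_SIZE.  A stand-alone
 * userspace rendition of the same check, with PAGE_SIZE assumed to be 4096:
 */
#if 0	/* example only, not built */
#include <stdio.h>
#include <stdbool.h>

#define EXAMPLE_PAGE_SIZE 4096U	/* assumption for this sketch */

static bool stat_blksize_valid(unsigned int val)
{
	/* same condition as the kernel check, just parenthesized plainly */
	return val == 0 ||
	       (val >= EXAMPLE_PAGE_SIZE && (val & (val - 1)) == 0);
}

int main(void)
{
	unsigned int samples[] = { 0, 512, 4096, 6144, 1048576 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%7u -> %s\n", samples[i],
		       stat_blksize_valid(samples[i]) ? "accepted" : "-ERANGE");
	return 0;
}
#endif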
133
134 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
135                                 char *buf)
136 {
137         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
138                                               ll_kset.kobj);
139         struct obd_statfs osfs;
140         u32 blk_size;
141         u64 result;
142         int rc;
143
144         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
145         if (rc)
146                 return rc;
147
148         blk_size = osfs.os_bsize >> 10;
149         result = osfs.os_blocks;
150
151         while (blk_size >>= 1)
152                 result <<= 1;
153
154         return sprintf(buf, "%llu\n", result);
155 }
156 LUSTRE_RO_ATTR(kbytestotal);
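/*
 * Illustrative sketch (not part of the original file): the shift loop above
 * multiplies the block count by os_bsize/1024 without a 64-bit division,
 * relying on the block size being a power of two.  For example, with
 * os_bsize = 4096 and os_blocks = 1000 the result is 4000 KiB:
 */
#if 0	/* example only, not built */
#include <stdio.h>

int main(void)
{
	unsigned int blk_size = 4096 >> 10;	/* 4 KiB per block */
	unsigned long long result = 1000;	/* os_blocks */

	while (blk_size >>= 1)
		result <<= 1;

	printf("%llu KiB\n", result);		/* prints 4000 */
	return 0;
}
#endif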
157
158 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
159                                char *buf)
160 {
161         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
162                                               ll_kset.kobj);
163         struct obd_statfs osfs;
164         u32 blk_size;
165         u64 result;
166         int rc;
167
168         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
169         if (rc)
170                 return rc;
171
172         blk_size = osfs.os_bsize >> 10;
173         result = osfs.os_bfree;
174
175         while (blk_size >>= 1)
176                 result <<= 1;
177
178         return sprintf(buf, "%llu\n", result);
179 }
180 LUSTRE_RO_ATTR(kbytesfree);
181
182 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
183                                 char *buf)
184 {
185         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
186                                               ll_kset.kobj);
187         struct obd_statfs osfs;
188         u32 blk_size;
189         u64 result;
190         int rc;
191
192         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
193         if (rc)
194                 return rc;
195
196         blk_size = osfs.os_bsize >> 10;
197         result = osfs.os_bavail;
198
199         while (blk_size >>= 1)
200                 result <<= 1;
201
202         return sprintf(buf, "%llu\n", result);
203 }
204 LUSTRE_RO_ATTR(kbytesavail);
205
206 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
207                                char *buf)
208 {
209         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
210                                               ll_kset.kobj);
211         struct obd_statfs osfs;
212         int rc;
213
214         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
215         if (rc)
216                 return rc;
217
218         return sprintf(buf, "%llu\n", osfs.os_files);
219 }
220 LUSTRE_RO_ATTR(filestotal);
221
222 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
223                               char *buf)
224 {
225         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
226                                               ll_kset.kobj);
227         struct obd_statfs osfs;
228         int rc;
229
230         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
231         if (rc)
232                 return rc;
233
234         return sprintf(buf, "%llu\n", osfs.os_ffree);
235 }
236 LUSTRE_RO_ATTR(filesfree);
237
238 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
239                                 char *buf)
240 {
241         return sprintf(buf, "local client\n");
242 }
243 LUSTRE_RO_ATTR(client_type);
244
245 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
246                            char *buf)
247 {
248         return sprintf(buf, "lustre\n");
249 }
250 LUSTRE_RO_ATTR(fstype);
251
252 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
253                          char *buf)
254 {
255         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
256                                               ll_kset.kobj);
257
258         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
259 }
260 LUSTRE_RO_ATTR(uuid);
261
262 static int ll_xattr_cache_seq_show(struct seq_file *m, void *v)
263 {
264         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
265
266         seq_printf(m, "%u\n", sbi->ll_xattr_cache_enabled);
267         return 0;
268 }
269
270 static ssize_t ll_xattr_cache_seq_write(struct file *file,
271                                         const char __user *buffer,
272                                         size_t count, loff_t *off)
273 {
274         struct seq_file *m = file->private_data;
275         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
276         bool val;
277         int rc;
278
279         rc = kstrtobool_from_user(buffer, count, &val);
280         if (rc)
281                 return rc;
282
283         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
284                 return -ENOTSUPP;
285
286         sbi->ll_xattr_cache_enabled = val;
287         sbi->ll_xattr_cache_set = 1;
288
289         return count;
290 }
291 LPROC_SEQ_FOPS(ll_xattr_cache);
292
293 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
294 {
295         struct super_block *sb = m->private;
296
297         /*
298          * See description of statistical counters in struct cl_site, and
299          * struct lu_site.
300          */
301         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
302 }
303 LPROC_SEQ_FOPS_RO(ll_site_stats);
304
305 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
306 {
307         struct super_block *sb = m->private;
308         struct ll_sb_info *sbi = ll_s2sbi(sb);
309         long pages_number;
310         int mult;
311
312         spin_lock(&sbi->ll_lock);
313         pages_number = sbi->ll_ra_info.ra_max_pages;
314         spin_unlock(&sbi->ll_lock);
315
316         mult = 1 << (20 - PAGE_SHIFT);
317         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
318 }
319
320 static ssize_t
321 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
322                               size_t count, loff_t *off)
323 {
324         struct seq_file *m = file->private_data;
325         struct super_block *sb = m->private;
326         struct ll_sb_info *sbi = ll_s2sbi(sb);
327         __s64 pages_number;
328         int rc;
329
330         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
331         if (rc)
332                 return rc;
333
334         pages_number >>= PAGE_SHIFT;
335
336         if (pages_number < 0 || pages_number > totalram_pages / 2) {
337                 /* 1/2 of RAM */
338                 CERROR("%s: can't set max_read_ahead_mb=%lu > %luMB\n",
339                        ll_get_fsname(sb, NULL, 0),
340                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
341                        totalram_pages >> (20 - PAGE_SHIFT + 1));
342                 return -ERANGE;
343         }
344
345         spin_lock(&sbi->ll_lock);
346         sbi->ll_ra_info.ra_max_pages = pages_number;
347         spin_unlock(&sbi->ll_lock);
348         return count;
349 }
350 LPROC_SEQ_FOPS(ll_max_readahead_mb);
351
352 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
353 {
354         struct super_block *sb = m->private;
355         struct ll_sb_info *sbi = ll_s2sbi(sb);
356         long pages_number;
357         int mult;
358
359         spin_lock(&sbi->ll_lock);
360         pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
361         spin_unlock(&sbi->ll_lock);
362
363         mult = 1 << (20 - PAGE_SHIFT);
364         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
365 }
366
367 static ssize_t
368 ll_max_readahead_per_file_mb_seq_write(struct file *file,
369                                        const char __user *buffer,
370                                        size_t count, loff_t *off)
371 {
372         struct seq_file *m = file->private_data;
373         struct super_block *sb = m->private;
374         struct ll_sb_info *sbi = ll_s2sbi(sb);
375         int rc;
376         __s64 pages_number;
377
378         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
379         if (rc)
380                 return rc;
381
382         pages_number >>= PAGE_SHIFT;
383
384         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
385                 CERROR("%s: can't set max_read_ahead_per_file_mb=%lu > "
386                        "max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0),
387                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
388                        sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT));
389                 return -ERANGE;
390         }
391
392         spin_lock(&sbi->ll_lock);
393         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
394         spin_unlock(&sbi->ll_lock);
395         return count;
396 }
397 LPROC_SEQ_FOPS(ll_max_readahead_per_file_mb);
398
399 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
400 {
401         struct super_block *sb = m->private;
402         struct ll_sb_info *sbi = ll_s2sbi(sb);
403         long pages_number;
404         int mult;
405
406         spin_lock(&sbi->ll_lock);
407         pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
408         spin_unlock(&sbi->ll_lock);
409
410         mult = 1 << (20 - PAGE_SHIFT);
411         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
412 }
413
414 static ssize_t
415 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
416                                      const char __user *buffer,
417                                      size_t count, loff_t *off)
418 {
419         struct seq_file *m = file->private_data;
420         struct super_block *sb = m->private;
421         struct ll_sb_info *sbi = ll_s2sbi(sb);
422         int rc;
423         __s64 pages_number;
424
425         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
426         if (rc)
427                 return rc;
428
429         pages_number >>= PAGE_SHIFT;
430
431         /* Cap this at the current max readahead window size, the readahead
432          * algorithm does this anyway so it's pointless to set it larger. */
433         if (pages_number < 0 ||
434             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
435                 int pages_shift = 20 - PAGE_SHIFT;
436                 CERROR("%s: can't set max_read_ahead_whole_mb=%lu > "
437                        "max_read_ahead_per_file_mb=%lu\n",
438                        ll_get_fsname(sb, NULL, 0),
439                        (unsigned long)pages_number >> pages_shift,
440                        sbi->ll_ra_info.ra_max_pages_per_file >> pages_shift);
441                 return -ERANGE;
442         }
443
444         spin_lock(&sbi->ll_lock);
445         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
446         spin_unlock(&sbi->ll_lock);
447         return count;
448 }
449 LPROC_SEQ_FOPS(ll_max_read_ahead_whole_mb);
450
451 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
452 {
453         struct super_block     *sb    = m->private;
454         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
455         struct cl_client_cache *cache = sbi->ll_cache;
456         int shift = 20 - PAGE_SHIFT;
457         long max_cached_mb;
458         long unused_mb;
459
460         max_cached_mb = cache->ccc_lru_max >> shift;
461         unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
462         seq_printf(m, "users: %d\n"
463                    "max_cached_mb: %ld\n"
464                    "used_mb: %ld\n"
465                    "unused_mb: %ld\n"
466                    "reclaim_count: %u\n",
467                    atomic_read(&cache->ccc_users),
468                    max_cached_mb,
469                    max_cached_mb - unused_mb,
470                    unused_mb,
471                    cache->ccc_lru_shrinkers);
472         return 0;
473 }
474
475 static ssize_t
476 ll_max_cached_mb_seq_write(struct file *file, const char __user *buffer,
477                            size_t count, loff_t *off)
478 {
479         struct seq_file *m = file->private_data;
480         struct super_block *sb = m->private;
481         struct ll_sb_info *sbi = ll_s2sbi(sb);
482         struct cl_client_cache *cache = sbi->ll_cache;
483         struct lu_env *env;
484         long diff = 0;
485         long nrpages = 0;
486         __u16 refcheck;
487         __s64 pages_number;
488         long rc;
489         char kernbuf[128];
490         ENTRY;
491
492         if (count >= sizeof(kernbuf))
493                 RETURN(-EINVAL);
494
495         if (copy_from_user(kernbuf, buffer, count))
496                 RETURN(-EFAULT);
497         kernbuf[count] = 0;
498
499         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
500                   kernbuf;
501         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
502         if (rc)
503                 RETURN(rc);
504
505         pages_number >>= PAGE_SHIFT;
506
507         if (pages_number < 0 || pages_number > totalram_pages) {
508                 CERROR("%s: can't set max cache more than %lu MB\n",
509                        ll_get_fsname(sb, NULL, 0),
510                        totalram_pages >> (20 - PAGE_SHIFT));
511                 RETURN(-ERANGE);
512         }
513         /* Allow enough cache so clients can make well-formed RPCs */
514         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
515
516         spin_lock(&sbi->ll_lock);
517         diff = pages_number - cache->ccc_lru_max;
518         spin_unlock(&sbi->ll_lock);
519
520         /* easy - add more LRU slots. */
521         if (diff >= 0) {
522                 atomic_long_add(diff, &cache->ccc_lru_left);
523                 GOTO(out, rc = 0);
524         }
525
526         env = cl_env_get(&refcheck);
527         if (IS_ERR(env))
528                 RETURN(PTR_ERR(env));
529
530         diff = -diff;
531         while (diff > 0) {
532                 long tmp;
533
534                 /* reduce LRU budget from free slots. */
535                 do {
536                         long ov, nv;
537
538                         ov = atomic_long_read(&cache->ccc_lru_left);
539                         if (ov == 0)
540                                 break;
541
542                         nv = ov > diff ? ov - diff : 0;
543                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
544                         if (likely(ov == rc)) {
545                                 diff -= ov - nv;
546                                 nrpages += ov - nv;
547                                 break;
548                         }
549                 } while (1);
550
551                 if (diff <= 0)
552                         break;
553
554                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
555                         rc = -ENODEV;
556                         break;
557                 }
558
559                 /* difficult - have to ask OSCs to drop LRU slots. */
560                 tmp = diff << 1;
561                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
562                                 sizeof(KEY_CACHE_LRU_SHRINK),
563                                 KEY_CACHE_LRU_SHRINK,
564                                 sizeof(tmp), &tmp, NULL);
565                 if (rc < 0)
566                         break;
567         }
568         cl_env_put(env, &refcheck);
569
570 out:
571         if (rc >= 0) {
572                 spin_lock(&sbi->ll_lock);
573                 cache->ccc_lru_max = pages_number;
574                 spin_unlock(&sbi->ll_lock);
575                 rc = count;
576         } else {
577                 atomic_long_add(nrpages, &cache->ccc_lru_left);
578         }
579         return rc;
580 }
581 LPROC_SEQ_FOPS(ll_max_cached_mb);
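/*
 * Illustrative sketch (not part of the original file): shrinking the cache is
 * done by first draining free LRU slots with a compare-and-swap loop (no lock
 * held), and only then asking the OSCs to give pages back.  The same "take at
 * most 'want' from a shared budget" step, rewritten as a stand-alone C11
 * atomics program (take_from_budget() is a made-up name for this sketch):
 */
#if 0	/* example only, not built */
#include <stdatomic.h>
#include <stdio.h>

static long take_from_budget(atomic_long *budget, long want)
{
	long ov, nv;

	do {
		ov = atomic_load(budget);
		if (ov == 0)
			return 0;		/* nothing free to reclaim */
		nv = ov > want ? ov - want : 0;
	} while (!atomic_compare_exchange_weak(budget, &ov, nv));

	return ov - nv;				/* how much was actually taken */
}

int main(void)
{
	atomic_long budget = 100;
	long took = take_from_budget(&budget, 30);

	printf("took %ld, left %ld\n", took, atomic_load(&budget));
	return 0;
}
#endif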
582
583 static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
584                                    char *buf)
585 {
586         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
587                                               ll_kset.kobj);
588
589         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
590 }
591
592 static ssize_t checksum_pages_store(struct kobject *kobj,
593                                     struct attribute *attr,
594                                     const char *buffer,
595                                     size_t count)
596 {
597         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
598                                               ll_kset.kobj);
599         bool val;
600         int tmp;
601         int rc;
602
603         if (!sbi->ll_dt_exp)
604                 /* Not set up yet */
605                 return -EAGAIN;
606
607         rc = kstrtobool(buffer, &val);
608         if (rc)
609                 return rc;
610         if (val)
611                 sbi->ll_flags |= LL_SBI_CHECKSUM;
612         else
613                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
614         tmp = val;
615
616         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
617                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
618         if (rc)
619                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
620
621         return count;
622 }
623 LUSTRE_RW_ATTR(checksum_pages);
624
625 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
626                               enum stats_track_type type)
627 {
628         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
629                                               ll_kset.kobj);
630
631         if (sbi->ll_stats_track_type == type)
632                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
633         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
634                 return sprintf(buf, "0 (all)\n");
635
636         return sprintf(buf, "untracked\n");
637 }
638
639 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
640                               size_t count, enum stats_track_type type)
641 {
642         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
643                                               ll_kset.kobj);
644         unsigned long pid;
645         int rc;
646
647         rc = kstrtoul(buffer, 10, &pid);
648         if (rc)
649                 return rc;
650
651         sbi->ll_stats_track_id = pid;
652         if (pid == 0)
653                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
654         else
655                 sbi->ll_stats_track_type = type;
656         lprocfs_clear_stats(sbi->ll_stats);
657         return count;
658 }
659
660 static ssize_t stats_track_pid_show(struct kobject *kobj,
661                                     struct attribute *attr,
662                                     char *buf)
663 {
664         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
665 }
666
667 static ssize_t stats_track_pid_store(struct kobject *kobj,
668                                      struct attribute *attr,
669                                      const char *buffer,
670                                      size_t count)
671 {
672         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
673 }
674 LUSTRE_RW_ATTR(stats_track_pid);
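/*
 * Illustrative sketch (not part of the original file): restricting the
 * "stats" file to a single process is a plain sysfs write.  The instance
 * directory name below is a made-up example; the real directory is named
 * after the filesystem and superblock instance.
 */
#if 0	/* example only, not built */
#include <stdio.h>

int main(void)
{
	/* hypothetical path, adjust to the actual llite instance */
	FILE *f = fopen("/sys/fs/lustre/llite/lustre-ffff8800b4a2d000/"
			"stats_track_pid", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 12345);	/* only PID 12345 is tallied now */
	return fclose(f) ? 1 : 0;
}
#endif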
675
676 static ssize_t stats_track_ppid_show(struct kobject *kobj,
677                                      struct attribute *attr,
678                                      char *buf)
679 {
680         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
681 }
682
683 static ssize_t stats_track_ppid_store(struct kobject *kobj,
684                                       struct attribute *attr,
685                                       const char *buffer,
686                                       size_t count)
687 {
688         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
689 }
690 LUSTRE_RW_ATTR(stats_track_ppid);
691
692 static ssize_t stats_track_gid_show(struct kobject *kobj,
693                                     struct attribute *attr,
694                                     char *buf)
695 {
696         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
697 }
698
699 static ssize_t stats_track_gid_store(struct kobject *kobj,
700                                      struct attribute *attr,
701                                      const char *buffer,
702                                      size_t count)
703 {
704         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
705 }
706 LUSTRE_RW_ATTR(stats_track_gid);
707
708 static ssize_t statahead_running_max_show(struct kobject *kobj,
709                                           struct attribute *attr,
710                                           char *buf)
711 {
712         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
713                                               ll_kset.kobj);
714
715         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
716 }
717
718 static ssize_t statahead_running_max_store(struct kobject *kobj,
719                                            struct attribute *attr,
720                                            const char *buffer,
721                                            size_t count)
722 {
723         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
724                                               ll_kset.kobj);
725         unsigned long val;
726         int rc;
727
728         rc = kstrtoul(buffer, 0, &val);
729         if (rc)
730                 return rc;
731
732         if (val <= LL_SA_RUNNING_MAX) {
733                 sbi->ll_sa_running_max = val;
734                 return count;
735         }
736
737         CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
738                val, LL_SA_RUNNING_MAX);
739
740         return -ERANGE;
741 }
742 LUSTRE_RW_ATTR(statahead_running_max);
743
744 static ssize_t statahead_max_show(struct kobject *kobj,
745                                   struct attribute *attr,
746                                   char *buf)
747 {
748         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
749                                               ll_kset.kobj);
750
751         return sprintf(buf, "%u\n", sbi->ll_sa_max);
752 }
753
754 static ssize_t statahead_max_store(struct kobject *kobj,
755                                    struct attribute *attr,
756                                    const char *buffer,
757                                    size_t count)
758 {
759         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
760                                               ll_kset.kobj);
761         unsigned long val;
762         int rc;
763
764         rc = kstrtoul(buffer, 0, &val);
765         if (rc)
766                 return rc;
767
768         if (val <= LL_SA_RPC_MAX)
769                 sbi->ll_sa_max = val;
770         else
771                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
772                        val, LL_SA_RPC_MAX);
773
774         return count;
775 }
776 LUSTRE_RW_ATTR(statahead_max);
777
778 static ssize_t statahead_agl_show(struct kobject *kobj,
779                                   struct attribute *attr,
780                                   char *buf)
781 {
782         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
783                                               ll_kset.kobj);
784
785         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
786 }
787
788 static ssize_t statahead_agl_store(struct kobject *kobj,
789                                    struct attribute *attr,
790                                    const char *buffer,
791                                    size_t count)
792 {
793         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
794                                               ll_kset.kobj);
795         bool val;
796         int rc;
797
798         rc = kstrtobool(buffer, &val);
799         if (rc)
800                 return rc;
801
802         if (val)
803                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
804         else
805                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
806
807         return count;
808 }
809 LUSTRE_RW_ATTR(statahead_agl);
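/*
 * Illustrative sketch (not part of the original file): with LU-8066 these two
 * tunables are plain sysfs attributes, so they can be read and written like
 * any other text file.  The instance directory name below is a made-up
 * example.
 */
#if 0	/* example only, not built */
#include <stdio.h>

#define LLITE_DIR "/sys/fs/lustre/llite/lustre-ffff8800b4a2d000/" /* example */

int main(void)
{
	char line[32];
	FILE *f = fopen(LLITE_DIR "statahead_max", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("statahead_max: %s", line);
	if (f)
		fclose(f);

	f = fopen(LLITE_DIR "statahead_agl", "w");
	if (f) {
		fputs("1\n", f);	/* enable async glimpse locking */
		fclose(f);
	}
	return 0;
}
#endif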
810
811 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
812 {
813         struct super_block *sb = m->private;
814         struct ll_sb_info *sbi = ll_s2sbi(sb);
815
816         seq_printf(m, "statahead total: %u\n"
817                     "statahead wrong: %u\n"
818                     "agl total: %u\n",
819                     atomic_read(&sbi->ll_sa_total),
820                     atomic_read(&sbi->ll_sa_wrong),
821                     atomic_read(&sbi->ll_agl_total));
822         return 0;
823 }
824 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
825
826 static int ll_lazystatfs_seq_show(struct seq_file *m, void *v)
827 {
828         struct super_block *sb = m->private;
829         struct ll_sb_info *sbi = ll_s2sbi(sb);
830
831         seq_printf(m, "%u\n",
832                    (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
833         return 0;
834 }
835
836 static ssize_t ll_lazystatfs_seq_write(struct file *file,
837                                        const char __user *buffer,
838                                         size_t count, loff_t *off)
839 {
840         struct seq_file *m = file->private_data;
841         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
842         bool val;
843         int rc;
844
845         rc = kstrtobool_from_user(buffer, count, &val);
846         if (rc)
847                 return rc;
848
849         if (val)
850                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
851         else
852                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
853
854         return count;
855 }
856 LPROC_SEQ_FOPS(ll_lazystatfs);
857
858 static int ll_max_easize_seq_show(struct seq_file *m, void *v)
859 {
860         struct super_block *sb = m->private;
861         struct ll_sb_info *sbi = ll_s2sbi(sb);
862         unsigned int ealen;
863         int rc;
864
865         rc = ll_get_max_mdsize(sbi, &ealen);
866         if (rc)
867                 return rc;
868
869         seq_printf(m, "%u\n", ealen);
870         return 0;
871 }
872 LPROC_SEQ_FOPS_RO(ll_max_easize);
873
874 /**
875  * Get default_easize.
876  *
877  * \see client_obd::cl_default_mds_easize
878  *
879  * \param[in] m         seq_file handle
880  * \param[in] v         unused for single entry
881  *
882  * \retval 0            on success
883  * \retval negative     negated errno on failure
884  */
885 static int ll_default_easize_seq_show(struct seq_file *m, void *v)
886 {
887         struct super_block *sb = m->private;
888         struct ll_sb_info *sbi = ll_s2sbi(sb);
889         unsigned int ealen;
890         int rc;
891
892         rc = ll_get_default_mdsize(sbi, &ealen);
893         if (rc)
894                 return rc;
895
896         seq_printf(m, "%u\n", ealen);
897         return 0;
898 }
899
900 /**
901  * Set default_easize.
902  *
903  * Range checking on the passed value is handled by
904  * ll_set_default_mdsize().
905  *
906  * \see client_obd::cl_default_mds_easize
907  *
908  * \param[in] file      proc file
909  * \param[in] buffer    string passed from user space
910  * \param[in] count     \a buffer length
911  * \param[in] off       unused for single entry
912  *
913  * \retval positive     \a count on success
914  * \retval negative     negated errno on failure
915  */
916 static ssize_t ll_default_easize_seq_write(struct file *file,
917                                            const char __user *buffer,
918                                            size_t count, loff_t *unused)
919 {
920         struct seq_file *seq = file->private_data;
921         struct super_block *sb = (struct super_block *)seq->private;
922         struct ll_sb_info *sbi = ll_s2sbi(sb);
923         unsigned int val;
924         int rc;
925
926         if (count == 0)
927                 return 0;
928
929         rc = kstrtouint_from_user(buffer, count, 0, &val);
930         if (rc)
931                 return rc;
932
933         rc = ll_set_default_mdsize(sbi, val);
934         if (rc)
935                 return rc;
936
937         return count;
938 }
939 LPROC_SEQ_FOPS(ll_default_easize);
940
941 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
942 {
943         const char *str[] = LL_SBI_FLAGS;
944         struct super_block *sb = m->private;
945         int flags = ll_s2sbi(sb)->ll_flags;
946         int i = 0;
947
948         while (flags != 0) {
949                 if (ARRAY_SIZE(str) <= i) {
950                         CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags\n",
951                                ll_get_fsname(sb, NULL, 0));
952                         return -EINVAL;
953                 }
954
955                 if (flags & 0x1)
956                         seq_printf(m, "%s ", str[i]);
957                 flags >>= 1;
958                 ++i;
959         }
960         seq_printf(m, "\b\n");
961         return 0;
962 }
963 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
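/*
 * Illustrative sketch (not part of the original file): ll_sbi_flags_seq_show()
 * walks the flag word bit by bit and prints the name of every bit that is set,
 * indexing a string table that must stay in step with the flag definitions.
 * The same loop on a made-up three-entry table:
 */
#if 0	/* example only, not built */
#include <stdio.h>

int main(void)
{
	static const char *str[] = { "nolck", "checksum", "flock" };
	unsigned int flags = 0x5;	/* bits 0 and 2 set */
	int i = 0;

	while (flags != 0) {
		if (flags & 0x1)
			printf("%s ", str[i]);
		flags >>= 1;
		i++;
	}
	printf("\n");			/* prints: nolck flock */
	return 0;
}
#endif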
964
965 static int ll_tiny_write_seq_show(struct seq_file *m, void *v)
966 {
967         struct super_block *sb = m->private;
968         struct ll_sb_info *sbi = ll_s2sbi(sb);
969
970         seq_printf(m, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
971         return 0;
972 }
973
974 static ssize_t ll_tiny_write_seq_write(
975         struct file *file, const char __user *buffer, size_t count, loff_t *off)
976 {
977         struct seq_file *m = file->private_data;
978         struct super_block *sb = m->private;
979         struct ll_sb_info *sbi = ll_s2sbi(sb);
980         bool val;
981         int rc;
982
983         rc = kstrtobool_from_user(buffer, count, &val);
984         if (rc)
985                 return rc;
986
987         spin_lock(&sbi->ll_lock);
988         if (val)
989                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
990         else
991                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
992         spin_unlock(&sbi->ll_lock);
993
994         return count;
995 }
996 LPROC_SEQ_FOPS(ll_tiny_write);
997
998 static int ll_fast_read_seq_show(struct seq_file *m, void *v)
999 {
1000         struct super_block *sb = m->private;
1001         struct ll_sb_info *sbi = ll_s2sbi(sb);
1002
1003         seq_printf(m, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1004         return 0;
1005 }
1006
1007 static ssize_t
1008 ll_fast_read_seq_write(struct file *file, const char __user *buffer,
1009                        size_t count, loff_t *off)
1010 {
1011         struct seq_file *m = file->private_data;
1012         struct super_block *sb = m->private;
1013         struct ll_sb_info *sbi = ll_s2sbi(sb);
1014         bool val;
1015         int rc;
1016
1017         rc = kstrtobool_from_user(buffer, count, &val);
1018         if (rc)
1019                 return rc;
1020
1021         spin_lock(&sbi->ll_lock);
1022         if (val)
1023                 sbi->ll_flags |= LL_SBI_FAST_READ;
1024         else
1025                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1026         spin_unlock(&sbi->ll_lock);
1027
1028         return count;
1029 }
1030 LPROC_SEQ_FOPS(ll_fast_read);
1031
1032 static int ll_pio_seq_show(struct seq_file *m, void *v)
1033 {
1034         struct super_block *sb = m->private;
1035         struct ll_sb_info *sbi = ll_s2sbi(sb);
1036
1037         seq_printf(m, "%u\n", !!(sbi->ll_flags & LL_SBI_PIO));
1038         return 0;
1039 }
1040
1041 static ssize_t ll_pio_seq_write(struct file *file, const char __user *buffer,
1042                                 size_t count, loff_t *off)
1043 {
1044         struct seq_file *m = file->private_data;
1045         struct super_block *sb = m->private;
1046         struct ll_sb_info *sbi = ll_s2sbi(sb);
1047         bool val;
1048         int rc;
1049
1050         rc = kstrtobool_from_user(buffer, count, &val);
1051         if (rc)
1052                 return rc;
1053
1054         spin_lock(&sbi->ll_lock);
1055         if (val)
1056                 sbi->ll_flags |= LL_SBI_PIO;
1057         else
1058                 sbi->ll_flags &= ~LL_SBI_PIO;
1059         spin_unlock(&sbi->ll_lock);
1060
1061         return count;
1062 }
1063 LPROC_SEQ_FOPS(ll_pio);
1064
1065 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1066 {
1067         struct super_block      *sb    = m->private;
1068         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1069         struct cl_client_cache  *cache = sbi->ll_cache;
1070         long pages;
1071         int mb;
1072
1073         pages = atomic_long_read(&cache->ccc_unstable_nr);
1074         mb    = (pages * PAGE_SIZE) >> 20;
1075
1076         seq_printf(m, "unstable_check:     %8d\n"
1077                    "unstable_pages: %12ld\n"
1078                    "unstable_mb:        %8d\n",
1079                    cache->ccc_unstable_check, pages, mb);
1080         return 0;
1081 }
1082
1083 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1084                                            const char __user *buffer,
1085                                            size_t count, loff_t *unused)
1086 {
1087         struct seq_file *seq = file->private_data;
1088         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1089         char kernbuf[128];
1090         bool val;
1091         int rc;
1092
1093         if (count == 0)
1094                 return 0;
1095         if (count >= sizeof(kernbuf))
1096                 return -EINVAL;
1097
1098         if (copy_from_user(kernbuf, buffer, count))
1099                 return -EFAULT;
1100         kernbuf[count] = 0;
1101
1102         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1103                   kernbuf;
1104         rc = kstrtobool_from_user(buffer, count, &val);
1105         if (rc < 0)
1106                 return rc;
1107
1108         /* borrow lru lock to set the value */
1109         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1110         sbi->ll_cache->ccc_unstable_check = val;
1111         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1112
1113         return count;
1114 }
1115 LPROC_SEQ_FOPS(ll_unstable_stats);
1116
1117 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1118 {
1119         struct super_block *sb = m->private;
1120         struct ll_sb_info *sbi = ll_s2sbi(sb);
1121         struct root_squash_info *squash = &sbi->ll_squash;
1122
1123         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1124         return 0;
1125 }
1126
1127 static ssize_t ll_root_squash_seq_write(struct file *file,
1128                                         const char __user *buffer,
1129                                         size_t count, loff_t *off)
1130 {
1131         struct seq_file *m = file->private_data;
1132         struct super_block *sb = m->private;
1133         struct ll_sb_info *sbi = ll_s2sbi(sb);
1134         struct root_squash_info *squash = &sbi->ll_squash;
1135
1136         return lprocfs_wr_root_squash(buffer, count, squash,
1137                                       ll_get_fsname(sb, NULL, 0));
1138 }
1139 LPROC_SEQ_FOPS(ll_root_squash);
1140
1141 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1142 {
1143         struct super_block *sb = m->private;
1144         struct ll_sb_info *sbi = ll_s2sbi(sb);
1145         struct root_squash_info *squash = &sbi->ll_squash;
1146         int len;
1147
1148         down_read(&squash->rsi_sem);
1149         if (!list_empty(&squash->rsi_nosquash_nids)) {
1150                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1151                                         &squash->rsi_nosquash_nids);
1152                 m->count += len;
1153                 seq_putc(m, '\n');
1154         } else {
1155                 seq_puts(m, "NONE\n");
1156         }
1157         up_read(&squash->rsi_sem);
1158
1159         return 0;
1160 }
1161
1162 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1163                                           const char __user *buffer,
1164                                           size_t count, loff_t *off)
1165 {
1166         struct seq_file *m = file->private_data;
1167         struct super_block *sb = m->private;
1168         struct ll_sb_info *sbi = ll_s2sbi(sb);
1169         struct root_squash_info *squash = &sbi->ll_squash;
1170         int rc;
1171
1172         rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
1173                                       ll_get_fsname(sb, NULL, 0));
1174         if (rc < 0)
1175                 return rc;
1176
1177         ll_compute_rootsquash_state(sbi);
1178
1179         return rc;
1180 }
1181 LPROC_SEQ_FOPS(ll_nosquash_nids);
1182
1183 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1184         { .name =       "site",
1185           .fops =       &ll_site_stats_fops                     },
1186         { .name =       "stat_blocksize",
1187           .fops =       &ll_stat_blksize_fops                   },
1188         { .name =       "max_read_ahead_mb",
1189           .fops =       &ll_max_readahead_mb_fops               },
1190         { .name =       "max_read_ahead_per_file_mb",
1191           .fops =       &ll_max_readahead_per_file_mb_fops      },
1192         { .name =       "max_read_ahead_whole_mb",
1193           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1194         { .name =       "max_cached_mb",
1195           .fops =       &ll_max_cached_mb_fops                  },
1196         { .name =       "statahead_stats",
1197           .fops =       &ll_statahead_stats_fops                },
1198         { .name =       "lazystatfs",
1199           .fops =       &ll_lazystatfs_fops                     },
1200         { .name =       "max_easize",
1201           .fops =       &ll_max_easize_fops                     },
1202         { .name =       "default_easize",
1203           .fops =       &ll_default_easize_fops                 },
1204         { .name =       "sbi_flags",
1205           .fops =       &ll_sbi_flags_fops                      },
1206         { .name =       "xattr_cache",
1207           .fops =       &ll_xattr_cache_fops                    },
1208         { .name =       "unstable_stats",
1209           .fops =       &ll_unstable_stats_fops                 },
1210         { .name =       "root_squash",
1211           .fops =       &ll_root_squash_fops                    },
1212         { .name =       "nosquash_nids",
1213           .fops =       &ll_nosquash_nids_fops                  },
1214         { .name =       "fast_read",
1215           .fops =       &ll_fast_read_fops,                     },
1216         { .name =       "pio",
1217           .fops =       &ll_pio_fops,                           },
1218         { .name =       "tiny_write",
1219           .fops =       &ll_tiny_write_fops,                    },
1220         { NULL }
1221 };
1222
1223 #define MAX_STRING_SIZE 128
1224
1225 static struct attribute *llite_attrs[] = {
1226         &lustre_attr_blocksize.attr,
1227         &lustre_attr_kbytestotal.attr,
1228         &lustre_attr_kbytesfree.attr,
1229         &lustre_attr_kbytesavail.attr,
1230         &lustre_attr_filestotal.attr,
1231         &lustre_attr_filesfree.attr,
1232         &lustre_attr_client_type.attr,
1233         &lustre_attr_fstype.attr,
1234         &lustre_attr_uuid.attr,
1235         &lustre_attr_checksum_pages.attr,
1236         &lustre_attr_stats_track_pid.attr,
1237         &lustre_attr_stats_track_ppid.attr,
1238         &lustre_attr_stats_track_gid.attr,
1239         &lustre_attr_statahead_running_max.attr,
1240         &lustre_attr_statahead_max.attr,
1241         &lustre_attr_statahead_agl.attr,
1242         NULL,
1243 };
1244
1245 static void llite_kobj_release(struct kobject *kobj)
1246 {
1247         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1248                                               ll_kset.kobj);
1249         complete(&sbi->ll_kobj_unregister);
1250 }
1251
1252 static struct kobj_type llite_ktype = {
1253         .default_attrs  = llite_attrs,
1254         .sysfs_ops      = &lustre_sysfs_ops,
1255         .release        = llite_kobj_release,
1256 };
1257
1258 static const struct llite_file_opcode {
1259         __u32       opcode;
1260         __u32       type;
1261         const char *opname;
1262 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1263         /* file operation */
1264         { LPROC_LL_DIRTY_HITS,     LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1265         { LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1266         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1267                                    "read_bytes" },
1268         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1269                                    "write_bytes" },
1270         { LPROC_LL_BRW_READ,       LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1271                                    "brw_read" },
1272         { LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1273                                    "brw_write" },
1274         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1275         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1276         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1277         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1278         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1279         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1280         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1281         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1282         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1283         /* inode operation */
1284         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1285         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1286         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1287         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1288         /* dir inode operation */
1289         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1290         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1291         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1292         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1293         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1294         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1295         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1296         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1297         /* special inode operation */
1298         { LPROC_LL_STAFS,          LPROCFS_TYPE_REGS, "statfs" },
1299         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1300         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1301         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1302         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1303         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1304         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1305         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1306 };
1307
1308 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1309 {
1310         if (!sbi->ll_stats)
1311                 return;
1312         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1313                 lprocfs_counter_add(sbi->ll_stats, op, count);
1314         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1315                  sbi->ll_stats_track_id == current->pid)
1316                 lprocfs_counter_add(sbi->ll_stats, op, count);
1317         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1318                  sbi->ll_stats_track_id == current->parent->pid)
1319                 lprocfs_counter_add(sbi->ll_stats, op, count);
1320         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1321                  sbi->ll_stats_track_id ==
1322                         from_kgid(&init_user_ns, current_gid()))
1323                 lprocfs_counter_add(sbi->ll_stats, op, count);
1324 }
1325 EXPORT_SYMBOL(ll_stats_ops_tally);
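/*
 * Illustrative sketch (not part of the original file): callers elsewhere in
 * llite bump these counters with one line per operation; whether the sample is
 * recorded depends on the stats_track_* settings handled above.  A typical
 * call site looks like the fragment below (bytes_read is a hypothetical local
 * variable at such a call site).
 */
#if 0	/* example only, not built */
	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_READ_BYTES, bytes_read);
#endif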
1326
1327 static const char *ra_stat_string[] = {
1328         [RA_STAT_HIT] = "hits",
1329         [RA_STAT_MISS] = "misses",
1330         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1331         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1332         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1333         [RA_STAT_FAILED_MATCH] = "failed lock match",
1334         [RA_STAT_DISCARDED] = "read but discarded",
1335         [RA_STAT_ZERO_LEN] = "zero length file",
1336         [RA_STAT_ZERO_WINDOW] = "zero size window",
1337         [RA_STAT_EOF] = "read-ahead to EOF",
1338         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1339         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1340         [RA_STAT_FAILED_REACH_END] = "failed to reach end"
1341 };
1342
1343 LPROC_SEQ_FOPS_RO_TYPE(llite, name);
1344 LPROC_SEQ_FOPS_RO_TYPE(llite, uuid);
1345
1346 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1347 {
1348         struct lustre_sb_info *lsi = s2lsi(sb);
1349         struct ll_sb_info *sbi = ll_s2sbi(sb);
1350         struct lprocfs_vars lvars[2];
1351         int err, id, rc;
1352
1353         ENTRY;
1354         memset(lvars, 0, sizeof(lvars));
1355         lvars[0].name = name;
1356
1357         LASSERT(sbi != NULL);
1358
1359         sbi->ll_proc_root = lprocfs_register(name, proc_lustre_fs_root,
1360                                              NULL, NULL);
1361         if (IS_ERR(sbi->ll_proc_root)) {
1362                 err = PTR_ERR(sbi->ll_proc_root);
1363                 sbi->ll_proc_root = NULL;
1364                 RETURN(err);
1365         }
1366
1367         rc = lprocfs_seq_create(sbi->ll_proc_root, "dump_page_cache", 0444,
1368                                 &vvp_dump_pgcache_file_ops, sbi);
1369         if (rc)
1370                 CWARN("Error adding the dump_page_cache file\n");
1371
1372         rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats", 0644,
1373                                 &ll_rw_extents_stats_fops, sbi);
1374         if (rc)
1375                 CWARN("Error adding the extent_stats file\n");
1376
1377         rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats_per_process",
1378                                 0644, &ll_rw_extents_stats_pp_fops, sbi);
1379         if (rc)
1380                 CWARN("Error adding the extents_stats_per_process file\n");
1381
1382         rc = lprocfs_seq_create(sbi->ll_proc_root, "offset_stats", 0644,
1383                                 &ll_rw_offset_stats_fops, sbi);
1384         if (rc)
1385                 CWARN("Error adding the offset_stats file\n");
1386
1387         /* File operations stats */
1388         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1389                                             LPROCFS_STATS_FLAG_NONE);
1390         if (sbi->ll_stats == NULL)
1391                 GOTO(out_proc, err = -ENOMEM);
1392
1393         /* do counter init */
1394         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1395                 __u32 type = llite_opcode_table[id].type;
1396                 void *ptr = NULL;
1397                 if (type & LPROCFS_TYPE_REGS)
1398                         ptr = "regs";
1399                 else if (type & LPROCFS_TYPE_BYTES)
1400                         ptr = "bytes";
1401                 else if (type & LPROCFS_TYPE_PAGES)
1402                         ptr = "pages";
1403                 lprocfs_counter_init(sbi->ll_stats,
1404                                      llite_opcode_table[id].opcode,
1405                                      (type & LPROCFS_CNTR_AVGMINMAX),
1406                                      llite_opcode_table[id].opname, ptr);
1407         }
1408
1409         err = lprocfs_register_stats(sbi->ll_proc_root, "stats", sbi->ll_stats);
1410         if (err)
1411                 GOTO(out_stats, err);
1412
1413         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1414                                                LPROCFS_STATS_FLAG_NONE);
1415         if (sbi->ll_ra_stats == NULL)
1416                 GOTO(out_stats, err = -ENOMEM);
1417
1418         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1419                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1420                                      ra_stat_string[id], "pages");
1421         err = lprocfs_register_stats(sbi->ll_proc_root, "read_ahead_stats",
1422                                      sbi->ll_ra_stats);
1423         if (err)
1424                 GOTO(out_ra_stats, err);
1425
1426         err = lprocfs_add_vars(sbi->ll_proc_root, lprocfs_llite_obd_vars, sb);
1427         if (err)
1428                 GOTO(out_ra_stats, err);
1429
1430         /* Register the sysfs mount kset here as well */
1431         sbi->ll_kset.kobj.parent = llite_kobj;
1432         sbi->ll_kset.kobj.ktype = &llite_ktype;
1433         init_completion(&sbi->ll_kobj_unregister);
1434         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1435         if (err)
1436                 GOTO(out_ra_stats, err);
1437
1438         err = kset_register(&sbi->ll_kset);
1439         if (err)
1440                 GOTO(out_ra_stats, err);
1441
1442         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1443
1444         RETURN(0);
1445 out_ra_stats:
1446         lprocfs_free_stats(&sbi->ll_ra_stats);
1447 out_stats:
1448         lprocfs_free_stats(&sbi->ll_stats);
1449 out_proc:
1450         lprocfs_remove(&sbi->ll_proc_root);
1451
1452         RETURN(err);
1453 }
1454
1455 int lprocfs_ll_register_obd(struct super_block *sb, const char *obdname)
1456 {
1457         struct lprocfs_vars lvars[2];
1458         struct ll_sb_info *sbi = ll_s2sbi(sb);
1459         struct obd_device *obd;
1460         struct proc_dir_entry *dir;
1461         char name[MAX_STRING_SIZE + 1];
1462         int err;
1463         ENTRY;
1464
1465         memset(lvars, 0, sizeof(lvars));
1466
1467         name[MAX_STRING_SIZE] = '\0';
1468         lvars[0].name = name;
1469
1470         LASSERT(sbi != NULL);
1471         LASSERT(obdname != NULL);
1472
1473         obd = class_name2obd(obdname);
1474
1475         LASSERT(obd != NULL);
1476         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
1477         LASSERT(obd->obd_type->typ_name != NULL);
1478
1479         dir = proc_mkdir(obd->obd_type->typ_name, sbi->ll_proc_root);
1480         if (dir == NULL)
1481                 GOTO(out, err = -ENOMEM);
1482
1483         snprintf(name, MAX_STRING_SIZE, "common_name");
1484         lvars[0].fops = &llite_name_fops;
1485         err = lprocfs_add_vars(dir, lvars, obd);
1486         if (err)
1487                 GOTO(out, err);
1488
1489         snprintf(name, MAX_STRING_SIZE, "uuid");
1490         lvars[0].fops = &llite_uuid_fops;
1491         err = lprocfs_add_vars(dir, lvars, obd);
1492         if (err)
1493                 GOTO(out, err);
1494
1495 out:
1496         if (err) {
1497                 lprocfs_remove(&sbi->ll_proc_root);
1498                 lprocfs_free_stats(&sbi->ll_ra_stats);
1499                 lprocfs_free_stats(&sbi->ll_stats);
1500         }
1501         RETURN(err);
1502 }
1503
1504 void ll_debugfs_unregister_super(struct super_block *sb)
1505 {
1506         struct lustre_sb_info *lsi = s2lsi(sb);
1507         struct ll_sb_info *sbi = ll_s2sbi(sb);
1508
1509         kobject_put(lsi->lsi_kobj);
1510
1511         kset_unregister(&sbi->ll_kset);
1512         wait_for_completion(&sbi->ll_kobj_unregister);
1513
1514         if (sbi->ll_proc_root) {
1515                 lprocfs_remove(&sbi->ll_proc_root);
1516                 lprocfs_free_stats(&sbi->ll_ra_stats);
1517                 lprocfs_free_stats(&sbi->ll_stats);
1518         }
1519 }
1520 #undef MAX_STRING_SIZE
1521
1522 #define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
1523
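/*
 * Print the I/O size histogram for one pp_extents[] slot: per bucket the
 * read/write call counts, percentage of the total and cumulative percentage.
 */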
1524 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1525                                    struct seq_file *seq, int which)
1526 {
1527         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1528         unsigned long start, end, r, w;
1529         const char *unitp = "KMGTPEZY";
1530         int i, units = 10;
1531         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1532
1533         read_cum = 0;
1534         write_cum = 0;
1535         start = 0;
1536
1537         for (i = 0; i < LL_HIST_MAX; i++) {
1538                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1539                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1540         }
1541
1542         for (i = 0; i < LL_HIST_MAX; i++) {
1543                 r = pp_info->pp_r_hist.oh_buckets[i];
1544                 w = pp_info->pp_w_hist.oh_buckets[i];
1545                 read_cum += r;
1546                 write_cum += w;
1547                 end = 1 << (i + LL_HIST_START - units);
1548                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu  | "
1549                            "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
1550                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1551                            r, pct(r, read_tot), pct(read_cum, read_tot),
1552                            w, pct(w, write_tot), pct(write_cum, write_tot));
1553                 start = end;
1554                 if (start == 1 << 10) {
1555                         start = 1;
1556                         units += 10;
1557                         unitp++;
1558                 }
1559                 if (read_cum == read_tot && write_cum == write_tot)
1560                         break;
1561         }
1562 }
1563
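/* Per-process extents histograms: one section per tracked PID. */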
1564 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1565 {
1566         struct timespec64 now;
1567         struct ll_sb_info *sbi = seq->private;
1568         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1569         int k;
1570
1571         ktime_get_real_ts64(&now);
1572
1573         if (!sbi->ll_rw_stats_on) {
1574                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1575                 return 0;
1576         }
1577         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1578                    (s64)now.tv_sec, now.tv_nsec);
1579         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1580         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1581                    "extents", "calls", "%", "cum%",
1582                    "calls", "%", "cum%");
1583         spin_lock(&sbi->ll_pp_extent_lock);
1584         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1585                 if (io_extents->pp_extents[k].pid != 0) {
1586                         seq_printf(seq, "\nPID: %d\n",
1587                                    io_extents->pp_extents[k].pid);
1588                         ll_display_extents_info(io_extents, seq, k);
1589                 }
1590         }
1591         spin_unlock(&sbi->ll_pp_extent_lock);
1592         return 0;
1593 }
1594
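/*
 * Any write clears the per-process histograms; writing 0 or "disable"
 * also turns collection off, anything else turns it on.
 */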
1595 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1596                                                 const char __user *buf,
1597                                                 size_t len,
1598                                                 loff_t *off)
1599 {
1600         struct seq_file *seq = file->private_data;
1601         struct ll_sb_info *sbi = seq->private;
1602         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1603         int i;
1604         __s64 value;
1605
1606         if (len == 0)
1607                 return -EINVAL;
1608
1609         value = ll_stats_pid_write(buf, len);
1610
1611         if (value == 0)
1612                 sbi->ll_rw_stats_on = 0;
1613         else
1614                 sbi->ll_rw_stats_on = 1;
1615
1616         spin_lock(&sbi->ll_pp_extent_lock);
1617         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1618                 io_extents->pp_extents[i].pid = 0;
1619                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1620                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1621         }
1622         spin_unlock(&sbi->ll_pp_extent_lock);
1623         return len;
1624 }
1625
1626 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
1627
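/* Aggregate extents histogram: the shared slot at index LL_PROCESS_HIST_MAX. */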
1628 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1629 {
1630         struct timespec64 now;
1631         struct ll_sb_info *sbi = seq->private;
1632         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1633
1634         ktime_get_real_ts64(&now);
1635
1636         if (!sbi->ll_rw_stats_on) {
1637                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1638                 return 0;
1639         }
1640         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1641                    (s64)now.tv_sec, now.tv_nsec);
1642
1643         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1644         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1645                    "extents", "calls", "%", "cum%",
1646                    "calls", "%", "cum%");
1647         spin_lock(&sbi->ll_lock);
1648         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1649         spin_unlock(&sbi->ll_lock);
1650
1651         return 0;
1652 }
1653
1654 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1655                                              const char __user *buf,
1656                                              size_t len, loff_t *off)
1657 {
1658         struct seq_file *seq = file->private_data;
1659         struct ll_sb_info *sbi = seq->private;
1660         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1661         int i;
1662         __s64 value;
1663
1664         if (len == 0)
1665                 return -EINVAL;
1666
1667         value = ll_stats_pid_write(buf, len);
1668
1669         if (value == 0)
1670                 sbi->ll_rw_stats_on = 0;
1671         else
1672                 sbi->ll_rw_stats_on = 1;
1673
1674         spin_lock(&sbi->ll_pp_extent_lock);
1675         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1676                 io_extents->pp_extents[i].pid = 0;
1677                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1678                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1679         }
1680         spin_unlock(&sbi->ll_pp_extent_lock);
1681
1682         return len;
1683 }
1684 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1685
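/*
 * Account one read (rw == 0) or write in both the extents histograms
 * (the caller's PID slot plus the aggregate slot) and the per-process
 * offset table; slots are recycled round-robin when the arrays fill up.
 */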
1686 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1687                        struct ll_file_data *file, loff_t pos,
1688                        size_t count, int rw)
1689 {
1690         int i, cur = -1;
1691         struct ll_rw_process_info *process;
1692         struct ll_rw_process_info *offset;
1693         int *off_count = &sbi->ll_rw_offset_entry_count;
1694         int *process_count = &sbi->ll_offset_process_count;
1695         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1696
1697         if (!sbi->ll_rw_stats_on)
1698                 return;
1699         process = sbi->ll_rw_process_info;
1700         offset = sbi->ll_rw_offset_info;
1701
1702         spin_lock(&sbi->ll_pp_extent_lock);
1703         /* Extent statistics */
1704         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1705                 if (io_extents->pp_extents[i].pid == pid) {
1706                         cur = i;
1707                         break;
1708                 }
1709         }
1710
1711         if (cur == -1) {
1712                 /* new process */
1713                 sbi->ll_extent_process_count =
1714                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1715                 cur = sbi->ll_extent_process_count;
1716                 io_extents->pp_extents[cur].pid = pid;
1717                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1718                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1719         }
1720
1721         for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1722              (i < (LL_HIST_MAX - 1)); i++);
1723         if (rw == 0) {
1724                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1725                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1726         } else {
1727                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1728                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1729         }
1730         spin_unlock(&sbi->ll_pp_extent_lock);
1731
1732         spin_lock(&sbi->ll_process_lock);
1733         /* Offset statistics */
1734         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1735                 if (process[i].rw_pid == pid) {
1736                         if (process[i].rw_last_file != file) {
1737                                 process[i].rw_range_start = pos;
1738                                 process[i].rw_last_file_pos = pos + count;
1739                                 process[i].rw_smallest_extent = count;
1740                                 process[i].rw_largest_extent = count;
1741                                 process[i].rw_offset = 0;
1742                                 process[i].rw_last_file = file;
1743                                 spin_unlock(&sbi->ll_process_lock);
1744                                 return;
1745                         }
1746                         if (process[i].rw_last_file_pos != pos) {
1747                                 *off_count =
1748                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1749                                 offset[*off_count].rw_op = process[i].rw_op;
1750                                 offset[*off_count].rw_pid = pid;
1751                                 offset[*off_count].rw_range_start =
1752                                         process[i].rw_range_start;
1753                                 offset[*off_count].rw_range_end =
1754                                         process[i].rw_last_file_pos;
1755                                 offset[*off_count].rw_smallest_extent =
1756                                         process[i].rw_smallest_extent;
1757                                 offset[*off_count].rw_largest_extent =
1758                                         process[i].rw_largest_extent;
1759                                 offset[*off_count].rw_offset =
1760                                         process[i].rw_offset;
1761                                 process[i].rw_op = rw;
1762                                 process[i].rw_range_start = pos;
1763                                 process[i].rw_smallest_extent = count;
1764                                 process[i].rw_largest_extent = count;
1765                                 process[i].rw_offset = pos -
1766                                         process[i].rw_last_file_pos;
1767                         }
1768                         if (process[i].rw_smallest_extent > count)
1769                                 process[i].rw_smallest_extent = count;
1770                         if (process[i].rw_largest_extent < count)
1771                                 process[i].rw_largest_extent = count;
1772                         process[i].rw_last_file_pos = pos + count;
1773                         spin_unlock(&sbi->ll_process_lock);
1774                         return;
1775                 }
1776         }
1777         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1778         process[*process_count].rw_pid = pid;
1779         process[*process_count].rw_op = rw;
1780         process[*process_count].rw_range_start = pos;
1781         process[*process_count].rw_last_file_pos = pos + count;
1782         process[*process_count].rw_smallest_extent = count;
1783         process[*process_count].rw_largest_extent = count;
1784         process[*process_count].rw_offset = 0;
1785         process[*process_count].rw_last_file = file;
1786         spin_unlock(&sbi->ll_process_lock);
1787 }
1788
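/*
 * Offset statistics: saved discontiguous-offset records first, then the
 * current range being tracked for each process.
 */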
1789 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1790 {
1791         struct timespec64 now;
1792         struct ll_sb_info *sbi = seq->private;
1793         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1794         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1795         int i;
1796
1797         ktime_get_real_ts64(&now);
1798
1799         if (!sbi->ll_rw_stats_on) {
1800                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1801                 return 0;
1802         }
1803         spin_lock(&sbi->ll_process_lock);
1804
1805         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1806                    (s64)now.tv_sec, now.tv_nsec);
1807         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1808                    "R/W", "PID", "RANGE START", "RANGE END",
1809                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1810
1811         /* We stored the discontiguous offsets here; print them first */
1812         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1813                 if (offset[i].rw_pid != 0)
1814                         seq_printf(seq,
1815                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1816                                    offset[i].rw_op == READ ? 'R' : 'W',
1817                                    offset[i].rw_pid,
1818                                    offset[i].rw_range_start,
1819                                    offset[i].rw_range_end,
1820                                    (unsigned long)offset[i].rw_smallest_extent,
1821                                    (unsigned long)offset[i].rw_largest_extent,
1822                                    offset[i].rw_offset);
1823         }
1824
1825         /* Then print the current offsets for each process */
1826         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1827                 if (process[i].rw_pid != 0)
1828                         seq_printf(seq,
1829                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1830                                    process[i].rw_op == READ ? 'R' : 'W',
1831                                    process[i].rw_pid,
1832                                    process[i].rw_range_start,
1833                                    process[i].rw_last_file_pos,
1834                                    (unsigned long)process[i].rw_smallest_extent,
1835                                    (unsigned long)process[i].rw_largest_extent,
1836                                    process[i].rw_offset);
1837         }
1838         spin_unlock(&sbi->ll_process_lock);
1839
1840         return 0;
1841 }
1842
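/*
 * Any write resets the offset tables; writing 0 or "disable" also turns
 * collection off, anything else turns it on.
 */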
1843 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1844                                             const char __user *buf,
1845                                             size_t len, loff_t *off)
1846 {
1847         struct seq_file *seq = file->private_data;
1848         struct ll_sb_info *sbi = seq->private;
1849         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1850         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1851         __s64 value;
1852
1853         if (len == 0)
1854                 return -EINVAL;
1855
1856         value = ll_stats_pid_write(buf, len);
1857
1858         if (value == 0)
1859                 sbi->ll_rw_stats_on = 0;
1860         else
1861                 sbi->ll_rw_stats_on = 1;
1862
1863         spin_lock(&sbi->ll_process_lock);
1864         sbi->ll_offset_process_count = 0;
1865         sbi->ll_rw_offset_entry_count = 0;
1866         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1867                LL_PROCESS_HIST_MAX);
1868         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1869                LL_OFFSET_HIST_MAX);
1870         spin_unlock(&sbi->ll_process_lock);
1871
1872         return len;
1873 }
1874
1875 /**
1876  * ll_stats_pid_write() - Determine if stats collection should be enabled
1877  * @buf: Buffer containing the data written
1878  * @len: Number of bytes in the buffer
1879  *
1880  * Several proc files begin collecting stats when a value is written, and stop
1881  * collecting when either '0' or 'disable' is written. This function checks the
1882  * written value to see if collection should be enabled or disabled.
1883  *
1884  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
1885  * equivalent of a number is written, that number is returned. Otherwise,
1886  * 1 is returned. Non-zero return values indicate collection should be enabled.
1887  */
1888 static __s64 ll_stats_pid_write(const char __user *buf, size_t len)
1889 {
1890         unsigned long long value = 1;
1891         int rc;
1892         char kernbuf[16];
1893
1894         rc = kstrtoull_from_user(buf, len, 0, &value);
1895         if (rc < 0 && len < sizeof(kernbuf)) {
1897                 if (copy_from_user(kernbuf, buf, len))
1898                         return -EFAULT;
1899                 kernbuf[len] = 0;
1900
1901                 if (kernbuf[len - 1] == '\n')
1902                         kernbuf[len - 1] = 0;
1903
1904                 if (strncasecmp(kernbuf, "disable", 7) == 0)
1905                         value = 0;
1906         }
1907
1908         return value;
1909 }
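/*
 * Example usage from a client shell (illustrative; the exact proc path
 * depends on the mount instance name under /proc/fs/lustre/llite/):
 *
 *   echo 1 > .../extents_stats        # enable collection (also resets)
 *   cat .../extents_stats             # dump the aggregate histogram
 *   echo disable > .../extents_stats  # disable and reset
 */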
1910
1911 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1912 #endif /* CONFIG_PROC_FS */