24b40104b721ecf543157a94ce8a58c1378a1de8
[fs/lustre-release.git] / lustre / llite / lproc_llite.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 static struct kobject *llite_kobj;
47 static struct dentry *llite_root;
48
49 int llite_tunables_register(void)
50 {
51         int rc = 0;
52
53         llite_kobj = class_setup_tunables("llite");
54         if (IS_ERR(llite_kobj))
55                 return PTR_ERR(llite_kobj);
56
57         llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
58         if (IS_ERR_OR_NULL(llite_root)) {
59                 rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
60                 llite_root = NULL;
61                 kobject_put(llite_kobj);
62                 llite_kobj = NULL;
63         }
64
65         return rc;
66 }
67
68 void llite_tunables_unregister(void)
69 {
70         if (llite_kobj) {
71                 kobject_put(llite_kobj);
72                 llite_kobj = NULL;
73         }
74
75         if (!IS_ERR_OR_NULL(llite_root)) {
76                 debugfs_remove(llite_root);
77                 llite_root = NULL;
78         }
79 }
80
81 /* <debugfs>/lustre/llite mount point registration */
82 static const struct file_operations ll_rw_extents_stats_fops;
83 static const struct file_operations ll_rw_extents_stats_pp_fops;
84 static const struct file_operations ll_rw_offset_stats_fops;
85
86 /**
87  * ll_stats_pid_write() - Determine if stats collection should be enabled
88  * @buf: Buffer containing the data written
89  * @len: Number of bytes in the buffer
90  *
91  * Several proc files begin collecting stats when a value is written, and stop
92  * collecting when either '0' or 'disable' is written. This function checks the
93  * written value to see if collection should be enabled or disabled.
94  *
95  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
96  * equivalent of a number is written, that number is returned. Otherwise,
97  * 1 is returned. Non-zero return values indicate collection should be enabled.
98  */
99 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
100 {
101         unsigned long long value = 1;
102         char kernbuf[16];
103         int rc;
104
105         rc = kstrtoull_from_user(buf, len, 0, &value);
106         if (rc < 0 && len < sizeof(kernbuf)) {
107                 if (copy_from_user(kernbuf, buf, len))
108                         return -EFAULT;
109                 kernbuf[len] = 0;
110
111                 if (kernbuf[len - 1] == '\n')
112                         kernbuf[len - 1] = 0;
113
114                 if (strncasecmp(kernbuf, "disable", 7) == 0)
115                         value = 0;
116         }
117
118         return value;
119 }
120
121 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
122                               char *buf)
123 {
124         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
125                                               ll_kset.kobj);
126         struct obd_statfs osfs;
127         int rc;
128
129         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
130         if (rc)
131                 return rc;
132
133         return sprintf(buf, "%u\n", osfs.os_bsize);
134 }
135 LUSTRE_RO_ATTR(blocksize);
136
137 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
138                                    char *buf)
139 {
140         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
141                                               ll_kset.kobj);
142
143         return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
144 }
145
146 static ssize_t stat_blocksize_store(struct kobject *kobj,
147                                     struct attribute *attr,
148                                     const char *buffer,
149                                     size_t count)
150 {
151         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
152                                               ll_kset.kobj);
153         unsigned int val;
154         int rc;
155
156         rc = kstrtouint(buffer, 10, &val);
157         if (rc)
158                 return rc;
159
160         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
161                 return -ERANGE;
162
163         sbi->ll_stat_blksize = val;
164
165         return count;
166 }
167 LUSTRE_RW_ATTR(stat_blocksize);
168
169 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
170                                 char *buf)
171 {
172         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
173                                               ll_kset.kobj);
174         struct obd_statfs osfs;
175         u32 blk_size;
176         u64 result;
177         int rc;
178
179         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
180         if (rc)
181                 return rc;
182
183         blk_size = osfs.os_bsize >> 10;
184         result = osfs.os_blocks;
185
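        /* Convert blocks to KiB: multiply os_blocks by (os_bsize / 1024)
         * using shifts (os_bsize is assumed to be a power of two). The same
         * conversion is used by kbytesfree and kbytesavail below. */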
186         while (blk_size >>= 1)
187                 result <<= 1;
188
189         return sprintf(buf, "%llu\n", result);
190 }
191 LUSTRE_RO_ATTR(kbytestotal);
192
193 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
194                                char *buf)
195 {
196         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
197                                               ll_kset.kobj);
198         struct obd_statfs osfs;
199         u32 blk_size;
200         u64 result;
201         int rc;
202
203         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
204         if (rc)
205                 return rc;
206
207         blk_size = osfs.os_bsize >> 10;
208         result = osfs.os_bfree;
209
210         while (blk_size >>= 1)
211                 result <<= 1;
212
213         return sprintf(buf, "%llu\n", result);
214 }
215 LUSTRE_RO_ATTR(kbytesfree);
216
217 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
218                                 char *buf)
219 {
220         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
221                                               ll_kset.kobj);
222         struct obd_statfs osfs;
223         u32 blk_size;
224         u64 result;
225         int rc;
226
227         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
228         if (rc)
229                 return rc;
230
231         blk_size = osfs.os_bsize >> 10;
232         result = osfs.os_bavail;
233
234         while (blk_size >>= 1)
235                 result <<= 1;
236
237         return sprintf(buf, "%llu\n", result);
238 }
239 LUSTRE_RO_ATTR(kbytesavail);
240
241 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
242                                char *buf)
243 {
244         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
245                                               ll_kset.kobj);
246         struct obd_statfs osfs;
247         int rc;
248
249         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
250         if (rc)
251                 return rc;
252
253         return sprintf(buf, "%llu\n", osfs.os_files);
254 }
255 LUSTRE_RO_ATTR(filestotal);
256
257 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
258                               char *buf)
259 {
260         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
261                                               ll_kset.kobj);
262         struct obd_statfs osfs;
263         int rc;
264
265         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
266         if (rc)
267                 return rc;
268
269         return sprintf(buf, "%llu\n", osfs.os_ffree);
270 }
271 LUSTRE_RO_ATTR(filesfree);
272
273 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
274                                 char *buf)
275 {
276         return sprintf(buf, "local client\n");
277 }
278 LUSTRE_RO_ATTR(client_type);
279
280 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
281                            char *buf)
282 {
283         return sprintf(buf, "lustre\n");
284 }
285 LUSTRE_RO_ATTR(fstype);
286
287 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
288                          char *buf)
289 {
290         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
291                                               ll_kset.kobj);
292
293         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
294 }
295 LUSTRE_RO_ATTR(uuid);
296
297 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
298 {
299         struct super_block *sb = m->private;
300
301         /*
302          * See description of statistical counters in struct cl_site, and
303          * struct lu_site.
304          */
305         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
306 }
307
308 LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
309
310 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
311 {
312         struct super_block *sb = m->private;
313         struct ll_sb_info *sbi = ll_s2sbi(sb);
314         long pages_number;
315         int mult;
316
317         spin_lock(&sbi->ll_lock);
318         pages_number = sbi->ll_ra_info.ra_max_pages;
319         spin_unlock(&sbi->ll_lock);
320
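        /* mult is the number of PAGE_SIZE pages per MiB; the helper prints
         * pages_number scaled by mult, i.e. in megabytes, including any
         * fractional part. */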
321         mult = 1 << (20 - PAGE_SHIFT);
322         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
323 }
324
325 static ssize_t
326 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
327                               size_t count, loff_t *off)
328 {
329         struct seq_file *m = file->private_data;
330         struct super_block *sb = m->private;
331         struct ll_sb_info *sbi = ll_s2sbi(sb);
332         __s64 pages_number;
333         int rc;
334
335         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
336         if (rc)
337                 return rc;
338
339         pages_number >>= PAGE_SHIFT;
340
341         if (pages_number < 0 || pages_number > totalram_pages / 2) {
342                 /* 1/2 of RAM */
343                 CERROR("%s: can't set max_read_ahead_mb=%lu > %luMB\n",
344                        ll_get_fsname(sb, NULL, 0),
345                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
346                        totalram_pages >> (20 - PAGE_SHIFT + 1));
347                 return -ERANGE;
348         }
349
350         spin_lock(&sbi->ll_lock);
351         sbi->ll_ra_info.ra_max_pages = pages_number;
352         spin_unlock(&sbi->ll_lock);
353
354         return count;
355 }
356
357 LDEBUGFS_SEQ_FOPS(ll_max_readahead_mb);
358
359 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
360 {
361         struct super_block *sb = m->private;
362         struct ll_sb_info *sbi = ll_s2sbi(sb);
363         long pages_number;
364         int mult;
365
366         spin_lock(&sbi->ll_lock);
367         pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
368         spin_unlock(&sbi->ll_lock);
369
370         mult = 1 << (20 - PAGE_SHIFT);
371         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
372 }
373
374 static ssize_t
375 ll_max_readahead_per_file_mb_seq_write(struct file *file,
376                                        const char __user *buffer,
377                                        size_t count, loff_t *off)
378 {
379         struct seq_file *m = file->private_data;
380         struct super_block *sb = m->private;
381         struct ll_sb_info *sbi = ll_s2sbi(sb);
382         int rc;
383         __s64 pages_number;
384
385         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
386         if (rc)
387                 return rc;
388
389         pages_number >>= PAGE_SHIFT;
390
391         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
392                 CERROR("%s: can't set max_read_ahead_per_file_mb=%lu > "
393                        "max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0),
394                        (unsigned long)pages_number >> (20 - PAGE_SHIFT),
395                        sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT));
396                 return -ERANGE;
397         }
398
399         spin_lock(&sbi->ll_lock);
400         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
401         spin_unlock(&sbi->ll_lock);
402
403         return count;
404 }
405
406 LDEBUGFS_SEQ_FOPS(ll_max_readahead_per_file_mb);
407
408 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
409 {
410         struct super_block *sb = m->private;
411         struct ll_sb_info *sbi = ll_s2sbi(sb);
412         long pages_number;
413         int mult;
414
415         spin_lock(&sbi->ll_lock);
416         pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
417         spin_unlock(&sbi->ll_lock);
418
419         mult = 1 << (20 - PAGE_SHIFT);
420         return lprocfs_seq_read_frac_helper(m, pages_number, mult);
421 }
422
423 static ssize_t
424 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
425                                      const char __user *buffer,
426                                      size_t count, loff_t *off)
427 {
428         struct seq_file *m = file->private_data;
429         struct super_block *sb = m->private;
430         struct ll_sb_info *sbi = ll_s2sbi(sb);
431         int rc;
432         __s64 pages_number;
433
434         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
435         if (rc)
436                 return rc;
437
438         pages_number >>= PAGE_SHIFT;
439
440         /* Cap this at the current max readahead window size; the readahead
441          * algorithm does this anyway, so it's pointless to set it larger. */
442         if (pages_number < 0 ||
443             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
444                 int pages_shift = 20 - PAGE_SHIFT;
445                 CERROR("%s: can't set max_read_ahead_whole_mb=%lu > "
446                        "max_read_ahead_per_file_mb=%lu\n",
447                        ll_get_fsname(sb, NULL, 0),
448                        (unsigned long)pages_number >> pages_shift,
449                        sbi->ll_ra_info.ra_max_pages_per_file >> pages_shift);
450                 return -ERANGE;
451         }
452
453         spin_lock(&sbi->ll_lock);
454         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
455         spin_unlock(&sbi->ll_lock);
456
457         return count;
458 }
459
460 LDEBUGFS_SEQ_FOPS(ll_max_read_ahead_whole_mb);
461
462 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
463 {
464         struct super_block     *sb    = m->private;
465         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
466         struct cl_client_cache *cache = sbi->ll_cache;
467         int shift = 20 - PAGE_SHIFT;
468         long max_cached_mb;
469         long unused_mb;
470
471         max_cached_mb = cache->ccc_lru_max >> shift;
472         unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
473         seq_printf(m, "users: %d\n"
474                       "max_cached_mb: %ld\n"
475                       "used_mb: %ld\n"
476                       "unused_mb: %ld\n"
477                       "reclaim_count: %u\n",
478                    atomic_read(&cache->ccc_users),
479                    max_cached_mb,
480                    max_cached_mb - unused_mb,
481                    unused_mb,
482                    cache->ccc_lru_shrinkers);
483         return 0;
484 }
485
486 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
487                                           const char __user *buffer,
488                                           size_t count, loff_t *off)
489 {
490         struct seq_file *m = file->private_data;
491         struct super_block *sb = m->private;
492         struct ll_sb_info *sbi = ll_s2sbi(sb);
493         struct cl_client_cache *cache = sbi->ll_cache;
494         struct lu_env *env;
495         long diff = 0;
496         long nrpages = 0;
497         __u16 refcheck;
498         __s64 pages_number;
499         int rc;
500         char kernbuf[128];
501
502         ENTRY;
503         if (count >= sizeof(kernbuf))
504                 RETURN(-EINVAL);
505
506         if (copy_from_user(kernbuf, buffer, count))
507                 RETURN(-EFAULT);
508         kernbuf[count] = 0;
509
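        /* The write may be a bare value or use the "max_cached_mb:" label
         * printed by the read side; lprocfs_find_named_value() advances past
         * the label and adjusts count accordingly when it is present. */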
510         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
511                   kernbuf;
512         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
513         if (rc)
514                 RETURN(rc);
515
516         pages_number >>= PAGE_SHIFT;
517
518         if (pages_number < 0 || pages_number > totalram_pages) {
519                 CERROR("%s: can't set max cache more than %lu MB\n",
520                        ll_get_fsname(sb, NULL, 0),
521                        totalram_pages >> (20 - PAGE_SHIFT));
522                 RETURN(-ERANGE);
523         }
524         /* Allow enough cache so clients can make well-formed RPCs */
525         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
526
527         spin_lock(&sbi->ll_lock);
528         diff = pages_number - cache->ccc_lru_max;
529         spin_unlock(&sbi->ll_lock);
530
531         /* easy - add more LRU slots. */
532         if (diff >= 0) {
533                 atomic_long_add(diff, &cache->ccc_lru_left);
534                 GOTO(out, rc = 0);
535         }
536
537         env = cl_env_get(&refcheck);
538         if (IS_ERR(env))
539                 RETURN(PTR_ERR(env));
540
541         diff = -diff;
542         while (diff > 0) {
543                 long tmp;
544
545                 /* reduce LRU budget from free slots. */
546                 do {
547                         long ov, nv;
548
549                         ov = atomic_long_read(&cache->ccc_lru_left);
550                         if (ov == 0)
551                                 break;
552
553                         nv = ov > diff ? ov - diff : 0;
554                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
555                         if (likely(ov == rc)) {
556                                 diff -= ov - nv;
557                                 nrpages += ov - nv;
558                                 break;
559                         }
560                 } while (1);
561
562                 if (diff <= 0)
563                         break;
564
565                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
566                         rc = -ENODEV;
567                         break;
568                 }
569
570                 /* difficult - have to ask OSCs to drop LRU slots. */
571                 tmp = diff << 1;
572                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
573                                 sizeof(KEY_CACHE_LRU_SHRINK),
574                                 KEY_CACHE_LRU_SHRINK,
575                                 sizeof(tmp), &tmp, NULL);
576                 if (rc < 0)
577                         break;
578         }
579         cl_env_put(env, &refcheck);
580
581 out:
582         if (rc >= 0) {
583                 spin_lock(&sbi->ll_lock);
584                 cache->ccc_lru_max = pages_number;
585                 spin_unlock(&sbi->ll_lock);
586                 rc = count;
587         } else {
588                 atomic_long_add(nrpages, &cache->ccc_lru_left);
589         }
590         return rc;
591 }
592
593 LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
594
595 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
596                               char *buf)
597 {
598         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
599                                               ll_kset.kobj);
600
601         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
602 }
603
604 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
605                                const char *buffer, size_t count)
606 {
607         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
608                                               ll_kset.kobj);
609         bool val;
610         int tmp;
611         int rc;
612
613         if (!sbi->ll_dt_exp)
614                 /* Not set up yet */
615                 return -EAGAIN;
616
617         rc = kstrtobool(buffer, &val);
618         if (rc)
619                 return rc;
620         if (val)
621                 sbi->ll_flags |= LL_SBI_CHECKSUM;
622         else
623                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
624         tmp = val;
625
626         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
627                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
628         if (rc)
629                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
630
631         return count;
632 }
633 LUSTRE_RW_ATTR(checksums);
634
635 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
636
637 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
638                               enum stats_track_type type)
639 {
640         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
641                                               ll_kset.kobj);
642
643         if (sbi->ll_stats_track_type == type)
644                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
645         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
646                 return sprintf(buf, "0 (all)\n");
647
648         return sprintf(buf, "untracked\n");
649 }
650
651 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
652                               size_t count, enum stats_track_type type)
653 {
654         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
655                                               ll_kset.kobj);
656         unsigned long pid;
657         int rc;
658
659         rc = kstrtoul(buffer, 10, &pid);
660         if (rc)
661                 return rc;
662
663         sbi->ll_stats_track_id = pid;
664         if (pid == 0)
665                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
666         else
667                 sbi->ll_stats_track_type = type;
668         lprocfs_clear_stats(sbi->ll_stats);
669         return count;
670 }
671
672 static ssize_t stats_track_pid_show(struct kobject *kobj,
673                                     struct attribute *attr,
674                                     char *buf)
675 {
676         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
677 }
678
679 static ssize_t stats_track_pid_store(struct kobject *kobj,
680                                      struct attribute *attr,
681                                      const char *buffer,
682                                      size_t count)
683 {
684         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
685 }
686 LUSTRE_RW_ATTR(stats_track_pid);
687
688 static ssize_t stats_track_ppid_show(struct kobject *kobj,
689                                      struct attribute *attr,
690                                      char *buf)
691 {
692         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
693 }
694
695 static ssize_t stats_track_ppid_store(struct kobject *kobj,
696                                       struct attribute *attr,
697                                       const char *buffer,
698                                       size_t count)
699 {
700         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
701 }
702 LUSTRE_RW_ATTR(stats_track_ppid);
703
704 static ssize_t stats_track_gid_show(struct kobject *kobj,
705                                     struct attribute *attr,
706                                     char *buf)
707 {
708         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
709 }
710
711 static ssize_t stats_track_gid_store(struct kobject *kobj,
712                                      struct attribute *attr,
713                                      const char *buffer,
714                                      size_t count)
715 {
716         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
717 }
718 LUSTRE_RW_ATTR(stats_track_gid);
719
720 static ssize_t statahead_running_max_show(struct kobject *kobj,
721                                           struct attribute *attr,
722                                           char *buf)
723 {
724         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
725                                               ll_kset.kobj);
726
727         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
728 }
729
730 static ssize_t statahead_running_max_store(struct kobject *kobj,
731                                            struct attribute *attr,
732                                            const char *buffer,
733                                            size_t count)
734 {
735         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
736                                               ll_kset.kobj);
737         unsigned long val;
738         int rc;
739
740         rc = kstrtoul(buffer, 0, &val);
741         if (rc)
742                 return rc;
743
744         if (val <= LL_SA_RUNNING_MAX) {
745                 sbi->ll_sa_running_max = val;
746                 return count;
747         }
748
749         CERROR("Bad statahead_running_max value %lu. Valid values "
750                "are in the range [0, %d]\n", val, LL_SA_RUNNING_MAX);
751
752         return -ERANGE;
753 }
754 LUSTRE_RW_ATTR(statahead_running_max);
755
756 static ssize_t statahead_max_show(struct kobject *kobj,
757                                   struct attribute *attr,
758                                   char *buf)
759 {
760         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
761                                               ll_kset.kobj);
762
763         return sprintf(buf, "%u\n", sbi->ll_sa_max);
764 }
765
766 static ssize_t statahead_max_store(struct kobject *kobj,
767                                    struct attribute *attr,
768                                    const char *buffer,
769                                    size_t count)
770 {
771         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
772                                               ll_kset.kobj);
773         unsigned long val;
774         int rc;
775
776         rc = kstrtoul(buffer, 0, &val);
777         if (rc)
778                 return rc;
779
780         if (val <= LL_SA_RPC_MAX)
781                 sbi->ll_sa_max = val;
782         else
783                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
784                        val, LL_SA_RPC_MAX);
785
786         return count;
787 }
788 LUSTRE_RW_ATTR(statahead_max);
789
790 static ssize_t statahead_agl_show(struct kobject *kobj,
791                                   struct attribute *attr,
792                                   char *buf)
793 {
794         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
795                                               ll_kset.kobj);
796
797         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
798 }
799
800 static ssize_t statahead_agl_store(struct kobject *kobj,
801                                    struct attribute *attr,
802                                    const char *buffer,
803                                    size_t count)
804 {
805         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
806                                               ll_kset.kobj);
807         bool val;
808         int rc;
809
810         rc = kstrtobool(buffer, &val);
811         if (rc)
812                 return rc;
813
814         if (val)
815                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
816         else
817                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
818
819         return count;
820 }
821 LUSTRE_RW_ATTR(statahead_agl);
822
823 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
824 {
825         struct super_block *sb = m->private;
826         struct ll_sb_info *sbi = ll_s2sbi(sb);
827
828         seq_printf(m, "statahead total: %u\n"
829                       "statahead wrong: %u\n"
830                       "agl total: %u\n",
831                    atomic_read(&sbi->ll_sa_total),
832                    atomic_read(&sbi->ll_sa_wrong),
833                    atomic_read(&sbi->ll_agl_total));
834         return 0;
835 }
836
837 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
838
839 static ssize_t lazystatfs_show(struct kobject *kobj,
840                                struct attribute *attr,
841                                char *buf)
842 {
843         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
844                                               ll_kset.kobj);
845
846         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
847 }
848
849 static ssize_t lazystatfs_store(struct kobject *kobj,
850                                 struct attribute *attr,
851                                 const char *buffer,
852                                 size_t count)
853 {
854         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
855                                               ll_kset.kobj);
856         bool val;
857         int rc;
858
859         rc = kstrtobool(buffer, &val);
860         if (rc)
861                 return rc;
862
863         if (val)
864                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
865         else
866                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
867
868         return count;
869 }
870 LUSTRE_RW_ATTR(lazystatfs);
871
872 static ssize_t max_easize_show(struct kobject *kobj,
873                                struct attribute *attr,
874                                char *buf)
875 {
876         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
877                                               ll_kset.kobj);
878         unsigned int ealen;
879         int rc;
880
881         rc = ll_get_max_mdsize(sbi, &ealen);
882         if (rc)
883                 return rc;
884
885         return sprintf(buf, "%u\n", ealen);
886 }
887 LUSTRE_RO_ATTR(max_easize);
888
889 /**
890  * Get default_easize.
891  *
892  * \see client_obd::cl_default_mds_easize
893  *
894  * \param[in] kobj      kobject of the ll_sb_info
895  * \param[in] attr      unused sysfs attribute
896  * \param[out] buf      buffer for the printed value
897  * \retval positive     bytes printed to \a buf on success
898  * \retval negative     negated errno on failure
899  */
900 static ssize_t default_easize_show(struct kobject *kobj,
901                                    struct attribute *attr,
902                                    char *buf)
903 {
904         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
905                                               ll_kset.kobj);
906         unsigned int ealen;
907         int rc;
908
909         rc = ll_get_default_mdsize(sbi, &ealen);
910         if (rc)
911                 return rc;
912
913         return sprintf(buf, "%u\n", ealen);
914 }
915
916 /**
917  * Set default_easize.
918  *
919  * Range checking on the passed value is handled by
920  * ll_set_default_mdsize().
921  *
922  * \see client_obd::cl_default_mds_easize
923  *
924  * \param[in] kobj      kobject of the ll_sb_info
925  * \param[in] attr      unused sysfs attribute
926  * \param[in] buffer    string passed from user space
927  * \param[in] count     \a buffer length
928  *
929  * \retval positive     \a count on success
930  * \retval negative     negated errno on failure
931  */
932 static ssize_t default_easize_store(struct kobject *kobj,
933                                     struct attribute *attr,
934                                     const char *buffer,
935                                     size_t count)
936 {
937         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
938                                               ll_kset.kobj);
939         unsigned int val;
940         int rc;
941
942         if (count == 0)
943                 return 0;
944
945         rc = kstrtouint(buffer, 10, &val);
946         if (rc)
947                 return rc;
948
949         rc = ll_set_default_mdsize(sbi, val);
950         if (rc)
951                 return rc;
952
953         return count;
954 }
955 LUSTRE_RW_ATTR(default_easize);
956
957 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
958 {
959         const char *str[] = LL_SBI_FLAGS;
960         struct super_block *sb = m->private;
961         int flags = ll_s2sbi(sb)->ll_flags;
962         int i = 0;
963
964         while (flags != 0) {
965                 if (ARRAY_SIZE(str) <= i) {
966                         CERROR("%s: LL_SBI_FLAGS array is out of sync with "
967                                "the sbi flags; please update it\n", ll_get_fsname(sb, NULL, 0));
968                         return -EINVAL;
969                 }
970
971                 if (flags & 0x1)
972                         seq_printf(m, "%s ", str[i]);
973                 flags >>= 1;
974                 ++i;
975         }
976         seq_printf(m, "\b\n");
977         return 0;
978 }
979
980 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
981
982 static ssize_t xattr_cache_show(struct kobject *kobj,
983                                 struct attribute *attr,
984                                 char *buf)
985 {
986         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
987                                               ll_kset.kobj);
988
989         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
990 }
991
992 static ssize_t xattr_cache_store(struct kobject *kobj,
993                                  struct attribute *attr,
994                                  const char *buffer,
995                                  size_t count)
996 {
997         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
998                                               ll_kset.kobj);
999         bool val;
1000         int rc;
1001
1002         rc = kstrtobool(buffer, &val);
1003         if (rc)
1004                 return rc;
1005
1006         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
1007                 return -ENOTSUPP;
1008
1009         sbi->ll_xattr_cache_enabled = val;
1010         sbi->ll_xattr_cache_set = 1;
1011
1012         return count;
1013 }
1014 LUSTRE_RW_ATTR(xattr_cache);
1015
1016 static ssize_t tiny_write_show(struct kobject *kobj,
1017                                struct attribute *attr,
1018                                char *buf)
1019 {
1020         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1021                                               ll_kset.kobj);
1022
1023         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
1024 }
1025
1026 static ssize_t tiny_write_store(struct kobject *kobj,
1027                                 struct attribute *attr,
1028                                 const char *buffer,
1029                                 size_t count)
1030 {
1031         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1032                                               ll_kset.kobj);
1033         bool val;
1034         int rc;
1035
1036         rc = kstrtobool(buffer, &val);
1037         if (rc)
1038                 return rc;
1039
1040         spin_lock(&sbi->ll_lock);
1041         if (val)
1042                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
1043         else
1044                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1045         spin_unlock(&sbi->ll_lock);
1046
1047         return count;
1048 }
1049 LUSTRE_RW_ATTR(tiny_write);
1050
1051 static ssize_t fast_read_show(struct kobject *kobj,
1052                               struct attribute *attr,
1053                               char *buf)
1054 {
1055         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1056                                               ll_kset.kobj);
1057
1058         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1059 }
1060
1061 static ssize_t fast_read_store(struct kobject *kobj,
1062                                struct attribute *attr,
1063                                const char *buffer,
1064                                size_t count)
1065 {
1066         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1067                                               ll_kset.kobj);
1068         bool val;
1069         int rc;
1070
1071         rc = kstrtobool(buffer, &val);
1072         if (rc)
1073                 return rc;
1074
1075         spin_lock(&sbi->ll_lock);
1076         if (val)
1077                 sbi->ll_flags |= LL_SBI_FAST_READ;
1078         else
1079                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1080         spin_unlock(&sbi->ll_lock);
1081
1082         return count;
1083 }
1084 LUSTRE_RW_ATTR(fast_read);
1085
1086 static ssize_t pio_show(struct kobject *kobj,
1087                         struct attribute *attr,
1088                         char *buf)
1089 {
1090         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1091                                               ll_kset.kobj);
1092
1093         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_PIO));
1094 }
1095
1096 static ssize_t pio_store(struct kobject *kobj,
1097                          struct attribute *attr,
1098                          const char *buffer,
1099                          size_t count)
1100 {
1101         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1102                                               ll_kset.kobj);
1103         bool val;
1104         int rc;
1105
1106         rc = kstrtobool(buffer, &val);
1107         if (rc)
1108                 return rc;
1109
1110         spin_lock(&sbi->ll_lock);
1111         if (val)
1112                 sbi->ll_flags |= LL_SBI_PIO;
1113         else
1114                 sbi->ll_flags &= ~LL_SBI_PIO;
1115         spin_unlock(&sbi->ll_lock);
1116
1117         return count;
1118 }
1119 LUSTRE_RW_ATTR(pio);
1120
1121 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1122 {
1123         struct super_block      *sb    = m->private;
1124         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1125         struct cl_client_cache  *cache = sbi->ll_cache;
1126         long pages;
1127         int mb;
1128
1129         pages = atomic_long_read(&cache->ccc_unstable_nr);
1130         mb    = (pages * PAGE_SIZE) >> 20;
1131
1132         seq_printf(m, "unstable_check:     %8d\n"
1133                       "unstable_pages: %12ld\n"
1134                       "unstable_mb:        %8d\n",
1135                    cache->ccc_unstable_check, pages, mb);
1136         return 0;
1137 }
1138
1139 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1140                                            const char __user *buffer,
1141                                            size_t count, loff_t *unused)
1142 {
1143         struct seq_file *seq = file->private_data;
1144         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1145         char kernbuf[128];
1146         bool val;
1147         int rc;
1148
1149         if (count == 0)
1150                 return 0;
1151         if (count >= sizeof(kernbuf))
1152                 return -EINVAL;
1153
1154         if (copy_from_user(kernbuf, buffer, count))
1155                 return -EFAULT;
1156         kernbuf[count] = 0;
1157
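        /* Accept either a bare boolean or input carrying the
         * "unstable_check:" label shown by reads of this file. */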
1158         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1159                   kernbuf;
1160         rc = kstrtobool_from_user(buffer, count, &val);
1161         if (rc < 0)
1162                 return rc;
1163
1164         /* borrow lru lock to set the value */
1165         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1166         sbi->ll_cache->ccc_unstable_check = val;
1167         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1168
1169         return count;
1170 }
1171
1172 LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
1173
1174 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1175 {
1176         struct super_block *sb = m->private;
1177         struct ll_sb_info *sbi = ll_s2sbi(sb);
1178         struct root_squash_info *squash = &sbi->ll_squash;
1179
1180         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1181         return 0;
1182 }
1183
1184 static ssize_t ll_root_squash_seq_write(struct file *file,
1185                                         const char __user *buffer,
1186                                         size_t count, loff_t *off)
1187 {
1188         struct seq_file *m = file->private_data;
1189         struct super_block *sb = m->private;
1190         struct ll_sb_info *sbi = ll_s2sbi(sb);
1191         struct root_squash_info *squash = &sbi->ll_squash;
1192
1193         return lprocfs_wr_root_squash(buffer, count, squash,
1194                                       ll_get_fsname(sb, NULL, 0));
1195 }
1196
1197 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1198
1199 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1200 {
1201         struct super_block *sb = m->private;
1202         struct ll_sb_info *sbi = ll_s2sbi(sb);
1203         struct root_squash_info *squash = &sbi->ll_squash;
1204         int len;
1205
1206         down_read(&squash->rsi_sem);
1207         if (!list_empty(&squash->rsi_nosquash_nids)) {
1208                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1209                                         &squash->rsi_nosquash_nids);
1210                 m->count += len;
1211                 seq_putc(m, '\n');
1212         } else {
1213                 seq_puts(m, "NONE\n");
1214         }
1215         up_read(&squash->rsi_sem);
1216
1217         return 0;
1218 }
1219
1220 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1221                                           const char __user *buffer,
1222                                           size_t count, loff_t *off)
1223 {
1224         struct seq_file *m = file->private_data;
1225         struct super_block *sb = m->private;
1226         struct ll_sb_info *sbi = ll_s2sbi(sb);
1227         struct root_squash_info *squash = &sbi->ll_squash;
1228         int rc;
1229
1230         rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
1231                                       ll_get_fsname(sb, NULL, 0));
1232         if (rc < 0)
1233                 return rc;
1234
1235         ll_compute_rootsquash_state(sbi);
1236
1237         return rc;
1238 }
1239
1240 LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1241
1242 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1243         { .name =       "site",
1244           .fops =       &ll_site_stats_fops                     },
1245         { .name =       "max_read_ahead_mb",
1246           .fops =       &ll_max_readahead_mb_fops               },
1247         { .name =       "max_read_ahead_per_file_mb",
1248           .fops =       &ll_max_readahead_per_file_mb_fops      },
1249         { .name =       "max_read_ahead_whole_mb",
1250           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1251         { .name =       "max_cached_mb",
1252           .fops =       &ll_max_cached_mb_fops                  },
1253         { .name =       "statahead_stats",
1254           .fops =       &ll_statahead_stats_fops                },
1255         { .name =       "unstable_stats",
1256           .fops =       &ll_unstable_stats_fops                 },
1257         { .name =       "sbi_flags",
1258           .fops =       &ll_sbi_flags_fops                      },
1259         { .name =       "root_squash",
1260           .fops =       &ll_root_squash_fops                    },
1261         { .name =       "nosquash_nids",
1262           .fops =       &ll_nosquash_nids_fops                  },
1263         { NULL }
1264 };
1265
1266 #define MAX_STRING_SIZE 128
1267
1268 static struct attribute *llite_attrs[] = {
1269         &lustre_attr_blocksize.attr,
1270         &lustre_attr_stat_blocksize.attr,
1271         &lustre_attr_kbytestotal.attr,
1272         &lustre_attr_kbytesfree.attr,
1273         &lustre_attr_kbytesavail.attr,
1274         &lustre_attr_filestotal.attr,
1275         &lustre_attr_filesfree.attr,
1276         &lustre_attr_client_type.attr,
1277         &lustre_attr_fstype.attr,
1278         &lustre_attr_uuid.attr,
1279         &lustre_attr_checksums.attr,
1280         &lustre_attr_checksum_pages.attr,
1281         &lustre_attr_stats_track_pid.attr,
1282         &lustre_attr_stats_track_ppid.attr,
1283         &lustre_attr_stats_track_gid.attr,
1284         &lustre_attr_statahead_running_max.attr,
1285         &lustre_attr_statahead_max.attr,
1286         &lustre_attr_statahead_agl.attr,
1287         &lustre_attr_lazystatfs.attr,
1288         &lustre_attr_max_easize.attr,
1289         &lustre_attr_default_easize.attr,
1290         &lustre_attr_xattr_cache.attr,
1291         &lustre_attr_fast_read.attr,
1292         &lustre_attr_pio.attr,
1293         &lustre_attr_tiny_write.attr,
1294         NULL,
1295 };
1296
1297 static void llite_kobj_release(struct kobject *kobj)
1298 {
1299         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1300                                               ll_kset.kobj);
1301         complete(&sbi->ll_kobj_unregister);
1302 }
1303
1304 static struct kobj_type llite_ktype = {
1305         .default_attrs  = llite_attrs,
1306         .sysfs_ops      = &lustre_sysfs_ops,
1307         .release        = llite_kobj_release,
1308 };
1309
1310 static const struct llite_file_opcode {
1311         __u32       opcode;
1312         __u32       type;
1313         const char *opname;
1314 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1315         /* file operation */
1316         { LPROC_LL_DIRTY_HITS,     LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1317         { LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1318         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1319                                    "read_bytes" },
1320         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1321                                    "write_bytes" },
1322         { LPROC_LL_BRW_READ,       LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1323                                    "brw_read" },
1324         { LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1325                                    "brw_write" },
1326         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1327         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1328         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1329         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1330         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1331         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1332         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1333         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1334         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1335         /* inode operation */
1336         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1337         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1338         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1339         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1340         /* dir inode operation */
1341         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1342         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1343         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1344         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1345         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1346         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1347         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1348         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1349         /* special inode operation */
1350         { LPROC_LL_STAFS,          LPROCFS_TYPE_REGS, "statfs" },
1351         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1352         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1353         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1354         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1355         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1356         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1357         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1358 };
1359
1360 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1361 {
1362         if (!sbi->ll_stats)
1363                 return;
1364         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1365                 lprocfs_counter_add(sbi->ll_stats, op, count);
1366         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1367                  sbi->ll_stats_track_id == current->pid)
1368                 lprocfs_counter_add(sbi->ll_stats, op, count);
1369         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1370                  sbi->ll_stats_track_id == current->parent->pid)
1371                 lprocfs_counter_add(sbi->ll_stats, op, count);
1372         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1373                  sbi->ll_stats_track_id ==
1374                         from_kgid(&init_user_ns, current_gid()))
1375                 lprocfs_counter_add(sbi->ll_stats, op, count);
1376 }
1377 EXPORT_SYMBOL(ll_stats_ops_tally);
1378
1379 static const char *ra_stat_string[] = {
1380         [RA_STAT_HIT] = "hits",
1381         [RA_STAT_MISS] = "misses",
1382         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1383         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1384         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1385         [RA_STAT_FAILED_MATCH] = "failed lock match",
1386         [RA_STAT_DISCARDED] = "read but discarded",
1387         [RA_STAT_ZERO_LEN] = "zero length file",
1388         [RA_STAT_ZERO_WINDOW] = "zero size window",
1389         [RA_STAT_EOF] = "read-ahead to EOF",
1390         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1391         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1392         [RA_STAT_FAILED_REACH_END] = "failed to reach end"
1393 };
1394
1395 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1396 {
1397         struct lustre_sb_info *lsi = s2lsi(sb);
1398         struct ll_sb_info *sbi = ll_s2sbi(sb);
1399         int err, id, rc;
1400
1401         ENTRY;
1402         LASSERT(sbi);
1403
1404         if (IS_ERR_OR_NULL(llite_root))
1405                 goto out_ll_kset;
1406
1407         sbi->ll_debugfs_entry = ldebugfs_register(name, llite_root,
1408                                                   lprocfs_llite_obd_vars, sb);
1409         if (IS_ERR_OR_NULL(sbi->ll_debugfs_entry)) {
1410                 err = sbi->ll_debugfs_entry ? PTR_ERR(sbi->ll_debugfs_entry) :
1411                                               -ENOMEM;
1412                 sbi->ll_debugfs_entry = NULL;
1413                 RETURN(err);
1414         }
1415
1416         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
1417                                  &vvp_dump_pgcache_file_ops, sbi);
1418         if (rc)
1419                 CWARN("Error adding the dump_page_cache file\n");
1420
1421         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1422                                  &ll_rw_extents_stats_fops, sbi);
1423         if (rc)
1424                 CWARN("Error adding the extent_stats file\n");
1425
1426         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1427                                  "extents_stats_per_process", 0644,
1428                                  &ll_rw_extents_stats_pp_fops, sbi);
1429         if (rc)
1430                 CWARN("Error adding the extents_stats_per_process file\n");
1431
1432         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1433                                  &ll_rw_offset_stats_fops, sbi);
1434         if (rc)
1435                 CWARN("Error adding the offset_stats file\n");
1436
1437         /* File operations stats */
1438         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1439                                             LPROCFS_STATS_FLAG_NONE);
1440         if (sbi->ll_stats == NULL)
1441                 GOTO(out_debugfs, err = -ENOMEM);
1442
1443         /* do counter init */
1444         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1445                 u32 type = llite_opcode_table[id].type;
1446                 void *ptr = NULL;
1447
1448                 if (type & LPROCFS_TYPE_REGS)
1449                         ptr = "regs";
1450                 else if (type & LPROCFS_TYPE_BYTES)
1451                         ptr = "bytes";
1452                 else if (type & LPROCFS_TYPE_PAGES)
1453                         ptr = "pages";
1454                 lprocfs_counter_init(sbi->ll_stats,
1455                                      llite_opcode_table[id].opcode,
1456                                      (type & LPROCFS_CNTR_AVGMINMAX),
1457                                      llite_opcode_table[id].opname, ptr);
1458         }
1459
1460         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1461                                       sbi->ll_stats);
1462         if (err)
1463                 GOTO(out_stats, err);
1464
1465         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1466                                                LPROCFS_STATS_FLAG_NONE);
1467         if (sbi->ll_ra_stats == NULL)
1468                 GOTO(out_stats, err = -ENOMEM);
1469
1470         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1471                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1472                                      ra_stat_string[id], "pages");
1473
1474         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1475                                       sbi->ll_ra_stats);
1476         if (err)
1477                 GOTO(out_ra_stats, err);
1478
1479 out_ll_kset:
1480         /* The sysfs kset for this mount is registered here as well */
1481         sbi->ll_kset.kobj.parent = llite_kobj;
1482         sbi->ll_kset.kobj.ktype = &llite_ktype;
1483         init_completion(&sbi->ll_kobj_unregister);
1484         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1485         if (err)
1486                 GOTO(out_ra_stats, err);
1487
1488         err = kset_register(&sbi->ll_kset);
1489         if (err)
1490                 GOTO(out_ra_stats, err);
1491
1492         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1493
1494         RETURN(0);
1495 out_ra_stats:
1496         lprocfs_free_stats(&sbi->ll_ra_stats);
1497 out_stats:
1498         lprocfs_free_stats(&sbi->ll_stats);
1499 out_debugfs:
1500         ldebugfs_remove(&sbi->ll_debugfs_entry);
1501
1502         RETURN(err);
1503 }
1504
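/*
 * Undo the debugfs/sysfs registration done at mount time: remove the
 * per-mount debugfs directory, drop the sysfs links to the data and
 * metadata obd types, release the mount kset (waiting for its kobject
 * to go away), and free the stats counters.
 */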
1505 void ll_debugfs_unregister_super(struct super_block *sb)
1506 {
1507         struct lustre_sb_info *lsi = s2lsi(sb);
1508         struct ll_sb_info *sbi = ll_s2sbi(sb);
1509
1510         if (!IS_ERR_OR_NULL(sbi->ll_debugfs_entry))
1511                 ldebugfs_remove(&sbi->ll_debugfs_entry);
1512
1513         if (sbi->ll_dt_obd)
1514                 sysfs_remove_link(&sbi->ll_kset.kobj,
1515                                   sbi->ll_dt_obd->obd_type->typ_name);
1516
1517         if (sbi->ll_md_obd)
1518                 sysfs_remove_link(&sbi->ll_kset.kobj,
1519                                   sbi->ll_md_obd->obd_type->typ_name);
1520
1521         kobject_put(lsi->lsi_kobj);
1522
1523         kset_unregister(&sbi->ll_kset);
1524         wait_for_completion(&sbi->ll_kobj_unregister);
1525
1526         lprocfs_free_stats(&sbi->ll_ra_stats);
1527         lprocfs_free_stats(&sbi->ll_stats);
1528 }
1529 #undef MAX_STRING_SIZE
1530
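/* percentage of @a out of @b, returning 0 when @b is 0 to avoid dividing by zero */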
1531 #define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
1532
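/*
 * Print one extent-size histogram for reads and writes.  Buckets are
 * powers of two starting at 2^LL_HIST_START bytes; each line shows the
 * call count, per-bucket percentage and cumulative percentage, scaling
 * the printed range through the "KMGTPEZY" unit characters.
 */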
1533 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1534                                    struct seq_file *seq, int which)
1535 {
1536         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1537         unsigned long start, end, r, w;
1538         char *unitp = "KMGTPEZY";
1539         int i, units = 10;
1540         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1541
1542         read_cum = 0;
1543         write_cum = 0;
1544         start = 0;
1545
1546         for (i = 0; i < LL_HIST_MAX; i++) {
1547                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1548                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1549         }
1550
1551         for (i = 0; i < LL_HIST_MAX; i++) {
1552                 r = pp_info->pp_r_hist.oh_buckets[i];
1553                 w = pp_info->pp_w_hist.oh_buckets[i];
1554                 read_cum += r;
1555                 write_cum += w;
1556                 end = BIT(i + LL_HIST_START - units);
1557                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu  | "
1558                            "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
1559                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1560                            r, pct(r, read_tot), pct(read_cum, read_tot),
1561                            w, pct(w, write_tot), pct(write_cum, write_tot));
1562                 start = end;
1563                 if (start == BIT(10)) {
1564                         start = 1;
1565                         units += 10;
1566                         unitp++;
1567                 }
1568                 if (read_cum == read_tot && write_cum == write_tot)
1569                         break;
1570         }
1571 }
1572
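/*
 * "extents_stats_per_process" read handler: dump the per-PID extent size
 * histograms collected by ll_rw_stats_tally(), one block per tracked PID.
 */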
1573 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1574 {
1575         struct timespec64 now;
1576         struct ll_sb_info *sbi = seq->private;
1577         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1578         int k;
1579
1580         ktime_get_real_ts64(&now);
1581
1582         if (!sbi->ll_rw_stats_on) {
1583                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1584                 return 0;
1585         }
1586         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1587                    (s64)now.tv_sec, now.tv_nsec);
1588         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1589         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1590                    "extents", "calls", "%", "cum%",
1591                    "calls", "%", "cum%");
1592         spin_lock(&sbi->ll_pp_extent_lock);
1593         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1594                 if (io_extents->pp_extents[k].pid != 0) {
1595                         seq_printf(seq, "\nPID: %d\n",
1596                                    io_extents->pp_extents[k].pid);
1597                         ll_display_extents_info(io_extents, seq, k);
1598                 }
1599         }
1600         spin_unlock(&sbi->ll_pp_extent_lock);
1601         return 0;
1602 }
1603
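/*
 * "extents_stats_per_process" write handler: a value that
 * ll_stats_pid_write() maps to 0 disables collection, anything else
 * enables it; in both cases every per-PID histogram is reset.
 */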
1604 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1605                                                 const char __user *buf,
1606                                                 size_t len,
1607                                                 loff_t *off)
1608 {
1609         struct seq_file *seq = file->private_data;
1610         struct ll_sb_info *sbi = seq->private;
1611         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1612         int i;
1613         __s64 value;
1614
1615         if (len == 0)
1616                 return -EINVAL;
1617
1618         value = ll_stats_pid_write(buf, len);
1619
1620         if (value == 0)
1621                 sbi->ll_rw_stats_on = 0;
1622         else
1623                 sbi->ll_rw_stats_on = 1;
1624
1625         spin_lock(&sbi->ll_pp_extent_lock);
1626         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1627                 io_extents->pp_extents[i].pid = 0;
1628                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1629                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1630         }
1631         spin_unlock(&sbi->ll_pp_extent_lock);
1632         return len;
1633 }
1634
1635 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
1636
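/*
 * "extents_stats" read handler: same layout as the per-process file, but
 * only the aggregate histogram kept in slot LL_PROCESS_HIST_MAX is shown.
 */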
1637 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1638 {
1639         struct timespec64 now;
1640         struct ll_sb_info *sbi = seq->private;
1641         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1642
1643         ktime_get_real_ts64(&now);
1644
1645         if (!sbi->ll_rw_stats_on) {
1646                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1647                 return 0;
1648         }
1649         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1650                    (s64)now.tv_sec, now.tv_nsec);
1651
1652         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1653         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1654                    "extents", "calls", "%", "cum%",
1655                    "calls", "%", "cum%");
1656         spin_lock(&sbi->ll_lock);
1657         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1658         spin_unlock(&sbi->ll_lock);
1659
1660         return 0;
1661 }
1662
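/*
 * "extents_stats" write handler: toggles collection the same way as the
 * per-process file, but clears the aggregate slot as well as the per-PID
 * histograms (hence the <= bound on the loop below).
 */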
1663 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1664                                              const char __user *buf,
1665                                              size_t len, loff_t *off)
1666 {
1667         struct seq_file *seq = file->private_data;
1668         struct ll_sb_info *sbi = seq->private;
1669         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1670         int i;
1671         __s64 value;
1672
1673         if (len == 0)
1674                 return -EINVAL;
1675
1676         value = ll_stats_pid_write(buf, len);
1677
1678         if (value == 0)
1679                 sbi->ll_rw_stats_on = 0;
1680         else
1681                 sbi->ll_rw_stats_on = 1;
1682
1683         spin_lock(&sbi->ll_pp_extent_lock);
1684         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1685                 io_extents->pp_extents[i].pid = 0;
1686                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1687                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1688         }
1689         spin_unlock(&sbi->ll_pp_extent_lock);
1690
1691         return len;
1692 }
1693
1694 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
1695
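/*
 * Account one I/O from the llite read/write paths: bump the extent-size
 * histogram for the calling PID (and the aggregate slot), and track
 * per-process offset information so discontiguous accesses can be listed
 * in "offset_stats".  Both tables are fixed-size and recycle their
 * entries round-robin.
 */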
1696 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1697                        struct ll_file_data *file, loff_t pos,
1698                        size_t count, int rw)
1699 {
1700         int i, cur = -1;
1701         struct ll_rw_process_info *process;
1702         struct ll_rw_process_info *offset;
1703         int *off_count = &sbi->ll_rw_offset_entry_count;
1704         int *process_count = &sbi->ll_offset_process_count;
1705         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1706
1707         if (!sbi->ll_rw_stats_on)
1708                 return;
1709         process = sbi->ll_rw_process_info;
1710         offset = sbi->ll_rw_offset_info;
1711
1712         spin_lock(&sbi->ll_pp_extent_lock);
1713         /* Extent statistics */
1714         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1715                 if (io_extents->pp_extents[i].pid == pid) {
1716                         cur = i;
1717                         break;
1718                 }
1719         }
1720
1721         if (cur == -1) {
1722                 /* new process */
1723                 sbi->ll_extent_process_count =
1724                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1725                 cur = sbi->ll_extent_process_count;
1726                 io_extents->pp_extents[cur].pid = pid;
1727                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1728                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1729         }
1730
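        /* Find the histogram bucket for this I/O size: the smallest i with
         * count < 2^(LL_HIST_START + i), capped at the last bucket. */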
1731         for (i = 0; (count >= BIT(LL_HIST_START + i)) &&
1732              (i < (LL_HIST_MAX - 1)); i++);
1733         if (rw == 0) {
1734                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1735                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1736         } else {
1737                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1738                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1739         }
1740         spin_unlock(&sbi->ll_pp_extent_lock);
1741
1742         spin_lock(&sbi->ll_process_lock);
1743         /* Offset statistics */
1744         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1745                 if (process[i].rw_pid == pid) {
1746                         if (process[i].rw_last_file != file) {
1747                                 process[i].rw_range_start = pos;
1748                                 process[i].rw_last_file_pos = pos + count;
1749                                 process[i].rw_smallest_extent = count;
1750                                 process[i].rw_largest_extent = count;
1751                                 process[i].rw_offset = 0;
1752                                 process[i].rw_last_file = file;
1753                                 spin_unlock(&sbi->ll_process_lock);
1754                                 return;
1755                         }
1756                         if (process[i].rw_last_file_pos != pos) {
1757                                 *off_count =
1758                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1759                                 offset[*off_count].rw_op = process[i].rw_op;
1760                                 offset[*off_count].rw_pid = pid;
1761                                 offset[*off_count].rw_range_start =
1762                                         process[i].rw_range_start;
1763                                 offset[*off_count].rw_range_end =
1764                                         process[i].rw_last_file_pos;
1765                                 offset[*off_count].rw_smallest_extent =
1766                                         process[i].rw_smallest_extent;
1767                                 offset[*off_count].rw_largest_extent =
1768                                         process[i].rw_largest_extent;
1769                                 offset[*off_count].rw_offset =
1770                                         process[i].rw_offset;
1771                                 process[i].rw_op = rw;
1772                                 process[i].rw_range_start = pos;
1773                                 process[i].rw_smallest_extent = count;
1774                                 process[i].rw_largest_extent = count;
1775                                 process[i].rw_offset = pos -
1776                                         process[i].rw_last_file_pos;
1777                         }
1778                         if (process[i].rw_smallest_extent > count)
1779                                 process[i].rw_smallest_extent = count;
1780                         if (process[i].rw_largest_extent < count)
1781                                 process[i].rw_largest_extent = count;
1782                         process[i].rw_last_file_pos = pos + count;
1783                         spin_unlock(&sbi->ll_process_lock);
1784                         return;
1785                 }
1786         }
1787         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1788         process[*process_count].rw_pid = pid;
1789         process[*process_count].rw_op = rw;
1790         process[*process_count].rw_range_start = pos;
1791         process[*process_count].rw_last_file_pos = pos + count;
1792         process[*process_count].rw_smallest_extent = count;
1793         process[*process_count].rw_largest_extent = count;
1794         process[*process_count].rw_offset = 0;
1795         process[*process_count].rw_last_file = file;
1796         spin_unlock(&sbi->ll_process_lock);
1797 }
1798
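/*
 * "offset_stats" read handler: first list the completed (discontiguous)
 * ranges saved in ll_rw_offset_info, then the in-progress range of each
 * tracked process, one line per entry with the range start/end, smallest
 * and largest extents, and the offset jump from the previous range.
 */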
1799 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1800 {
1801         struct timespec64 now;
1802         struct ll_sb_info *sbi = seq->private;
1803         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1804         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1805         int i;
1806
1807         ktime_get_real_ts64(&now);
1808
1809         if (!sbi->ll_rw_stats_on) {
1810                 seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1811                 return 0;
1812         }
1813         spin_lock(&sbi->ll_process_lock);
1814
1815         seq_printf(seq, "snapshot_time:         %lld.%09lu (secs.nsecs)\n",
1816                    (s64)now.tv_sec, now.tv_nsec);
1817         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1818                    "R/W", "PID", "RANGE START", "RANGE END",
1819                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1820
1821         /* We stored the discontiguous offsets here; print them first */
1822         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1823                 if (offset[i].rw_pid != 0)
1824                         seq_printf(seq,
1825                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1826                                    offset[i].rw_op == READ ? 'R' : 'W',
1827                                    offset[i].rw_pid,
1828                                    offset[i].rw_range_start,
1829                                    offset[i].rw_range_end,
1830                                    (unsigned long)offset[i].rw_smallest_extent,
1831                                    (unsigned long)offset[i].rw_largest_extent,
1832                                    offset[i].rw_offset);
1833         }
1834
1835         /* Then print the current offsets for each process */
1836         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1837                 if (process[i].rw_pid != 0)
1838                         seq_printf(seq,
1839                                   "%3c %10d %14llu %14llu %17lu %17lu %14llu\n",
1840                                    process[i].rw_op == READ ? 'R' : 'W',
1841                                    process[i].rw_pid,
1842                                    process[i].rw_range_start,
1843                                    process[i].rw_last_file_pos,
1844                                    (unsigned long)process[i].rw_smallest_extent,
1845                                    (unsigned long)process[i].rw_largest_extent,
1846                                    process[i].rw_offset);
1847         }
1848         spin_unlock(&sbi->ll_process_lock);
1849
1850         return 0;
1851 }
1852
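/*
 * "offset_stats" write handler: toggles collection and zeroes both offset
 * tables so a new trace starts from a clean slate.
 */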
1853 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1854                                             const char __user *buf,
1855                                             size_t len, loff_t *off)
1856 {
1857         struct seq_file *seq = file->private_data;
1858         struct ll_sb_info *sbi = seq->private;
1859         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1860         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1861         __s64 value;
1862
1863         if (len == 0)
1864                 return -EINVAL;
1865
1866         value = ll_stats_pid_write(buf, len);
1867
1868         if (value == 0)
1869                 sbi->ll_rw_stats_on = 0;
1870         else
1871                 sbi->ll_rw_stats_on = 1;
1872
1873         spin_lock(&sbi->ll_process_lock);
1874         sbi->ll_offset_process_count = 0;
1875         sbi->ll_rw_offset_entry_count = 0;
1876         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1877                LL_PROCESS_HIST_MAX);
1878         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1879                LL_OFFSET_HIST_MAX);
1880         spin_unlock(&sbi->ll_process_lock);
1881
1882         return len;
1883 }
1884
1885 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);