lustre/llite/lproc_llite.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #ifdef HAVE_UIDGID_HEADER
37 # include <linux/uidgid.h>
38 #endif
39 #include <uapi/linux/lustre/lustre_param.h>
40 #include <lprocfs_status.h>
41 #include <obd_support.h>
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 static struct kobject *llite_kobj;
47 static struct dentry *llite_root;
48
49 int llite_tunables_register(void)
50 {
51         int rc = 0;
52
53         llite_kobj = class_setup_tunables("llite");
54         if (IS_ERR(llite_kobj))
55                 return PTR_ERR(llite_kobj);
56
57         llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
58         if (IS_ERR_OR_NULL(llite_root)) {
59                 rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
60                 llite_root = NULL;
61                 kobject_put(llite_kobj);
62                 llite_kobj = NULL;
63         }
64
65         return rc;
66 }
67
68 void llite_tunables_unregister(void)
69 {
70         if (llite_kobj) {
71                 kobject_put(llite_kobj);
72                 llite_kobj = NULL;
73         }
74
75         if (!IS_ERR_OR_NULL(llite_root)) {
76                 debugfs_remove(llite_root);
77                 llite_root = NULL;
78         }
79 }
80
81 /* <debugfs>/lustre/llite mount point registration */
82 static const struct file_operations ll_rw_extents_stats_fops;
83 static const struct file_operations ll_rw_extents_stats_pp_fops;
84 static const struct file_operations ll_rw_offset_stats_fops;
85
86 /**
87  * ll_stats_pid_write() - Determine if stats collection should be enabled
88  * @buf: Buffer containing the data written
89  * @len: Number of bytes in the buffer
90  *
91  * Several proc files begin collecting stats when a value is written, and stop
92  * collecting when either '0' or 'disable' is written. This function checks the
93  * written value to see if collection should be enabled or disabled.
94  *
95  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
96  * equivalent of a number is written, that number is returned. Otherwise,
97  * 1 is returned. Non-zero return values indicate collection should be enabled.
98  */
99 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
100 {
101         unsigned long long value = 1;
102         char kernbuf[16];
103         int rc;
104
105         rc = kstrtoull_from_user(buf, len, 0, &value);
106         if (rc < 0 && len < sizeof(kernbuf)) {
107                 if (copy_from_user(kernbuf, buf, len))
108                         return -EFAULT;
109                 kernbuf[len] = 0;
110
111                 if (kernbuf[len - 1] == '\n')
112                         kernbuf[len - 1] = 0;
113
114                 if (strncasecmp(kernbuf, "disable", 7) == 0)
115                         value = 0;
116         }
117
118         return value;
119 }
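/*
 * Editorial sketch (added in review, not part of the original file): a
 * hypothetical debugfs write handler showing how ll_stats_pid_write() is
 * typically consumed.  The name example_stats_seq_write is invented for
 * illustration; sbi->ll_rw_stats_on is the flag the extents/offset stats
 * files below key off.  Kept under #if 0 so it does not affect the build.
 */
#if 0
static ssize_t example_stats_seq_write(struct file *file,
                                       const char __user *buf,
                                       size_t len, loff_t *off)
{
        struct seq_file *seq = file->private_data;
        struct ll_sb_info *sbi = seq->private;
        s64 value = ll_stats_pid_write(buf, len);

        if (value == 0)
                sbi->ll_rw_stats_on = 0;        /* "0" or "disable" written */
        else
                sbi->ll_rw_stats_on = 1;        /* any other value enables */

        return len;
}
#endif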
120
121 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
122                               char *buf)
123 {
124         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
125                                               ll_kset.kobj);
126         struct obd_statfs osfs;
127         int rc;
128
129         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
130         if (rc)
131                 return rc;
132
133         return sprintf(buf, "%u\n", osfs.os_bsize);
134 }
135 LUSTRE_RO_ATTR(blocksize);
136
137 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
138                                    char *buf)
139 {
140         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
141                                               ll_kset.kobj);
142
143         return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
144 }
145
146 static ssize_t stat_blocksize_store(struct kobject *kobj,
147                                     struct attribute *attr,
148                                     const char *buffer,
149                                     size_t count)
150 {
151         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
152                                               ll_kset.kobj);
153         unsigned int val;
154         int rc;
155
156         rc = kstrtouint(buffer, 10, &val);
157         if (rc)
158                 return rc;
159
160         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
161                 return -ERANGE;
162
163         sbi->ll_stat_blksize = val;
164
165         return count;
166 }
167 LUSTRE_RW_ATTR(stat_blocksize);
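/*
 * Editorial note (added in review): the store handler above accepts 0
 * (no override) or any power of two of at least PAGE_SIZE.  The
 * val & (val - 1) idiom clears the lowest set bit, so it is zero exactly
 * when val is a power of two.  A minimal restatement of the check, with an
 * invented name and kept under #if 0 for illustration only:
 */
#if 0
static bool example_valid_stat_blksize(unsigned int val)
{
        /* 0 means "do not override"; else require a power of two >= PAGE_SIZE */
        return val == 0 || (val >= PAGE_SIZE && (val & (val - 1)) == 0);
}
#endif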
168
169 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
170                                 char *buf)
171 {
172         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
173                                               ll_kset.kobj);
174         struct obd_statfs osfs;
175         u32 blk_size;
176         u64 result;
177         int rc;
178
179         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
180         if (rc)
181                 return rc;
182
183         blk_size = osfs.os_bsize >> 10;
184         result = osfs.os_blocks;
185
186         while (blk_size >>= 1)
187                 result <<= 1;
188
189         return sprintf(buf, "%llu\n", result);
190 }
191 LUSTRE_RO_ATTR(kbytestotal);
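/*
 * Editorial note (added in review): the shift loop in kbytestotal_show()
 * (and the kbytesfree/kbytesavail variants below) scales a block count to
 * KiB without a 64-bit multiply, assuming os_bsize is a power of two of at
 * least 1024.  For example, os_bsize = 4096 gives blk_size = 4, so the
 * result is shifted left twice (x4 KiB per block).  An equivalent
 * standalone helper, with an invented name, under #if 0 for illustration:
 */
#if 0
static u64 example_blocks_to_kib(u64 blocks, u32 os_bsize)
{
        u32 blk_size = os_bsize >> 10;  /* block size in KiB */

        while (blk_size >>= 1)          /* multiply by remaining power of two */
                blocks <<= 1;

        return blocks;
}
#endif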
192
193 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
194                                char *buf)
195 {
196         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
197                                               ll_kset.kobj);
198         struct obd_statfs osfs;
199         u32 blk_size;
200         u64 result;
201         int rc;
202
203         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
204         if (rc)
205                 return rc;
206
207         blk_size = osfs.os_bsize >> 10;
208         result = osfs.os_bfree;
209
210         while (blk_size >>= 1)
211                 result <<= 1;
212
213         return sprintf(buf, "%llu\n", result);
214 }
215 LUSTRE_RO_ATTR(kbytesfree);
216
217 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
218                                 char *buf)
219 {
220         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
221                                               ll_kset.kobj);
222         struct obd_statfs osfs;
223         u32 blk_size;
224         u64 result;
225         int rc;
226
227         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
228         if (rc)
229                 return rc;
230
231         blk_size = osfs.os_bsize >> 10;
232         result = osfs.os_bavail;
233
234         while (blk_size >>= 1)
235                 result <<= 1;
236
237         return sprintf(buf, "%llu\n", result);
238 }
239 LUSTRE_RO_ATTR(kbytesavail);
240
241 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
242                                char *buf)
243 {
244         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
245                                               ll_kset.kobj);
246         struct obd_statfs osfs;
247         int rc;
248
249         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
250         if (rc)
251                 return rc;
252
253         return sprintf(buf, "%llu\n", osfs.os_files);
254 }
255 LUSTRE_RO_ATTR(filestotal);
256
257 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
258                               char *buf)
259 {
260         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
261                                               ll_kset.kobj);
262         struct obd_statfs osfs;
263         int rc;
264
265         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
266         if (rc)
267                 return rc;
268
269         return sprintf(buf, "%llu\n", osfs.os_ffree);
270 }
271 LUSTRE_RO_ATTR(filesfree);
272
273 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
274                                 char *buf)
275 {
276         return sprintf(buf, "local client\n");
277 }
278 LUSTRE_RO_ATTR(client_type);
279
280 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
281                            char *buf)
282 {
283         return sprintf(buf, "lustre\n");
284 }
285 LUSTRE_RO_ATTR(fstype);
286
287 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
288                          char *buf)
289 {
290         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
291                                               ll_kset.kobj);
292
293         return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
294 }
295 LUSTRE_RO_ATTR(uuid);
296
297 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
298 {
299         struct super_block *sb = m->private;
300
301         /*
302          * See description of statistical counters in struct cl_site, and
303          * struct lu_site.
304          */
305         return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
306 }
307
308 LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
309
310 static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
311 {
312         struct super_block *sb = m->private;
313         struct ll_sb_info *sbi = ll_s2sbi(sb);
314         unsigned long ra_max_mb;
315
316         spin_lock(&sbi->ll_lock);
317         ra_max_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages);
318         spin_unlock(&sbi->ll_lock);
319
320         seq_printf(m, "%lu\n", ra_max_mb);
321         return 0;
322 }
323
324 static ssize_t
325 ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer,
326                               size_t count, loff_t *off)
327 {
328         struct seq_file *m = file->private_data;
329         struct super_block *sb = m->private;
330         struct ll_sb_info *sbi = ll_s2sbi(sb);
331         s64 ra_max_mb, pages_number;
332         int rc;
333
334         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_mb, 'M');
335         if (rc)
336                 return rc;
337
338         pages_number = round_up(ra_max_mb, 1024 * 1024) >> PAGE_SHIFT;
339         if (pages_number < 0 || pages_number > cfs_totalram_pages() / 2) {
340                 /* 1/2 of RAM */
341                 CERROR("%s: can't set max_readahead_mb=%llu > %luMB\n",
342                        ll_get_fsname(sb, NULL, 0), PAGES_TO_MiB(pages_number),
343                        PAGES_TO_MiB(cfs_totalram_pages()));
344                 return -ERANGE;
345         }
346
347         spin_lock(&sbi->ll_lock);
348         sbi->ll_ra_info.ra_max_pages = pages_number;
349         spin_unlock(&sbi->ll_lock);
350
351         return count;
352 }
353
354 LDEBUGFS_SEQ_FOPS(ll_max_readahead_mb);
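/*
 * Editorial note (added in review): despite its name, ra_max_mb above holds
 * a byte count after lprocfs_str_with_units_to_s64() (a bare number is
 * treated as MiB because of the 'M' default).  The handler rounds it up to
 * a whole MiB and converts it to pages; with 4 KiB pages, writing "64"
 * yields 64 MiB = 67108864 bytes = 16384 pages.  The same conversion,
 * restated with an invented name under #if 0 for illustration:
 */
#if 0
static long example_ra_bytes_to_pages(s64 bytes)
{
        /* round up to a 1 MiB boundary, then convert bytes to pages */
        return round_up(bytes, 1024 * 1024) >> PAGE_SHIFT;
}
#endif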
355
356 static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
357 {
358         struct super_block *sb = m->private;
359         struct ll_sb_info *sbi = ll_s2sbi(sb);
360         unsigned long ra_max_file_mb;
361
362         spin_lock(&sbi->ll_lock);
363         ra_max_file_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file);
364         spin_unlock(&sbi->ll_lock);
365
366         seq_printf(m, "%lu\n", ra_max_file_mb);
367         return 0;
368 }
369
370 static ssize_t
371 ll_max_readahead_per_file_mb_seq_write(struct file *file,
372                                        const char __user *buffer,
373                                        size_t count, loff_t *off)
374 {
375         struct seq_file *m = file->private_data;
376         struct super_block *sb = m->private;
377         struct ll_sb_info *sbi = ll_s2sbi(sb);
378         s64 ra_max_file_mb, pages_number;
379         int rc;
380
381         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_file_mb,
382                                            'M');
383         if (rc)
384                 return rc;
385
386         pages_number = round_up(ra_max_file_mb, 1024 * 1024) >> PAGE_SHIFT;
387         if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
388                 CERROR("%s: can't set max_readahead_per_file_mb=%llu > max_read_ahead_mb=%lu\n",
389                        ll_get_fsname(sb, NULL, 0), PAGES_TO_MiB(pages_number),
390                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
391                 return -ERANGE;
392         }
393
394         spin_lock(&sbi->ll_lock);
395         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
396         spin_unlock(&sbi->ll_lock);
397
398         return count;
399 }
400
401 LDEBUGFS_SEQ_FOPS(ll_max_readahead_per_file_mb);
402
403 static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v)
404 {
405         struct super_block *sb = m->private;
406         struct ll_sb_info *sbi = ll_s2sbi(sb);
407         unsigned long ra_max_whole_mb;
408
409         spin_lock(&sbi->ll_lock);
410         ra_max_whole_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages);
411         spin_unlock(&sbi->ll_lock);
412
413         seq_printf(m, "%lu\n", ra_max_whole_mb);
414         return 0;
415 }
416
417 static ssize_t
418 ll_max_read_ahead_whole_mb_seq_write(struct file *file,
419                                      const char __user *buffer,
420                                      size_t count, loff_t *off)
421 {
422         struct seq_file *m = file->private_data;
423         struct super_block *sb = m->private;
424         struct ll_sb_info *sbi = ll_s2sbi(sb);
425         s64 ra_max_whole_mb, pages_number;
426         int rc;
427
428         rc = lprocfs_str_with_units_to_s64(buffer, count, &ra_max_whole_mb,
429                                            'M');
430         if (rc)
431                 return rc;
432
433         pages_number = round_up(ra_max_whole_mb, 1024 * 1024) >> PAGE_SHIFT;
434         /* Cap this at the current max readahead window size; the readahead
435          * algorithm does this anyway, so it's pointless to set it larger.
436          */
437         if (pages_number < 0 ||
438             pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
439                 CERROR("%s: can't set max_read_ahead_whole_mb=%llu > max_read_ahead_per_file_mb=%lu\n",
440                        ll_get_fsname(sb, NULL, 0), PAGES_TO_MiB(pages_number),
441                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
442                 return -ERANGE;
443         }
444
445         spin_lock(&sbi->ll_lock);
446         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
447         spin_unlock(&sbi->ll_lock);
448
449         return count;
450 }
451
452 LDEBUGFS_SEQ_FOPS(ll_max_read_ahead_whole_mb);
453
454 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
455 {
456         struct super_block     *sb    = m->private;
457         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
458         struct cl_client_cache *cache = sbi->ll_cache;
459         long max_cached_mb;
460         long unused_mb;
461
462         max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
463         unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
464         seq_printf(m, "users: %d\n"
465                       "max_cached_mb: %ld\n"
466                       "used_mb: %ld\n"
467                       "unused_mb: %ld\n"
468                       "reclaim_count: %u\n",
469                    atomic_read(&cache->ccc_users),
470                    max_cached_mb,
471                    max_cached_mb - unused_mb,
472                    unused_mb,
473                    cache->ccc_lru_shrinkers);
474         return 0;
475 }
476
477 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
478                                           const char __user *buffer,
479                                           size_t count, loff_t *off)
480 {
481         struct seq_file *m = file->private_data;
482         struct super_block *sb = m->private;
483         struct ll_sb_info *sbi = ll_s2sbi(sb);
484         struct cl_client_cache *cache = sbi->ll_cache;
485         struct lu_env *env;
486         long diff = 0;
487         long nrpages = 0;
488         __u16 refcheck;
489         __s64 pages_number;
490         int rc;
491         char kernbuf[128];
492
493         ENTRY;
494         if (count >= sizeof(kernbuf))
495                 RETURN(-EINVAL);
496
497         if (copy_from_user(kernbuf, buffer, count))
498                 RETURN(-EFAULT);
499         kernbuf[count] = 0;
500
501         buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
502                   kernbuf;
503         rc = lprocfs_str_with_units_to_s64(buffer, count, &pages_number, 'M');
504         if (rc)
505                 RETURN(rc);
506
507         pages_number >>= PAGE_SHIFT;
508
509         if (pages_number < 0 || pages_number > cfs_totalram_pages()) {
510                 CERROR("%s: can't set max cache more than %lu MB\n",
511                        ll_get_fsname(sb, NULL, 0),
512                        PAGES_TO_MiB(cfs_totalram_pages()));
513                 RETURN(-ERANGE);
514         }
515         /* Allow enough cache so clients can make well-formed RPCs */
516         pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
517
518         spin_lock(&sbi->ll_lock);
519         diff = pages_number - cache->ccc_lru_max;
520         spin_unlock(&sbi->ll_lock);
521
522         /* easy - add more LRU slots. */
523         if (diff >= 0) {
524                 atomic_long_add(diff, &cache->ccc_lru_left);
525                 GOTO(out, rc = 0);
526         }
527
528         env = cl_env_get(&refcheck);
529         if (IS_ERR(env))
530                 RETURN(PTR_ERR(env));
531
532         diff = -diff;
533         while (diff > 0) {
534                 long tmp;
535
536                 /* reduce LRU budget from free slots. */
537                 do {
538                         long ov, nv;
539
540                         ov = atomic_long_read(&cache->ccc_lru_left);
541                         if (ov == 0)
542                                 break;
543
544                         nv = ov > diff ? ov - diff : 0;
545                         rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
546                         if (likely(ov == rc)) {
547                                 diff -= ov - nv;
548                                 nrpages += ov - nv;
549                                 break;
550                         }
551                 } while (1);
552
553                 if (diff <= 0)
554                         break;
555
556                 if (sbi->ll_dt_exp == NULL) { /* being initialized */
557                         rc = -ENODEV;
558                         break;
559                 }
560
561                 /* difficult - have to ask OSCs to drop LRU slots. */
562                 tmp = diff << 1;
563                 rc = obd_set_info_async(env, sbi->ll_dt_exp,
564                                 sizeof(KEY_CACHE_LRU_SHRINK),
565                                 KEY_CACHE_LRU_SHRINK,
566                                 sizeof(tmp), &tmp, NULL);
567                 if (rc < 0)
568                         break;
569         }
570         cl_env_put(env, &refcheck);
571
572 out:
573         if (rc >= 0) {
574                 spin_lock(&sbi->ll_lock);
575                 cache->ccc_lru_max = pages_number;
576                 spin_unlock(&sbi->ll_lock);
577                 rc = count;
578         } else {
579                 atomic_long_add(nrpages, &cache->ccc_lru_left);
580         }
581         return rc;
582 }
583
584 LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
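/*
 * Editorial note (added in review): when max_cached_mb is lowered, the
 * write handler above first reclaims budget from the free-slot counter
 * ccc_lru_left with a lock-free cmpxchg retry loop, and only asks the OSCs
 * to shrink their LRUs (KEY_CACHE_LRU_SHRINK) for whatever still remains.
 * The core of that loop, restated as a standalone helper with an invented
 * name, under #if 0 for illustration:
 */
#if 0
/* Take up to @want slots from @left; returns the number actually taken. */
static long example_take_from_lru_left(atomic_long_t *left, long want)
{
        long ov, nv;

        do {
                ov = atomic_long_read(left);
                if (ov == 0)
                        return 0;
                nv = ov > want ? ov - want : 0;
        } while (atomic_long_cmpxchg(left, ov, nv) != ov);

        return ov - nv;
}
#endif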
585
586 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
587                               char *buf)
588 {
589         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
590                                               ll_kset.kobj);
591
592         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
593 }
594
595 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
596                                const char *buffer, size_t count)
597 {
598         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
599                                               ll_kset.kobj);
600         bool val;
601         int tmp;
602         int rc;
603
604         if (!sbi->ll_dt_exp)
605                 /* Not set up yet */
606                 return -EAGAIN;
607
608         rc = kstrtobool(buffer, &val);
609         if (rc)
610                 return rc;
611         if (val)
612                 sbi->ll_flags |= LL_SBI_CHECKSUM;
613         else
614                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
615         tmp = val;
616
617         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
618                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
619         if (rc)
620                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
621
622         return count;
623 }
624 LUSTRE_RW_ATTR(checksums);
625
626 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
627
628 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
629                               enum stats_track_type type)
630 {
631         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
632                                               ll_kset.kobj);
633
634         if (sbi->ll_stats_track_type == type)
635                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
636         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
637                 return sprintf(buf, "0 (all)\n");
638
639         return sprintf(buf, "untracked\n");
640 }
641
642 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
643                               size_t count, enum stats_track_type type)
644 {
645         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
646                                               ll_kset.kobj);
647         unsigned long pid;
648         int rc;
649
650         rc = kstrtoul(buffer, 10, &pid);
651         if (rc)
652                 return rc;
653
654         sbi->ll_stats_track_id = pid;
655         if (pid == 0)
656                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
657         else
658                 sbi->ll_stats_track_type = type;
659         lprocfs_clear_stats(sbi->ll_stats);
660         return count;
661 }
662
663 static ssize_t stats_track_pid_show(struct kobject *kobj,
664                                     struct attribute *attr,
665                                     char *buf)
666 {
667         return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
668 }
669
670 static ssize_t stats_track_pid_store(struct kobject *kobj,
671                                      struct attribute *attr,
672                                      const char *buffer,
673                                      size_t count)
674 {
675         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
676 }
677 LUSTRE_RW_ATTR(stats_track_pid);
678
679 static ssize_t stats_track_ppid_show(struct kobject *kobj,
680                                      struct attribute *attr,
681                                      char *buf)
682 {
683         return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
684 }
685
686 static ssize_t stats_track_ppid_store(struct kobject *kobj,
687                                       struct attribute *attr,
688                                       const char *buffer,
689                                       size_t count)
690 {
691         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
692 }
693 LUSTRE_RW_ATTR(stats_track_ppid);
694
695 static ssize_t stats_track_gid_show(struct kobject *kobj,
696                                     struct attribute *attr,
697                                     char *buf)
698 {
699         return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
700 }
701
702 static ssize_t stats_track_gid_store(struct kobject *kobj,
703                                      struct attribute *attr,
704                                      const char *buffer,
705                                      size_t count)
706 {
707         return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
708 }
709 LUSTRE_RW_ATTR(stats_track_gid);
710
711 static ssize_t statahead_running_max_show(struct kobject *kobj,
712                                           struct attribute *attr,
713                                           char *buf)
714 {
715         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
716                                               ll_kset.kobj);
717
718         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
719 }
720
721 static ssize_t statahead_running_max_store(struct kobject *kobj,
722                                            struct attribute *attr,
723                                            const char *buffer,
724                                            size_t count)
725 {
726         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
727                                               ll_kset.kobj);
728         unsigned long val;
729         int rc;
730
731         rc = kstrtoul(buffer, 0, &val);
732         if (rc)
733                 return rc;
734
735         if (val <= LL_SA_RUNNING_MAX) {
736                 sbi->ll_sa_running_max = val;
737                 return count;
738         }
739
740         CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
741                val, LL_SA_RUNNING_MAX);
742
743         return -ERANGE;
744 }
745 LUSTRE_RW_ATTR(statahead_running_max);
746
747 static ssize_t statahead_max_show(struct kobject *kobj,
748                                   struct attribute *attr,
749                                   char *buf)
750 {
751         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
752                                               ll_kset.kobj);
753
754         return sprintf(buf, "%u\n", sbi->ll_sa_max);
755 }
756
757 static ssize_t statahead_max_store(struct kobject *kobj,
758                                    struct attribute *attr,
759                                    const char *buffer,
760                                    size_t count)
761 {
762         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
763                                               ll_kset.kobj);
764         unsigned long val;
765         int rc;
766
767         rc = kstrtoul(buffer, 0, &val);
768         if (rc)
769                 return rc;
770
771         if (val <= LL_SA_RPC_MAX)
772                 sbi->ll_sa_max = val;
773         else
774                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
775                        val, LL_SA_RPC_MAX);
776
777         return count;
778 }
779 LUSTRE_RW_ATTR(statahead_max);
780
781 static ssize_t statahead_agl_show(struct kobject *kobj,
782                                   struct attribute *attr,
783                                   char *buf)
784 {
785         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
786                                               ll_kset.kobj);
787
788         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
789 }
790
791 static ssize_t statahead_agl_store(struct kobject *kobj,
792                                    struct attribute *attr,
793                                    const char *buffer,
794                                    size_t count)
795 {
796         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
797                                               ll_kset.kobj);
798         bool val;
799         int rc;
800
801         rc = kstrtobool(buffer, &val);
802         if (rc)
803                 return rc;
804
805         if (val)
806                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
807         else
808                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
809
810         return count;
811 }
812 LUSTRE_RW_ATTR(statahead_agl);
813
814 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
815 {
816         struct super_block *sb = m->private;
817         struct ll_sb_info *sbi = ll_s2sbi(sb);
818
819         seq_printf(m, "statahead total: %u\n"
820                       "statahead wrong: %u\n"
821                       "agl total: %u\n",
822                    atomic_read(&sbi->ll_sa_total),
823                    atomic_read(&sbi->ll_sa_wrong),
824                    atomic_read(&sbi->ll_agl_total));
825         return 0;
826 }
827
828 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
829
830 static ssize_t lazystatfs_show(struct kobject *kobj,
831                                struct attribute *attr,
832                                char *buf)
833 {
834         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
835                                               ll_kset.kobj);
836
837         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
838 }
839
840 static ssize_t lazystatfs_store(struct kobject *kobj,
841                                 struct attribute *attr,
842                                 const char *buffer,
843                                 size_t count)
844 {
845         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
846                                               ll_kset.kobj);
847         bool val;
848         int rc;
849
850         rc = kstrtobool(buffer, &val);
851         if (rc)
852                 return rc;
853
854         if (val)
855                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
856         else
857                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
858
859         return count;
860 }
861 LUSTRE_RW_ATTR(lazystatfs);
862
863 static ssize_t statfs_max_age_show(struct kobject *kobj, struct attribute *attr,
864                                    char *buf)
865 {
866         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
867                                               ll_kset.kobj);
868
869         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
870 }
871
872 static ssize_t statfs_max_age_store(struct kobject *kobj,
873                                     struct attribute *attr, const char *buffer,
874                                     size_t count)
875 {
876         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
877                                               ll_kset.kobj);
878         unsigned int val;
879         int rc;
880
881         rc = kstrtouint(buffer, 10, &val);
882         if (rc)
883                 return rc;
884         if (val > OBD_STATFS_CACHE_MAX_AGE)
885                 return -EINVAL;
886
887         sbi->ll_statfs_max_age = val;
888
889         return count;
890 }
891 LUSTRE_RW_ATTR(statfs_max_age);
892
893 static ssize_t max_easize_show(struct kobject *kobj,
894                                struct attribute *attr,
895                                char *buf)
896 {
897         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
898                                               ll_kset.kobj);
899         unsigned int ealen;
900         int rc;
901
902         rc = ll_get_max_mdsize(sbi, &ealen);
903         if (rc)
904                 return rc;
905
906         return sprintf(buf, "%u\n", ealen);
907 }
908 LUSTRE_RO_ATTR(max_easize);
909
910 /**
911  * Get default_easize.
912  *
913  * \see client_obd::cl_default_mds_easize
914  *
915  * \param[in] kobj      kobject of the llite sysfs entry
916  * \param[in] attr      unused for single entry
917  * \param[out] buf      buffer where the value is printed
918  * \retval positive     bytes printed to \a buf on success
919  * \retval negative     negated errno on failure
920  */
921 static ssize_t default_easize_show(struct kobject *kobj,
922                                    struct attribute *attr,
923                                    char *buf)
924 {
925         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
926                                               ll_kset.kobj);
927         unsigned int ealen;
928         int rc;
929
930         rc = ll_get_default_mdsize(sbi, &ealen);
931         if (rc)
932                 return rc;
933
934         return sprintf(buf, "%u\n", ealen);
935 }
936
937 /**
938  * Set default_easize.
939  *
940  * Range checking on the passed value is handled by
941  * ll_set_default_mdsize().
942  *
943  * \see client_obd::cl_default_mds_easize
944  *
945  * \param[in] kobj      kobject of the llite sysfs entry
946  * \param[in] attr      unused for single entry
947  * \param[in] buffer    string passed from user space
948  * \param[in] count     \a buffer length
949  *
950  * \retval positive     \a count on success
951  * \retval negative     negated errno on failure
952  */
953 static ssize_t default_easize_store(struct kobject *kobj,
954                                     struct attribute *attr,
955                                     const char *buffer,
956                                     size_t count)
957 {
958         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
959                                               ll_kset.kobj);
960         unsigned int val;
961         int rc;
962
963         if (count == 0)
964                 return 0;
965
966         rc = kstrtouint(buffer, 10, &val);
967         if (rc)
968                 return rc;
969
970         rc = ll_set_default_mdsize(sbi, val);
971         if (rc)
972                 return rc;
973
974         return count;
975 }
976 LUSTRE_RW_ATTR(default_easize);
977
978 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
979 {
980         const char *str[] = LL_SBI_FLAGS;
981         struct super_block *sb = m->private;
982         int flags = ll_s2sbi(sb)->ll_flags;
983         int i = 0;
984
985         while (flags != 0) {
986                 if (ARRAY_SIZE(str) <= i) {
987                         CERROR("%s: LL_SBI_FLAGS is out of sync with the sbi flags, please update it\n",
988                                ll_get_fsname(sb, NULL, 0));
989                         return -EINVAL;
990                 }
991
992                 if (flags & 0x1)
993                         seq_printf(m, "%s ", str[i]);
994                 flags >>= 1;
995                 ++i;
996         }
997         seq_printf(m, "\b\n");
998         return 0;
999 }
1000
1001 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
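/*
 * Editorial note (added in review): ll_sbi_flags_seq_show() walks ll_flags
 * one bit at a time and prints str[i] for every set bit i, which is why the
 * LL_SBI_FLAGS name array must stay in sync with the LL_SBI_* bit
 * definitions.  A flags word with bits 0 and 3 set, for instance, prints
 * str[0] and str[3].  The walk, restated with an invented name under #if 0:
 */
#if 0
static void example_print_flag_names(struct seq_file *m, unsigned int flags,
                                     const char **names, size_t count)
{
        size_t i;

        for (i = 0; flags != 0 && i < count; i++, flags >>= 1)
                if (flags & 1)
                        seq_printf(m, "%s ", names[i]);
}
#endif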
1002
1003 static ssize_t xattr_cache_show(struct kobject *kobj,
1004                                 struct attribute *attr,
1005                                 char *buf)
1006 {
1007         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1008                                               ll_kset.kobj);
1009
1010         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
1011 }
1012
1013 static ssize_t xattr_cache_store(struct kobject *kobj,
1014                                  struct attribute *attr,
1015                                  const char *buffer,
1016                                  size_t count)
1017 {
1018         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1019                                               ll_kset.kobj);
1020         bool val;
1021         int rc;
1022
1023         rc = kstrtobool(buffer, &val);
1024         if (rc)
1025                 return rc;
1026
1027         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
1028                 return -ENOTSUPP;
1029
1030         sbi->ll_xattr_cache_enabled = val;
1031         sbi->ll_xattr_cache_set = 1;
1032
1033         return count;
1034 }
1035 LUSTRE_RW_ATTR(xattr_cache);
1036
1037 static ssize_t tiny_write_show(struct kobject *kobj,
1038                                struct attribute *attr,
1039                                char *buf)
1040 {
1041         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1042                                               ll_kset.kobj);
1043
1044         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
1045 }
1046
1047 static ssize_t tiny_write_store(struct kobject *kobj,
1048                                 struct attribute *attr,
1049                                 const char *buffer,
1050                                 size_t count)
1051 {
1052         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1053                                               ll_kset.kobj);
1054         bool val;
1055         int rc;
1056
1057         rc = kstrtobool(buffer, &val);
1058         if (rc)
1059                 return rc;
1060
1061         spin_lock(&sbi->ll_lock);
1062         if (val)
1063                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
1064         else
1065                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1066         spin_unlock(&sbi->ll_lock);
1067
1068         return count;
1069 }
1070 LUSTRE_RW_ATTR(tiny_write);
1071
1072 static ssize_t fast_read_show(struct kobject *kobj,
1073                               struct attribute *attr,
1074                               char *buf)
1075 {
1076         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1077                                               ll_kset.kobj);
1078
1079         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1080 }
1081
1082 static ssize_t fast_read_store(struct kobject *kobj,
1083                                struct attribute *attr,
1084                                const char *buffer,
1085                                size_t count)
1086 {
1087         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1088                                               ll_kset.kobj);
1089         bool val;
1090         int rc;
1091
1092         rc = kstrtobool(buffer, &val);
1093         if (rc)
1094                 return rc;
1095
1096         spin_lock(&sbi->ll_lock);
1097         if (val)
1098                 sbi->ll_flags |= LL_SBI_FAST_READ;
1099         else
1100                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1101         spin_unlock(&sbi->ll_lock);
1102
1103         return count;
1104 }
1105 LUSTRE_RW_ATTR(fast_read);
1106
1107 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1108 {
1109         struct super_block      *sb    = m->private;
1110         struct ll_sb_info       *sbi   = ll_s2sbi(sb);
1111         struct cl_client_cache  *cache = sbi->ll_cache;
1112         long pages;
1113         int mb;
1114
1115         pages = atomic_long_read(&cache->ccc_unstable_nr);
1116         mb    = (pages * PAGE_SIZE) >> 20;
1117
1118         seq_printf(m, "unstable_check:     %8d\n"
1119                       "unstable_pages: %12ld\n"
1120                       "unstable_mb:        %8d\n",
1121                    cache->ccc_unstable_check, pages, mb);
1122         return 0;
1123 }
1124
1125 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1126                                            const char __user *buffer,
1127                                            size_t count, loff_t *unused)
1128 {
1129         struct seq_file *seq = file->private_data;
1130         struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1131         char kernbuf[128];
1132         bool val;
1133         int rc;
1134
1135         if (count == 0)
1136                 return 0;
1137         if (count >= sizeof(kernbuf))
1138                 return -EINVAL;
1139
1140         if (copy_from_user(kernbuf, buffer, count))
1141                 return -EFAULT;
1142         kernbuf[count] = 0;
1143
1144         buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
1145                   kernbuf;
1146         rc = kstrtobool_from_user(buffer, count, &val);
1147         if (rc < 0)
1148                 return rc;
1149
1150         /* borrow lru lock to set the value */
1151         spin_lock(&sbi->ll_cache->ccc_lru_lock);
1152         sbi->ll_cache->ccc_unstable_check = val;
1153         spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1154
1155         return count;
1156 }
1157
1158 LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
1159
1160 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1161 {
1162         struct super_block *sb = m->private;
1163         struct ll_sb_info *sbi = ll_s2sbi(sb);
1164         struct root_squash_info *squash = &sbi->ll_squash;
1165
1166         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1167         return 0;
1168 }
1169
1170 static ssize_t ll_root_squash_seq_write(struct file *file,
1171                                         const char __user *buffer,
1172                                         size_t count, loff_t *off)
1173 {
1174         struct seq_file *m = file->private_data;
1175         struct super_block *sb = m->private;
1176         struct ll_sb_info *sbi = ll_s2sbi(sb);
1177         struct root_squash_info *squash = &sbi->ll_squash;
1178
1179         return lprocfs_wr_root_squash(buffer, count, squash,
1180                                       ll_get_fsname(sb, NULL, 0));
1181 }
1182
1183 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1184
1185 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1186 {
1187         struct super_block *sb = m->private;
1188         struct ll_sb_info *sbi = ll_s2sbi(sb);
1189         struct root_squash_info *squash = &sbi->ll_squash;
1190         int len;
1191
1192         down_read(&squash->rsi_sem);
1193         if (!list_empty(&squash->rsi_nosquash_nids)) {
1194                 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1195                                         &squash->rsi_nosquash_nids);
1196                 m->count += len;
1197                 seq_putc(m, '\n');
1198         } else {
1199                 seq_puts(m, "NONE\n");
1200         }
1201         up_read(&squash->rsi_sem);
1202
1203         return 0;
1204 }
1205
1206 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1207                                           const char __user *buffer,
1208                                           size_t count, loff_t *off)
1209 {
1210         struct seq_file *m = file->private_data;
1211         struct super_block *sb = m->private;
1212         struct ll_sb_info *sbi = ll_s2sbi(sb);
1213         struct root_squash_info *squash = &sbi->ll_squash;
1214         int rc;
1215
1216         rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
1217                                       ll_get_fsname(sb, NULL, 0));
1218         if (rc < 0)
1219                 return rc;
1220
1221         ll_compute_rootsquash_state(sbi);
1222
1223         return rc;
1224 }
1225
1226 LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1227
1228 struct lprocfs_vars lprocfs_llite_obd_vars[] = {
1229         { .name =       "site",
1230           .fops =       &ll_site_stats_fops                     },
1231         { .name =       "max_read_ahead_mb",
1232           .fops =       &ll_max_readahead_mb_fops               },
1233         { .name =       "max_read_ahead_per_file_mb",
1234           .fops =       &ll_max_readahead_per_file_mb_fops      },
1235         { .name =       "max_read_ahead_whole_mb",
1236           .fops =       &ll_max_read_ahead_whole_mb_fops        },
1237         { .name =       "max_cached_mb",
1238           .fops =       &ll_max_cached_mb_fops                  },
1239         { .name =       "statahead_stats",
1240           .fops =       &ll_statahead_stats_fops                },
1241         { .name =       "unstable_stats",
1242           .fops =       &ll_unstable_stats_fops                 },
1243         { .name =       "sbi_flags",
1244           .fops =       &ll_sbi_flags_fops                      },
1245         { .name =       "root_squash",
1246           .fops =       &ll_root_squash_fops                    },
1247         { .name =       "nosquash_nids",
1248           .fops =       &ll_nosquash_nids_fops                  },
1249         { NULL }
1250 };
1251
1252 #define MAX_STRING_SIZE 128
1253
1254 static struct attribute *llite_attrs[] = {
1255         &lustre_attr_blocksize.attr,
1256         &lustre_attr_stat_blocksize.attr,
1257         &lustre_attr_kbytestotal.attr,
1258         &lustre_attr_kbytesfree.attr,
1259         &lustre_attr_kbytesavail.attr,
1260         &lustre_attr_filestotal.attr,
1261         &lustre_attr_filesfree.attr,
1262         &lustre_attr_client_type.attr,
1263         &lustre_attr_fstype.attr,
1264         &lustre_attr_uuid.attr,
1265         &lustre_attr_checksums.attr,
1266         &lustre_attr_checksum_pages.attr,
1267         &lustre_attr_stats_track_pid.attr,
1268         &lustre_attr_stats_track_ppid.attr,
1269         &lustre_attr_stats_track_gid.attr,
1270         &lustre_attr_statahead_running_max.attr,
1271         &lustre_attr_statahead_max.attr,
1272         &lustre_attr_statahead_agl.attr,
1273         &lustre_attr_lazystatfs.attr,
1274         &lustre_attr_statfs_max_age.attr,
1275         &lustre_attr_max_easize.attr,
1276         &lustre_attr_default_easize.attr,
1277         &lustre_attr_xattr_cache.attr,
1278         &lustre_attr_fast_read.attr,
1279         &lustre_attr_tiny_write.attr,
1280         NULL,
1281 };
1282
1283 static void llite_kobj_release(struct kobject *kobj)
1284 {
1285         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1286                                               ll_kset.kobj);
1287         complete(&sbi->ll_kobj_unregister);
1288 }
1289
1290 static struct kobj_type llite_ktype = {
1291         .default_attrs  = llite_attrs,
1292         .sysfs_ops      = &lustre_sysfs_ops,
1293         .release        = llite_kobj_release,
1294 };
1295
1296 static const struct llite_file_opcode {
1297         __u32       opcode;
1298         __u32       type;
1299         const char *opname;
1300 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1301         /* file operation */
1302         { LPROC_LL_DIRTY_HITS,     LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1303         { LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1304         { LPROC_LL_READ_BYTES,     LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1305                                    "read_bytes" },
1306         { LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
1307                                    "write_bytes" },
1308         { LPROC_LL_BRW_READ,       LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1309                                    "brw_read" },
1310         { LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
1311                                    "brw_write" },
1312         { LPROC_LL_IOCTL,          LPROCFS_TYPE_REGS, "ioctl" },
1313         { LPROC_LL_OPEN,           LPROCFS_TYPE_REGS, "open" },
1314         { LPROC_LL_RELEASE,        LPROCFS_TYPE_REGS, "close" },
1315         { LPROC_LL_MAP,            LPROCFS_TYPE_REGS, "mmap" },
1316         { LPROC_LL_FAULT,          LPROCFS_TYPE_REGS, "page_fault" },
1317         { LPROC_LL_MKWRITE,        LPROCFS_TYPE_REGS, "page_mkwrite" },
1318         { LPROC_LL_LLSEEK,         LPROCFS_TYPE_REGS, "seek" },
1319         { LPROC_LL_FSYNC,          LPROCFS_TYPE_REGS, "fsync" },
1320         { LPROC_LL_READDIR,        LPROCFS_TYPE_REGS, "readdir" },
1321         /* inode operation */
1322         { LPROC_LL_SETATTR,        LPROCFS_TYPE_REGS, "setattr" },
1323         { LPROC_LL_TRUNC,          LPROCFS_TYPE_REGS, "truncate" },
1324         { LPROC_LL_FLOCK,          LPROCFS_TYPE_REGS, "flock" },
1325         { LPROC_LL_GETATTR,        LPROCFS_TYPE_REGS, "getattr" },
1326         /* dir inode operation */
1327         { LPROC_LL_CREATE,         LPROCFS_TYPE_REGS, "create" },
1328         { LPROC_LL_LINK,           LPROCFS_TYPE_REGS, "link" },
1329         { LPROC_LL_UNLINK,         LPROCFS_TYPE_REGS, "unlink" },
1330         { LPROC_LL_SYMLINK,        LPROCFS_TYPE_REGS, "symlink" },
1331         { LPROC_LL_MKDIR,          LPROCFS_TYPE_REGS, "mkdir" },
1332         { LPROC_LL_RMDIR,          LPROCFS_TYPE_REGS, "rmdir" },
1333         { LPROC_LL_MKNOD,          LPROCFS_TYPE_REGS, "mknod" },
1334         { LPROC_LL_RENAME,         LPROCFS_TYPE_REGS, "rename" },
1335         /* special inode operation */
1336         { LPROC_LL_STAFS,          LPROCFS_TYPE_REGS, "statfs" },
1337         { LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1338         { LPROC_LL_SETXATTR,       LPROCFS_TYPE_REGS, "setxattr" },
1339         { LPROC_LL_GETXATTR,       LPROCFS_TYPE_REGS, "getxattr" },
1340         { LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1341         { LPROC_LL_LISTXATTR,      LPROCFS_TYPE_REGS, "listxattr" },
1342         { LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1343         { LPROC_LL_INODE_PERM,     LPROCFS_TYPE_REGS, "inode_permission" },
1344 };
1345
1346 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1347 {
1348         if (!sbi->ll_stats)
1349                 return;
1350         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1351                 lprocfs_counter_add(sbi->ll_stats, op, count);
1352         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1353                  sbi->ll_stats_track_id == current->pid)
1354                 lprocfs_counter_add(sbi->ll_stats, op, count);
1355         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1356                  sbi->ll_stats_track_id == current->parent->pid)
1357                 lprocfs_counter_add(sbi->ll_stats, op, count);
1358         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1359                  sbi->ll_stats_track_id ==
1360                         from_kgid(&init_user_ns, current_gid()))
1361                 lprocfs_counter_add(sbi->ll_stats, op, count);
1362 }
1363 EXPORT_SYMBOL(ll_stats_ops_tally);
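/*
 * Editorial note (added in review): callers bump these counters with the
 * opcode they just serviced, and the filter above drops the sample unless
 * it matches the configured tracking mode (all, pid, ppid or gid).  A
 * typical call site looks like the invented example below, kept under
 * #if 0 for illustration:
 */
#if 0
static void example_tally_open(struct ll_sb_info *sbi)
{
        /* count one "open" against this mount's stats file */
        ll_stats_ops_tally(sbi, LPROC_LL_OPEN, 1);
}
#endif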
1364
1365 static const char *ra_stat_string[] = {
1366         [RA_STAT_HIT] = "hits",
1367         [RA_STAT_MISS] = "misses",
1368         [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1369         [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1370         [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1371         [RA_STAT_FAILED_MATCH] = "failed lock match",
1372         [RA_STAT_DISCARDED] = "read but discarded",
1373         [RA_STAT_ZERO_LEN] = "zero length file",
1374         [RA_STAT_ZERO_WINDOW] = "zero size window",
1375         [RA_STAT_EOF] = "read-ahead to EOF",
1376         [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1377         [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1378         [RA_STAT_FAILED_REACH_END] = "failed to reach end"
1379 };
1380
1381 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1382 {
1383         struct lustre_sb_info *lsi = s2lsi(sb);
1384         struct ll_sb_info *sbi = ll_s2sbi(sb);
1385         int err, id, rc;
1386
1387         ENTRY;
1388         LASSERT(sbi);
1389
1390         if (IS_ERR_OR_NULL(llite_root))
1391                 goto out_ll_kset;
1392
1393         sbi->ll_debugfs_entry = ldebugfs_register(name, llite_root,
1394                                                   lprocfs_llite_obd_vars, sb);
1395         if (IS_ERR_OR_NULL(sbi->ll_debugfs_entry)) {
1396                 err = sbi->ll_debugfs_entry ? PTR_ERR(sbi->ll_debugfs_entry) :
1397                                               -ENOMEM;
1398                 sbi->ll_debugfs_entry = NULL;
1399                 RETURN(err);
1400         }
1401
1402         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
1403                                  &vvp_dump_pgcache_file_ops, sbi);
1404         if (rc)
1405                 CWARN("Error adding the dump_page_cache file\n");
1406
1407         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1408                                  &ll_rw_extents_stats_fops, sbi);
1409         if (rc)
1410                 CWARN("Error adding the extents_stats file\n");
1411
1412         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1413                                  "extents_stats_per_process", 0644,
1414                                  &ll_rw_extents_stats_pp_fops, sbi);
1415         if (rc)
1416                 CWARN("Error adding the extents_stats_per_process file\n");
1417
1418         rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1419                                  &ll_rw_offset_stats_fops, sbi);
1420         if (rc)
1421                 CWARN("Error adding the offset_stats file\n");
1422
1423         /* File operations stats */
1424         sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1425                                             LPROCFS_STATS_FLAG_NONE);
1426         if (sbi->ll_stats == NULL)
1427                 GOTO(out_debugfs, err = -ENOMEM);
1428
1429         /* do counter init */
1430         for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1431                 u32 type = llite_opcode_table[id].type;
1432                 void *ptr = NULL;
1433
1434                 if (type & LPROCFS_TYPE_REGS)
1435                         ptr = "regs";
1436                 else if (type & LPROCFS_TYPE_BYTES)
1437                         ptr = "bytes";
1438                 else if (type & LPROCFS_TYPE_PAGES)
1439                         ptr = "pages";
1440                 lprocfs_counter_init(sbi->ll_stats,
1441                                      llite_opcode_table[id].opcode,
1442                                      (type & LPROCFS_CNTR_AVGMINMAX),
1443                                      llite_opcode_table[id].opname, ptr);
1444         }
1445
1446         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1447                                       sbi->ll_stats);
1448         if (err)
1449                 GOTO(out_stats, err);
1450
1451         sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1452                                                LPROCFS_STATS_FLAG_NONE);
1453         if (sbi->ll_ra_stats == NULL)
1454                 GOTO(out_stats, err = -ENOMEM);
1455
1456         for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1457                 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1458                                      ra_stat_string[id], "pages");
1459
1460         err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1461                                       sbi->ll_ra_stats);
1462         if (err)
1463                 GOTO(out_ra_stats, err);
1464
1465 out_ll_kset:
1466         /* Register the sysfs mount kset here as well */
1467         sbi->ll_kset.kobj.parent = llite_kobj;
1468         sbi->ll_kset.kobj.ktype = &llite_ktype;
1469         init_completion(&sbi->ll_kobj_unregister);
1470         err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1471         if (err)
1472                 GOTO(out_ra_stats, err);
1473
1474         err = kset_register(&sbi->ll_kset);
1475         if (err)
1476                 GOTO(out_ra_stats, err);
1477
1478         lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1479
1480         RETURN(0);
1481 out_ra_stats:
1482         lprocfs_free_stats(&sbi->ll_ra_stats);
1483 out_stats:
1484         lprocfs_free_stats(&sbi->ll_stats);
1485 out_debugfs:
1486         ldebugfs_remove(&sbi->ll_debugfs_entry);
1487
1488         RETURN(err);
1489 }
1490
1491 void ll_debugfs_unregister_super(struct super_block *sb)
1492 {
1493         struct lustre_sb_info *lsi = s2lsi(sb);
1494         struct ll_sb_info *sbi = ll_s2sbi(sb);
1495
1496         if (!IS_ERR_OR_NULL(sbi->ll_debugfs_entry))
1497                 ldebugfs_remove(&sbi->ll_debugfs_entry);
1498
1499         if (sbi->ll_dt_obd)
1500                 sysfs_remove_link(&sbi->ll_kset.kobj,
1501                                   sbi->ll_dt_obd->obd_type->typ_name);
1502
1503         if (sbi->ll_md_obd)
1504                 sysfs_remove_link(&sbi->ll_kset.kobj,
1505                                   sbi->ll_md_obd->obd_type->typ_name);
1506
1507         kobject_put(lsi->lsi_kobj);
1508
1509         kset_unregister(&sbi->ll_kset);
1510         wait_for_completion(&sbi->ll_kobj_unregister);
1511
1512         lprocfs_free_stats(&sbi->ll_ra_stats);
1513         lprocfs_free_stats(&sbi->ll_stats);
1514 }
1515 #undef MAX_STRING_SIZE
1516
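/*
 * Print the extent-size histogram kept in pp_extents[which]: one row per
 * power-of-two bucket (e.g. "   4K -    8K"), showing the call count,
 * percentage and cumulative percentage for reads and writes.  The bucket
 * labels scale through the "KMGTPEZY" units as sizes grow, and printing
 * stops once both cumulative counts reach their totals.
 */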
1517 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1518                                    struct seq_file *seq, int which)
1519 {
1520         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1521         unsigned long start, end, r, w;
1522         char *unitp = "KMGTPEZY";
1523         int i, units = 10;
1524         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1525
1526         read_cum = 0;
1527         write_cum = 0;
1528         start = 0;
1529
1530         for (i = 0; i < LL_HIST_MAX; i++) {
1531                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1532                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1533         }
1534
1535         for (i = 0; i < LL_HIST_MAX; i++) {
1536                 r = pp_info->pp_r_hist.oh_buckets[i];
1537                 w = pp_info->pp_w_hist.oh_buckets[i];
1538                 read_cum += r;
1539                 write_cum += w;
1540                 end = BIT(i + LL_HIST_START - units);
1541                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u  | "
1542                            "%14lu %4u %4u\n", start, *unitp, end, *unitp,
1543                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1544                            r, pct(r, read_tot), pct(read_cum, read_tot),
1545                            w, pct(w, write_tot), pct(write_cum, write_tot));
1546                 start = end;
1547                 if (start == BIT(10)) {
1548                         start = 1;
1549                         units += 10;
1550                         unitp++;
1551                 }
1552                 if (read_cum == read_tot && write_cum == write_tot)
1553                         break;
1554         }
1555 }
1556
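/*
 * extents_stats_per_process: show one extent-size histogram per tracked
 * PID, or a hint on how to enable collection while it is disabled.
 */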
1557 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1558 {
1559         struct timespec64 now;
1560         struct ll_sb_info *sbi = seq->private;
1561         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1562         int k;
1563
1564         ktime_get_real_ts64(&now);
1565
1566         if (!sbi->ll_rw_stats_on) {
1567                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1568                 return 0;
1569         }
1570         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1571                    (s64)now.tv_sec, now.tv_nsec);
1572         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1573         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1574                    "extents", "calls", "%", "cum%",
1575                    "calls", "%", "cum%");
1576         spin_lock(&sbi->ll_pp_extent_lock);
1577         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1578                 if (io_extents->pp_extents[k].pid != 0) {
1579                         seq_printf(seq, "\nPID: %d\n",
1580                                    io_extents->pp_extents[k].pid);
1581                         ll_display_extents_info(io_extents, seq, k);
1582                 }
1583         }
1584         spin_unlock(&sbi->ll_pp_extent_lock);
1585         return 0;
1586 }
1587
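/*
 * Writing to extents_stats_per_process toggles collection (see
 * ll_stats_pid_write()) and clears the per-process histogram slots.
 */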
1588 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1589                                                 const char __user *buf,
1590                                                 size_t len,
1591                                                 loff_t *off)
1592 {
1593         struct seq_file *seq = file->private_data;
1594         struct ll_sb_info *sbi = seq->private;
1595         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1596         int i;
1597         __s64 value;
1598
1599         if (len == 0)
1600                 return -EINVAL;
1601
1602         value = ll_stats_pid_write(buf, len);
1603
1604         if (value == 0)
1605                 sbi->ll_rw_stats_on = 0;
1606         else
1607                 sbi->ll_rw_stats_on = 1;
1608
1609         spin_lock(&sbi->ll_pp_extent_lock);
1610         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1611                 io_extents->pp_extents[i].pid = 0;
1612                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1613                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1614         }
1615         spin_unlock(&sbi->ll_pp_extent_lock);
1616         return len;
1617 }
1618
1619 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
1620
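/*
 * extents_stats: show the aggregate extent-size histogram, which is kept
 * in the extra pp_extents[] slot at index LL_PROCESS_HIST_MAX.
 */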
1621 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1622 {
1623         struct timespec64 now;
1624         struct ll_sb_info *sbi = seq->private;
1625         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1626
1627         ktime_get_real_ts64(&now);
1628
1629         if (!sbi->ll_rw_stats_on) {
1630                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1631                 return 0;
1632         }
1633         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1634                    (s64)now.tv_sec, now.tv_nsec);
1635
1636         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1637         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1638                    "extents", "calls", "%", "cum%",
1639                    "calls", "%", "cum%");
1640         spin_lock(&sbi->ll_lock);
1641         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1642         spin_unlock(&sbi->ll_lock);
1643
1644         return 0;
1645 }
1646
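/*
 * Writing to extents_stats toggles collection and clears every histogram
 * slot, including the aggregate one at index LL_PROCESS_HIST_MAX.
 */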
1647 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1648                                              const char __user *buf,
1649                                              size_t len, loff_t *off)
1650 {
1651         struct seq_file *seq = file->private_data;
1652         struct ll_sb_info *sbi = seq->private;
1653         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1654         int i;
1655         __s64 value;
1656
1657         if (len == 0)
1658                 return -EINVAL;
1659
1660         value = ll_stats_pid_write(buf, len);
1661
1662         if (value == 0)
1663                 sbi->ll_rw_stats_on = 0;
1664         else
1665                 sbi->ll_rw_stats_on = 1;
1666
1667         spin_lock(&sbi->ll_pp_extent_lock);
1668         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1669                 io_extents->pp_extents[i].pid = 0;
1670                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1671                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1672         }
1673         spin_unlock(&sbi->ll_pp_extent_lock);
1674
1675         return len;
1676 }
1677
1678 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
1679
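/**
 * ll_rw_stats_tally() - Record one I/O in the extent and offset statistics
 * @sbi: superblock info of the mount the I/O was issued on
 * @pid: PID of the process performing the I/O
 * @file: file descriptor data, used to notice when a process switches files
 * @pos: starting file offset of the I/O
 * @count: number of bytes transferred
 * @rw: 0 for a read, non-zero for a write
 *
 * Updates the per-process and aggregate extent-size histograms as well as
 * the per-process offset tracking shown by the *_stats debugfs files.
 * Does nothing while collection is disabled.
 */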
1680 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1681                        struct ll_file_data *file, loff_t pos,
1682                        size_t count, int rw)
1683 {
1684         int i, cur = -1;
1685         struct ll_rw_process_info *process;
1686         struct ll_rw_process_info *offset;
1687         int *off_count = &sbi->ll_rw_offset_entry_count;
1688         int *process_count = &sbi->ll_offset_process_count;
1689         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1690
1691         if (!sbi->ll_rw_stats_on)
1692                 return;
1693         process = sbi->ll_rw_process_info;
1694         offset = sbi->ll_rw_offset_info;
1695
1696         spin_lock(&sbi->ll_pp_extent_lock);
1697         /* Extent statistics */
1698         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1699                 if (io_extents->pp_extents[i].pid == pid) {
1700                         cur = i;
1701                         break;
1702                 }
1703         }
1704
1705         if (cur == -1) {
1706                 /* new process */
1707                 sbi->ll_extent_process_count =
1708                         (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1709                 cur = sbi->ll_extent_process_count;
1710                 io_extents->pp_extents[cur].pid = pid;
1711                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1712                 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1713         }
1714
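        /* Select the power-of-two histogram bucket for this I/O size;
         * oversized I/Os fall into the last bucket.
         */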
1715         for (i = 0; (count >= BIT(LL_HIST_START + i)) &&
1716              (i < (LL_HIST_MAX - 1)); i++);
1717         if (rw == 0) {
1718                 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1719                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1720         } else {
1721                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1722                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1723         }
1724         spin_unlock(&sbi->ll_pp_extent_lock);
1725
1726         spin_lock(&sbi->ll_process_lock);
1727         /* Offset statistics */
1728         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1729                 if (process[i].rw_pid == pid) {
1730                         if (process[i].rw_last_file != file) {
1731                                 process[i].rw_range_start = pos;
1732                                 process[i].rw_last_file_pos = pos + count;
1733                                 process[i].rw_smallest_extent = count;
1734                                 process[i].rw_largest_extent = count;
1735                                 process[i].rw_offset = 0;
1736                                 process[i].rw_last_file = file;
1737                                 spin_unlock(&sbi->ll_process_lock);
1738                                 return;
1739                         }
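                        /* Discontiguous I/O from this PID: archive the
                         * range just finished in the offset table and
                         * start tracking a new one from the current
                         * position.
                         */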
1740                         if (process[i].rw_last_file_pos != pos) {
1741                                 *off_count =
1742                                     (*off_count + 1) % LL_OFFSET_HIST_MAX;
1743                                 offset[*off_count].rw_op = process[i].rw_op;
1744                                 offset[*off_count].rw_pid = pid;
1745                                 offset[*off_count].rw_range_start =
1746                                         process[i].rw_range_start;
1747                                 offset[*off_count].rw_range_end =
1748                                         process[i].rw_last_file_pos;
1749                                 offset[*off_count].rw_smallest_extent =
1750                                         process[i].rw_smallest_extent;
1751                                 offset[*off_count].rw_largest_extent =
1752                                         process[i].rw_largest_extent;
1753                                 offset[*off_count].rw_offset =
1754                                         process[i].rw_offset;
1755                                 process[i].rw_op = rw;
1756                                 process[i].rw_range_start = pos;
1757                                 process[i].rw_smallest_extent = count;
1758                                 process[i].rw_largest_extent = count;
1759                                 process[i].rw_offset = pos -
1760                                         process[i].rw_last_file_pos;
1761                         }
1762                         if (process[i].rw_smallest_extent > count)
1763                                 process[i].rw_smallest_extent = count;
1764                         if (process[i].rw_largest_extent < count)
1765                                 process[i].rw_largest_extent = count;
1766                         process[i].rw_last_file_pos = pos + count;
1767                         spin_unlock(&sbi->ll_process_lock);
1768                         return;
1769                 }
1770         }
1771         *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1772         process[*process_count].rw_pid = pid;
1773         process[*process_count].rw_op = rw;
1774         process[*process_count].rw_range_start = pos;
1775         process[*process_count].rw_last_file_pos = pos + count;
1776         process[*process_count].rw_smallest_extent = count;
1777         process[*process_count].rw_largest_extent = count;
1778         process[*process_count].rw_offset = 0;
1779         process[*process_count].rw_last_file = file;
1780         spin_unlock(&sbi->ll_process_lock);
1781 }
1782
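/*
 * offset_stats: list the archived discontiguous ranges first, then the
 * range currently being tracked for each process.
 */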
1783 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1784 {
1785         struct timespec64 now;
1786         struct ll_sb_info *sbi = seq->private;
1787         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1788         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1789         int i;
1790
1791         ktime_get_real_ts64(&now);
1792
1793         if (!sbi->ll_rw_stats_on) {
1794                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1795                 return 0;
1796         }
1797         spin_lock(&sbi->ll_process_lock);
1798
1799         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1800                    (s64)now.tv_sec, now.tv_nsec);
1801         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1802                    "R/W", "PID", "RANGE START", "RANGE END",
1803                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1804
1805         /* We stored the discontiguous offsets here; print them first */
1806         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1807                 if (offset[i].rw_pid != 0)
1808                         seq_printf(seq,
1809                                   "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
1810                                    offset[i].rw_op == READ ? 'R' : 'W',
1811                                    offset[i].rw_pid,
1812                                    offset[i].rw_range_start,
1813                                    offset[i].rw_range_end,
1814                                    (unsigned long)offset[i].rw_smallest_extent,
1815                                    (unsigned long)offset[i].rw_largest_extent,
1816                                    offset[i].rw_offset);
1817         }
1818
1819         /* Then print the current offsets for each process */
1820         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1821                 if (process[i].rw_pid != 0)
1822                         seq_printf(seq,
1823                                   "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
1824                                    process[i].rw_op == READ ? 'R' : 'W',
1825                                    process[i].rw_pid,
1826                                    process[i].rw_range_start,
1827                                    process[i].rw_last_file_pos,
1828                                    (unsigned long)process[i].rw_smallest_extent,
1829                                    (unsigned long)process[i].rw_largest_extent,
1830                                    process[i].rw_offset);
1831         }
1832         spin_unlock(&sbi->ll_process_lock);
1833
1834         return 0;
1835 }
1836
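/*
 * Writing to offset_stats toggles collection and clears both the process
 * and offset tables.
 */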
1837 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1838                                             const char __user *buf,
1839                                             size_t len, loff_t *off)
1840 {
1841         struct seq_file *seq = file->private_data;
1842         struct ll_sb_info *sbi = seq->private;
1843         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1844         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1845         __s64 value;
1846
1847         if (len == 0)
1848                 return -EINVAL;
1849
1850         value = ll_stats_pid_write(buf, len);
1851
1852         if (value == 0)
1853                 sbi->ll_rw_stats_on = 0;
1854         else
1855                 sbi->ll_rw_stats_on = 1;
1856
1857         spin_lock(&sbi->ll_process_lock);
1858         sbi->ll_offset_process_count = 0;
1859         sbi->ll_rw_offset_entry_count = 0;
1860         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1861                LL_PROCESS_HIST_MAX);
1862         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1863                LL_OFFSET_HIST_MAX);
1864         spin_unlock(&sbi->ll_process_lock);
1865
1866         return len;
1867 }
1868
1869 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);