Whamcloud - gitweb
c701e0f4471de1252d589f4bdad41654dd24e8f4
[fs/lustre-release.git] / lustre / llite / lproc_llite.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 #define DEBUG_SUBSYSTEM S_LLITE
33
34 #include <linux/version.h>
35 #include <linux/user_namespace.h>
36 #include <linux/uidgid.h>
37
38 #include <uapi/linux/lustre/lustre_param.h>
39 #include <lprocfs_status.h>
40 #include <obd_support.h>
41
42 #include "llite_internal.h"
43 #include "vvp_internal.h"
44
/* sysfs anchor: the "llite" kobject, child of the lustre kset */
static struct kobject *llite_kobj;
/* debugfs anchor: <debugfs>/lustre/llite directory */
static struct dentry *llite_root;
47
48 static void llite_kobj_release(struct kobject *kobj)
49 {
50         if (!IS_ERR_OR_NULL(llite_root)) {
51                 debugfs_remove(llite_root);
52                 llite_root = NULL;
53         }
54
55         kfree(kobj);
56 }
57
/* ktype for llite_kobj; the release hook frees the kobject and removes the
 * debugfs root, see llite_kobj_release(). */
static struct kobj_type llite_kobj_ktype = {
        .release        = llite_kobj_release,
        .sysfs_ops      = &lustre_sysfs_ops,
};
62
63 int llite_tunables_register(void)
64 {
65         int rc;
66
67         llite_kobj = kzalloc(sizeof(*llite_kobj), GFP_KERNEL);
68         if (!llite_kobj)
69                 return -ENOMEM;
70
71         llite_kobj->kset = lustre_kset;
72         rc = kobject_init_and_add(llite_kobj, &llite_kobj_ktype,
73                                   &lustre_kset->kobj, "%s", "llite");
74         if (rc)
75                 goto free_kobj;
76
77         llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
78         return 0;
79
80 free_kobj:
81         kobject_put(llite_kobj);
82         llite_kobj = NULL;
83
84         return rc;
85 }
86
/* Drop the last reference on the llite kobject; llite_kobj_release() then
 * frees it and removes the debugfs directory. */
void llite_tunables_unregister(void)
{
        kobject_put(llite_kobj);
        llite_kobj = NULL;
}
92
/* <debugfs>/lustre/llite mount point registration */
/* forward declarations for stats file_operations defined later in this file */
static const struct file_operations ll_rw_extents_stats_fops;
static const struct file_operations ll_rw_extents_stats_pp_fops;
static const struct file_operations ll_rw_offset_stats_fops;
97
98 /**
99  * ll_stats_pid_write() - Determine if stats collection should be enabled
100  * @buf: Buffer containing the data written
101  * @len: Number of bytes in the buffer
102  *
103  * Several proc files begin collecting stats when a value is written, and stop
104  * collecting when either '0' or 'disable' is written. This function checks the
105  * written value to see if collection should be enabled or disabled.
106  *
107  * Return: If '0' or 'disable' is provided, 0 is returned. If the text
108  * equivalent of a number is written, that number is returned. Otherwise,
109  * 1 is returned. Non-zero return values indicate collection should be enabled.
110  */
111 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
112 {
113         unsigned long long value = 1;
114         char kernbuf[16];
115         int rc;
116
117         rc = kstrtoull_from_user(buf, len, 0, &value);
118         if (rc < 0 && len < sizeof(kernbuf)) {
119                 if (copy_from_user(kernbuf, buf, len))
120                         return -EFAULT;
121                 kernbuf[len] = 0;
122
123                 if (kernbuf[len - 1] == '\n')
124                         kernbuf[len - 1] = 0;
125
126                 if (strncasecmp(kernbuf, "disable", 7) == 0)
127                         value = 0;
128         }
129
130         return value;
131 }
132
133 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
134                               char *buf)
135 {
136         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
137                                               ll_kset.kobj);
138         struct obd_statfs osfs;
139         int rc;
140
141         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
142         if (rc)
143                 return rc;
144
145         return sprintf(buf, "%u\n", osfs.os_bsize);
146 }
147 LUSTRE_RO_ATTR(blocksize);
148
/* Show the stat(2) block size override; 0 appears to mean "use the server
 * value" (see the validation in stat_blocksize_store()) — confirm. */
static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
                                   char *buf)
{
        struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
                                              ll_kset.kobj);

        return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
}
157
158 static ssize_t stat_blocksize_store(struct kobject *kobj,
159                                     struct attribute *attr,
160                                     const char *buffer,
161                                     size_t count)
162 {
163         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
164                                               ll_kset.kobj);
165         unsigned int val;
166         int rc;
167
168         rc = kstrtouint(buffer, 10, &val);
169         if (rc)
170                 return rc;
171
172         if (val != 0 && (val < PAGE_SIZE || (val & (val - 1))) != 0)
173                 return -ERANGE;
174
175         sbi->ll_stat_blksize = val;
176
177         return count;
178 }
179 LUSTRE_RW_ATTR(stat_blocksize);
180
181 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
182                                 char *buf)
183 {
184         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
185                                               ll_kset.kobj);
186         struct obd_statfs osfs;
187         u32 blk_size;
188         u64 result;
189         int rc;
190
191         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
192         if (rc)
193                 return rc;
194
195         blk_size = osfs.os_bsize >> 10;
196         result = osfs.os_blocks;
197
198         while (blk_size >>= 1)
199                 result <<= 1;
200
201         return sprintf(buf, "%llu\n", result);
202 }
203 LUSTRE_RO_ATTR(kbytestotal);
204
205 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
206                                char *buf)
207 {
208         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
209                                               ll_kset.kobj);
210         struct obd_statfs osfs;
211         u32 blk_size;
212         u64 result;
213         int rc;
214
215         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
216         if (rc)
217                 return rc;
218
219         blk_size = osfs.os_bsize >> 10;
220         result = osfs.os_bfree;
221
222         while (blk_size >>= 1)
223                 result <<= 1;
224
225         return sprintf(buf, "%llu\n", result);
226 }
227 LUSTRE_RO_ATTR(kbytesfree);
228
229 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
230                                 char *buf)
231 {
232         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
233                                               ll_kset.kobj);
234         struct obd_statfs osfs;
235         u32 blk_size;
236         u64 result;
237         int rc;
238
239         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
240         if (rc)
241                 return rc;
242
243         blk_size = osfs.os_bsize >> 10;
244         result = osfs.os_bavail;
245
246         while (blk_size >>= 1)
247                 result <<= 1;
248
249         return sprintf(buf, "%llu\n", result);
250 }
251 LUSTRE_RO_ATTR(kbytesavail);
252
253 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
254                                char *buf)
255 {
256         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
257                                               ll_kset.kobj);
258         struct obd_statfs osfs;
259         int rc;
260
261         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
262         if (rc)
263                 return rc;
264
265         return sprintf(buf, "%llu\n", osfs.os_files);
266 }
267 LUSTRE_RO_ATTR(filestotal);
268
269 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
270                               char *buf)
271 {
272         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
273                                               ll_kset.kobj);
274         struct obd_statfs osfs;
275         int rc;
276
277         rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
278         if (rc)
279                 return rc;
280
281         return sprintf(buf, "%llu\n", osfs.os_ffree);
282 }
283 LUSTRE_RO_ATTR(filesfree);
284
/* Report the client type; this implementation always reports "local client". */
static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        return sprintf(buf, "local client\n");
}
LUSTRE_RO_ATTR(client_type);
291
/* Report the filesystem type, always "lustre". */
static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
                           char *buf)
{
        return sprintf(buf, "lustre\n");
}
LUSTRE_RO_ATTR(fstype);
298
/* Show this superblock's UUID string. */
static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
                         char *buf)
{
        struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
                                              ll_kset.kobj);

        return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
}
LUSTRE_RO_ATTR(uuid);
308
/* Dump the cl_site/lu_site statistical counters for this mount. */
static int ll_site_stats_seq_show(struct seq_file *m, void *v)
{
        struct super_block *sb = m->private;

        /*
         * See description of statistical counters in struct cl_site, and
         * struct lu_site.
         */
        return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
}

LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
321
322 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
323                                       struct attribute *attr, char *buf)
324 {
325         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
326                                               ll_kset.kobj);
327         unsigned long ra_max_mb;
328
329         spin_lock(&sbi->ll_lock);
330         ra_max_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages);
331         spin_unlock(&sbi->ll_lock);
332
333         return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_mb);
334 }
335
336 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
337                                        struct attribute *attr,
338                                        const char *buffer, size_t count)
339 {
340         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
341                                               ll_kset.kobj);
342         u64 ra_max_mb, pages_number;
343         int rc;
344
345         rc = sysfs_memparse(buffer, count, &ra_max_mb, "MiB");
346         if (rc)
347                 return rc;
348
349         pages_number = round_up(ra_max_mb, 1024 * 1024) >> PAGE_SHIFT;
350         CDEBUG(D_INFO, "%s: set max_read_ahead_mb=%llu (%llu pages)\n",
351                sbi->ll_fsname, PAGES_TO_MiB(pages_number), pages_number);
352         if (pages_number > cfs_totalram_pages() / 2) {
353                 /* 1/2 of RAM */
354                 CERROR("%s: cannot set max_read_ahead_mb=%llu > totalram/2=%luMB\n",
355                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
356                        PAGES_TO_MiB(cfs_totalram_pages() / 2));
357                 return -ERANGE;
358         }
359
360         spin_lock(&sbi->ll_lock);
361         sbi->ll_ra_info.ra_max_pages = pages_number;
362         spin_unlock(&sbi->ll_lock);
363
364         return count;
365 }
366 LUSTRE_RW_ATTR(max_read_ahead_mb);
367
368 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
369                                                struct attribute *attr,
370                                                char *buf)
371 {
372         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
373                                               ll_kset.kobj);
374         unsigned long ra_max_file_mb;
375
376         spin_lock(&sbi->ll_lock);
377         ra_max_file_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file);
378         spin_unlock(&sbi->ll_lock);
379
380         return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_file_mb);
381 }
382
383 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
384                                                 struct attribute *attr,
385                                                 const char *buffer,
386                                                 size_t count)
387 {
388         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
389                                               ll_kset.kobj);
390         u64 ra_max_file_mb, pages_number;
391         int rc;
392
393         rc = sysfs_memparse(buffer, count, &ra_max_file_mb, "MiB");
394         if (rc)
395                 return rc;
396
397         pages_number = round_up(ra_max_file_mb, 1024 * 1024) >> PAGE_SHIFT;
398         if (pages_number > sbi->ll_ra_info.ra_max_pages) {
399                 CERROR("%s: cannot set max_read_ahead_per_file_mb=%llu > max_read_ahead_mb=%lu\n",
400                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
401                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
402                 return -ERANGE;
403         }
404
405         spin_lock(&sbi->ll_lock);
406         sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
407         spin_unlock(&sbi->ll_lock);
408
409         return count;
410 }
411 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
412
413 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
414                                             struct attribute *attr, char *buf)
415 {
416         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
417                                               ll_kset.kobj);
418         unsigned long ra_max_whole_mb;
419
420         spin_lock(&sbi->ll_lock);
421         ra_max_whole_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages);
422         spin_unlock(&sbi->ll_lock);
423
424         return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_whole_mb);
425 }
426
427 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
428                                              struct attribute *attr,
429                                              const char *buffer, size_t count)
430 {
431         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
432                                               ll_kset.kobj);
433         u64 ra_max_whole_mb, pages_number;
434         int rc;
435
436         rc = sysfs_memparse(buffer, count, &ra_max_whole_mb, "MiB");
437         if (rc)
438                 return rc;
439
440         pages_number = round_up(ra_max_whole_mb, 1024 * 1024) >> PAGE_SHIFT;
441         /* Cap this at the current max readahead window size, the readahead
442          * algorithm does this anyway so it's pointless to set it larger.
443          */
444         if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
445                 CERROR("%s: cannot set max_read_ahead_whole_mb=%llu > max_read_ahead_per_file_mb=%lu\n",
446                        sbi->ll_fsname, PAGES_TO_MiB(pages_number),
447                        PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
448
449                 return -ERANGE;
450         }
451
452         spin_lock(&sbi->ll_lock);
453         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
454         spin_unlock(&sbi->ll_lock);
455
456         return count;
457 }
458 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
459
460 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
461 {
462         struct super_block     *sb    = m->private;
463         struct ll_sb_info      *sbi   = ll_s2sbi(sb);
464         struct cl_client_cache *cache = sbi->ll_cache;
465         long max_cached_mb;
466         long unused_mb;
467
468         mutex_lock(&cache->ccc_max_cache_mb_lock);
469         max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
470         unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
471         mutex_unlock(&cache->ccc_max_cache_mb_lock);
472         seq_printf(m, "users: %d\n"
473                       "max_cached_mb: %ld\n"
474                       "used_mb: %ld\n"
475                       "unused_mb: %ld\n"
476                       "reclaim_count: %u\n",
477                    atomic_read(&cache->ccc_users),
478                    max_cached_mb,
479                    max_cached_mb - unused_mb,
480                    unused_mb,
481                    cache->ccc_lru_shrinkers);
482         return 0;
483 }
484
/* Write handler for max_cached_mb: resize the client page-cache LRU budget.
 *
 * Accepts "[max_cached_mb: ]<size>[KMG]" (so the read-side output can be
 * written back verbatim). Growing the budget simply adds free LRU slots;
 * shrinking first claims free slots and then asks the OSCs to drop in-use
 * ones. Returns @count on success or a negative errno.
 */
static ssize_t ll_max_cached_mb_seq_write(struct file *file,
                                          const char __user *buffer,
                                          size_t count, loff_t *off)
{
        struct seq_file *m = file->private_data;
        struct super_block *sb = m->private;
        struct ll_sb_info *sbi = ll_s2sbi(sb);
        struct cl_client_cache *cache = sbi->ll_cache;
        struct lu_env *env;
        long diff = 0;
        long nrpages = 0;
        __u16 refcheck;
        u64 pages_number;
        int rc;
        char kernbuf[128], *ptr;

        ENTRY;
        if (count >= sizeof(kernbuf))
                RETURN(-EINVAL);

        if (copy_from_user(kernbuf, buffer, count))
                RETURN(-EFAULT);
        kernbuf[count] = '\0';

        /* skip an optional "max_cached_mb:" prefix before parsing */
        ptr = lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count);
        rc = sysfs_memparse(ptr, count, &pages_number, "MiB");
        if (rc)
                RETURN(rc);

        /* sysfs_memparse() returned bytes; convert to pages */
        pages_number >>= PAGE_SHIFT;

        /* NOTE(review): pages_number is u64, so the "< 0" comparison can
         * never be true; only the totalram upper bound is effective here. */
        if (pages_number < 0 || pages_number > cfs_totalram_pages()) {
                CERROR("%s: can't set max cache more than %lu MB\n",
                       sbi->ll_fsname,
                       PAGES_TO_MiB(cfs_totalram_pages()));
                RETURN(-ERANGE);
        }
        /* Allow enough cache so clients can make well-formed RPCs */
        pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);

        mutex_lock(&cache->ccc_max_cache_mb_lock);
        diff = pages_number - cache->ccc_lru_max;

        /* easy - add more LRU slots. */
        if (diff >= 0) {
                atomic_long_add(diff, &cache->ccc_lru_left);
                GOTO(out, rc = 0);
        }

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                GOTO(out_unlock, rc = PTR_ERR(env));

        /* shrinking: diff becomes the number of slots still to reclaim */
        diff = -diff;
        while (diff > 0) {
                long tmp;

                /* reduce LRU budget from free slots. */
                do {
                        long lru_left_old, lru_left_new, lru_left_ret;

                        lru_left_old = atomic_long_read(&cache->ccc_lru_left);
                        if (lru_left_old == 0)
                                break;

                        /* lockless cmpxchg: claim up to `diff` free slots;
                         * retry if another thread raced in between */
                        lru_left_new = lru_left_old > diff ?
                                        lru_left_old - diff : 0;
                        lru_left_ret =
                                atomic_long_cmpxchg(&cache->ccc_lru_left,
                                                    lru_left_old,
                                                    lru_left_new);
                        if (likely(lru_left_old == lru_left_ret)) {
                                diff -= lru_left_old - lru_left_new;
                                nrpages += lru_left_old - lru_left_new;
                                break;
                        }
                } while (1);

                if (diff <= 0)
                        break;

                if (sbi->ll_dt_exp == NULL) { /* being initialized */
                        rc = -ENODEV;
                        break;
                }

                /* Request extra free slots to avoid them all being used
                 * by other processes before this can continue shrinking.
                 */
                tmp = diff + min_t(long, diff, MiB_TO_PAGES(1024));
                /* difficult - have to ask OSCs to drop LRU slots. */
                rc = obd_set_info_async(env, sbi->ll_dt_exp,
                                sizeof(KEY_CACHE_LRU_SHRINK),
                                KEY_CACHE_LRU_SHRINK,
                                sizeof(tmp), &tmp, NULL);
                if (rc < 0)
                        break;
        }
        cl_env_put(env, &refcheck);

out:
        /* commit the new limit on success; on failure return the slots we
         * already claimed back to the free pool */
        if (rc >= 0) {
                cache->ccc_lru_max = pages_number;
                rc = count;
        } else {
                atomic_long_add(nrpages, &cache->ccc_lru_left);
        }
out_unlock:
        mutex_unlock(&cache->ccc_max_cache_mb_lock);
        return rc;
}
LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
597
/* Show whether data checksums are enabled (LL_SBI_CHECKSUM flag). */
static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
        struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
                                              ll_kset.kobj);

        return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
}
606
607 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
608                                const char *buffer, size_t count)
609 {
610         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
611                                               ll_kset.kobj);
612         bool val;
613         int tmp;
614         int rc;
615
616         if (!sbi->ll_dt_exp)
617                 /* Not set up yet */
618                 return -EAGAIN;
619
620         rc = kstrtobool(buffer, &val);
621         if (rc)
622                 return rc;
623         if (val)
624                 sbi->ll_flags |= LL_SBI_CHECKSUM;
625         else
626                 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
627         tmp = val;
628
629         rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
630                                 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
631         if (rc)
632                 CWARN("Failed to set OSC checksum flags: %d\n", rc);
633
634         return count;
635 }
636 LUSTRE_RW_ATTR(checksums);
637
638 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
639
640 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
641                               enum stats_track_type type)
642 {
643         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
644                                               ll_kset.kobj);
645
646         if (sbi->ll_stats_track_type == type)
647                 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
648         else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
649                 return sprintf(buf, "0 (all)\n");
650
651         return sprintf(buf, "untracked\n");
652 }
653
654 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
655                               size_t count, enum stats_track_type type)
656 {
657         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
658                                               ll_kset.kobj);
659         unsigned long pid;
660         int rc;
661
662         rc = kstrtoul(buffer, 10, &pid);
663         if (rc)
664                 return rc;
665
666         sbi->ll_stats_track_id = pid;
667         if (pid == 0)
668                 sbi->ll_stats_track_type = STATS_TRACK_ALL;
669         else
670                 sbi->ll_stats_track_type = type;
671         lprocfs_clear_stats(sbi->ll_stats);
672         return count;
673 }
674
/* Show the tracked PID ("0 (all)" / "untracked" per ll_rd_track_id()). */
static ssize_t stats_track_pid_show(struct kobject *kobj,
                                    struct attribute *attr,
                                    char *buf)
{
        return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
}

/* Set the PID whose I/O should be tracked (0 = track all). */
static ssize_t stats_track_pid_store(struct kobject *kobj,
                                     struct attribute *attr,
                                     const char *buffer,
                                     size_t count)
{
        return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
}
LUSTRE_RW_ATTR(stats_track_pid);
690
/* Show the tracked parent PID ("0 (all)" / "untracked"). */
static ssize_t stats_track_ppid_show(struct kobject *kobj,
                                     struct attribute *attr,
                                     char *buf)
{
        return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
}

/* Set the parent PID whose children's I/O should be tracked (0 = all). */
static ssize_t stats_track_ppid_store(struct kobject *kobj,
                                      struct attribute *attr,
                                      const char *buffer,
                                      size_t count)
{
        return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
}
LUSTRE_RW_ATTR(stats_track_ppid);
706
/* Show the tracked GID ("0 (all)" / "untracked"). */
static ssize_t stats_track_gid_show(struct kobject *kobj,
                                    struct attribute *attr,
                                    char *buf)
{
        return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
}

/* Set the GID whose I/O should be tracked (0 = track all). */
static ssize_t stats_track_gid_store(struct kobject *kobj,
                                     struct attribute *attr,
                                     const char *buffer,
                                     size_t count)
{
        return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
}
LUSTRE_RW_ATTR(stats_track_gid);
722
723 static ssize_t statahead_running_max_show(struct kobject *kobj,
724                                           struct attribute *attr,
725                                           char *buf)
726 {
727         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
728                                               ll_kset.kobj);
729
730         return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
731 }
732
733 static ssize_t statahead_running_max_store(struct kobject *kobj,
734                                            struct attribute *attr,
735                                            const char *buffer,
736                                            size_t count)
737 {
738         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
739                                               ll_kset.kobj);
740         unsigned long val;
741         int rc;
742
743         rc = kstrtoul(buffer, 0, &val);
744         if (rc)
745                 return rc;
746
747         if (val <= LL_SA_RUNNING_MAX) {
748                 sbi->ll_sa_running_max = val;
749                 return count;
750         }
751
752         CERROR("Bad statahead_running_max value %lu. Valid values "
753                "are in the range [0, %d]\n", val, LL_SA_RUNNING_MAX);
754
755         return -ERANGE;
756 }
757 LUSTRE_RW_ATTR(statahead_running_max);
758
759 static ssize_t statahead_max_show(struct kobject *kobj,
760                                   struct attribute *attr,
761                                   char *buf)
762 {
763         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
764                                               ll_kset.kobj);
765
766         return sprintf(buf, "%u\n", sbi->ll_sa_max);
767 }
768
769 static ssize_t statahead_max_store(struct kobject *kobj,
770                                    struct attribute *attr,
771                                    const char *buffer,
772                                    size_t count)
773 {
774         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
775                                               ll_kset.kobj);
776         unsigned long val;
777         int rc;
778
779         rc = kstrtoul(buffer, 0, &val);
780         if (rc)
781                 return rc;
782
783         if (val <= LL_SA_RPC_MAX)
784                 sbi->ll_sa_max = val;
785         else
786                 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
787                        val, LL_SA_RPC_MAX);
788
789         return count;
790 }
791 LUSTRE_RW_ATTR(statahead_max);
792
793 static ssize_t statahead_agl_show(struct kobject *kobj,
794                                   struct attribute *attr,
795                                   char *buf)
796 {
797         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
798                                               ll_kset.kobj);
799
800         return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
801 }
802
803 static ssize_t statahead_agl_store(struct kobject *kobj,
804                                    struct attribute *attr,
805                                    const char *buffer,
806                                    size_t count)
807 {
808         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
809                                               ll_kset.kobj);
810         bool val;
811         int rc;
812
813         rc = kstrtobool(buffer, &val);
814         if (rc)
815                 return rc;
816
817         if (val)
818                 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
819         else
820                 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
821
822         return count;
823 }
824 LUSTRE_RW_ATTR(statahead_agl);
825
826 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
827 {
828         struct super_block *sb = m->private;
829         struct ll_sb_info *sbi = ll_s2sbi(sb);
830
831         seq_printf(m, "statahead total: %u\n"
832                       "statahead wrong: %u\n"
833                       "agl total: %u\n",
834                    atomic_read(&sbi->ll_sa_total),
835                    atomic_read(&sbi->ll_sa_wrong),
836                    atomic_read(&sbi->ll_agl_total));
837         return 0;
838 }
839
840 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
841
842 static ssize_t lazystatfs_show(struct kobject *kobj,
843                                struct attribute *attr,
844                                char *buf)
845 {
846         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
847                                               ll_kset.kobj);
848
849         return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
850 }
851
852 static ssize_t lazystatfs_store(struct kobject *kobj,
853                                 struct attribute *attr,
854                                 const char *buffer,
855                                 size_t count)
856 {
857         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
858                                               ll_kset.kobj);
859         bool val;
860         int rc;
861
862         rc = kstrtobool(buffer, &val);
863         if (rc)
864                 return rc;
865
866         if (val)
867                 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
868         else
869                 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
870
871         return count;
872 }
873 LUSTRE_RW_ATTR(lazystatfs);
874
875 static ssize_t statfs_max_age_show(struct kobject *kobj, struct attribute *attr,
876                                    char *buf)
877 {
878         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
879                                               ll_kset.kobj);
880
881         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
882 }
883
884 static ssize_t statfs_max_age_store(struct kobject *kobj,
885                                     struct attribute *attr, const char *buffer,
886                                     size_t count)
887 {
888         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
889                                               ll_kset.kobj);
890         unsigned int val;
891         int rc;
892
893         rc = kstrtouint(buffer, 10, &val);
894         if (rc)
895                 return rc;
896         if (val > OBD_STATFS_CACHE_MAX_AGE)
897                 return -EINVAL;
898
899         sbi->ll_statfs_max_age = val;
900
901         return count;
902 }
903 LUSTRE_RW_ATTR(statfs_max_age);
904
905 static ssize_t max_easize_show(struct kobject *kobj,
906                                struct attribute *attr,
907                                char *buf)
908 {
909         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
910                                               ll_kset.kobj);
911         unsigned int ealen;
912         int rc;
913
914         rc = ll_get_max_mdsize(sbi, &ealen);
915         if (rc)
916                 return rc;
917
918         /* Limit xattr size returned to userspace based on kernel maximum */
919         return snprintf(buf, PAGE_SIZE, "%u\n",
920                         ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
921 }
922 LUSTRE_RO_ATTR(max_easize);
923
/**
 * Get default_easize.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] kobj      sysfs kobject embedded in ll_sb_info
 * \param[in] attr      unused for a single-value attribute
 * \param[out] buf      output buffer (PAGE_SIZE, per sysfs convention)
 *
 * \retval positive     number of bytes written to \a buf
 * \retval negative     negated errno on failure
 */
static ssize_t default_easize_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buf)
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kset.kobj);
	unsigned int ealen;
	int rc;

	rc = ll_get_default_mdsize(sbi, &ealen);
	if (rc)
		return rc;

	/* Limit xattr size returned to userspace based on kernel maximum */
	return snprintf(buf, PAGE_SIZE, "%u\n",
			ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
}
952
/**
 * Set default_easize.
 *
 * Range checking on the passed value is handled by
 * ll_set_default_mdsize().
 *
 * \param[in] kobj      sysfs kobject embedded in ll_sb_info
 * \param[in] attr      unused for a single-value attribute
 * \param[in] buffer    string passed from user space
 * \param[in] count     \a buffer length
 *
 * \retval positive     \a count on success
 * \retval negative     negated errno on failure
 */
static ssize_t default_easize_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buffer,
				    size_t count)
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kset.kobj);
	unsigned int val;
	int rc;

	/* empty write: nothing to parse, report zero bytes consumed */
	if (count == 0)
		return 0;

	rc = kstrtouint(buffer, 10, &val);
	if (rc)
		return rc;

	rc = ll_set_default_mdsize(sbi, val);
	if (rc)
		return rc;

	return count;
}
LUSTRE_RW_ATTR(default_easize);
993
994 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
995 {
996         const char *str[] = LL_SBI_FLAGS;
997         struct super_block *sb = m->private;
998         int flags = ll_s2sbi(sb)->ll_flags;
999         int i = 0;
1000
1001         while (flags != 0) {
1002                 if (ARRAY_SIZE(str) <= i) {
1003                         CERROR("%s: Revise array LL_SBI_FLAGS to match sbi "
1004                                 "flags please.\n", ll_s2sbi(sb)->ll_fsname);
1005                         return -EINVAL;
1006                 }
1007
1008                 if (flags & 0x1)
1009                         seq_printf(m, "%s ", str[i]);
1010                 flags >>= 1;
1011                 ++i;
1012         }
1013         seq_printf(m, "\b\n");
1014         return 0;
1015 }
1016
1017 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
1018
1019 static ssize_t xattr_cache_show(struct kobject *kobj,
1020                                 struct attribute *attr,
1021                                 char *buf)
1022 {
1023         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1024                                               ll_kset.kobj);
1025
1026         return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
1027 }
1028
1029 static ssize_t xattr_cache_store(struct kobject *kobj,
1030                                  struct attribute *attr,
1031                                  const char *buffer,
1032                                  size_t count)
1033 {
1034         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1035                                               ll_kset.kobj);
1036         bool val;
1037         int rc;
1038
1039         rc = kstrtobool(buffer, &val);
1040         if (rc)
1041                 return rc;
1042
1043         if (val && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
1044                 return -ENOTSUPP;
1045
1046         sbi->ll_xattr_cache_enabled = val;
1047         sbi->ll_xattr_cache_set = 1;
1048
1049         return count;
1050 }
1051 LUSTRE_RW_ATTR(xattr_cache);
1052
1053 static ssize_t tiny_write_show(struct kobject *kobj,
1054                                struct attribute *attr,
1055                                char *buf)
1056 {
1057         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1058                                               ll_kset.kobj);
1059
1060         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_TINY_WRITE));
1061 }
1062
1063 static ssize_t tiny_write_store(struct kobject *kobj,
1064                                 struct attribute *attr,
1065                                 const char *buffer,
1066                                 size_t count)
1067 {
1068         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1069                                               ll_kset.kobj);
1070         bool val;
1071         int rc;
1072
1073         rc = kstrtobool(buffer, &val);
1074         if (rc)
1075                 return rc;
1076
1077         spin_lock(&sbi->ll_lock);
1078         if (val)
1079                 sbi->ll_flags |= LL_SBI_TINY_WRITE;
1080         else
1081                 sbi->ll_flags &= ~LL_SBI_TINY_WRITE;
1082         spin_unlock(&sbi->ll_lock);
1083
1084         return count;
1085 }
1086 LUSTRE_RW_ATTR(tiny_write);
1087
/* Show the maximum number of concurrently active async readahead work
 * items for this mount. */
static ssize_t max_read_ahead_async_active_show(struct kobject *kobj,
					       struct attribute *attr,
					       char *buf)
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kset.kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->ll_ra_info.ra_async_max_active);
}

/* Set the maximum number of active async readahead work items.
 * Returns @count on success, -ERANGE when the value exceeds what an
 * unbound workqueue can support, or a negated errno on bad input. */
static ssize_t max_read_ahead_async_active_store(struct kobject *kobj,
						 struct attribute *attr,
						 const char *buffer,
						 size_t count)
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kset.kobj);
	unsigned int val;
	int rc;

	rc = kstrtouint(buffer, 10, &val);
	if (rc)
		return rc;

	/*
	 * It doesn't make any sense to make it exceed what the
	 * workqueue could actually support.  This can easily
	 * over-subscribe the cores but Lustre internally
	 * throttles to avoid those impacts.
	 */
	if (val > WQ_UNBOUND_MAX_ACTIVE) {
		CERROR("%s: cannot set max_read_ahead_async_active=%u larger than %u\n",
		       sbi->ll_fsname, val, WQ_UNBOUND_MAX_ACTIVE);
		return -ERANGE;
	}

	/* ll_lock serializes the update with concurrent writers */
	spin_lock(&sbi->ll_lock);
	sbi->ll_ra_info.ra_async_max_active = val;
	spin_unlock(&sbi->ll_lock);

	return count;
}
LUSTRE_RW_ATTR(max_read_ahead_async_active);
1132
1133 static ssize_t read_ahead_async_file_threshold_mb_show(struct kobject *kobj,
1134                                                        struct attribute *attr,
1135                                                        char *buf)
1136 {
1137         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1138                                               ll_kset.kobj);
1139
1140         return snprintf(buf, PAGE_SIZE, "%lu\n",
1141              PAGES_TO_MiB(sbi->ll_ra_info.ra_async_pages_per_file_threshold));
1142 }
1143
1144 static ssize_t
1145 read_ahead_async_file_threshold_mb_store(struct kobject *kobj,
1146                                          struct attribute *attr,
1147                                          const char *buffer, size_t count)
1148 {
1149         unsigned long pages_number;
1150         unsigned long max_ra_per_file;
1151         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1152                                               ll_kset.kobj);
1153         int rc;
1154
1155         rc = kstrtoul(buffer, 10, &pages_number);
1156         if (rc)
1157                 return rc;
1158
1159         pages_number = MiB_TO_PAGES(pages_number);
1160         max_ra_per_file = sbi->ll_ra_info.ra_max_pages_per_file;
1161         if (pages_number < 0 || pages_number > max_ra_per_file) {
1162                 CERROR("%s: can't set read_ahead_async_file_threshold_mb=%lu > "
1163                        "max_read_readahead_per_file_mb=%lu\n", sbi->ll_fsname,
1164                        PAGES_TO_MiB(pages_number),
1165                        PAGES_TO_MiB(max_ra_per_file));
1166                 return -ERANGE;
1167         }
1168         sbi->ll_ra_info.ra_async_pages_per_file_threshold = pages_number;
1169
1170         return count;
1171 }
1172 LUSTRE_RW_ATTR(read_ahead_async_file_threshold_mb);
1173
1174 static ssize_t fast_read_show(struct kobject *kobj,
1175                               struct attribute *attr,
1176                               char *buf)
1177 {
1178         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1179                                               ll_kset.kobj);
1180
1181         return sprintf(buf, "%u\n", !!(sbi->ll_flags & LL_SBI_FAST_READ));
1182 }
1183
1184 static ssize_t fast_read_store(struct kobject *kobj,
1185                                struct attribute *attr,
1186                                const char *buffer,
1187                                size_t count)
1188 {
1189         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1190                                               ll_kset.kobj);
1191         bool val;
1192         int rc;
1193
1194         rc = kstrtobool(buffer, &val);
1195         if (rc)
1196                 return rc;
1197
1198         spin_lock(&sbi->ll_lock);
1199         if (val)
1200                 sbi->ll_flags |= LL_SBI_FAST_READ;
1201         else
1202                 sbi->ll_flags &= ~LL_SBI_FAST_READ;
1203         spin_unlock(&sbi->ll_lock);
1204
1205         return count;
1206 }
1207 LUSTRE_RW_ATTR(fast_read);
1208
1209 static ssize_t file_heat_show(struct kobject *kobj,
1210                               struct attribute *attr,
1211                               char *buf)
1212 {
1213         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1214                                               ll_kset.kobj);
1215
1216         return snprintf(buf, PAGE_SIZE, "%u\n",
1217                         !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
1218 }
1219
1220 static ssize_t file_heat_store(struct kobject *kobj,
1221                                struct attribute *attr,
1222                                const char *buffer,
1223                                size_t count)
1224 {
1225         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1226                                               ll_kset.kobj);
1227         bool val;
1228         int rc;
1229
1230         rc = kstrtobool(buffer, &val);
1231         if (rc)
1232                 return rc;
1233
1234         spin_lock(&sbi->ll_lock);
1235         if (val)
1236                 sbi->ll_flags |= LL_SBI_FILE_HEAT;
1237         else
1238                 sbi->ll_flags &= ~LL_SBI_FILE_HEAT;
1239         spin_unlock(&sbi->ll_lock);
1240
1241         return count;
1242 }
1243 LUSTRE_RW_ATTR(file_heat);
1244
1245 static ssize_t heat_decay_percentage_show(struct kobject *kobj,
1246                                           struct attribute *attr,
1247                                           char *buf)
1248 {
1249         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1250                                               ll_kset.kobj);
1251
1252         return snprintf(buf, PAGE_SIZE, "%u\n",
1253                        (sbi->ll_heat_decay_weight * 100 + 128) / 256);
1254 }
1255
1256 static ssize_t heat_decay_percentage_store(struct kobject *kobj,
1257                                            struct attribute *attr,
1258                                            const char *buffer,
1259                                            size_t count)
1260 {
1261         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1262                                               ll_kset.kobj);
1263         unsigned long val;
1264         int rc;
1265
1266         rc = kstrtoul(buffer, 10, &val);
1267         if (rc)
1268                 return rc;
1269
1270         if (val < 0 || val > 100)
1271                 return -ERANGE;
1272
1273         sbi->ll_heat_decay_weight = (val * 256 + 50) / 100;
1274
1275         return count;
1276 }
1277 LUSTRE_RW_ATTR(heat_decay_percentage);
1278
1279 static ssize_t heat_period_second_show(struct kobject *kobj,
1280                                        struct attribute *attr,
1281                                        char *buf)
1282 {
1283         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1284                                               ll_kset.kobj);
1285
1286         return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
1287 }
1288
1289 static ssize_t heat_period_second_store(struct kobject *kobj,
1290                                         struct attribute *attr,
1291                                         const char *buffer,
1292                                         size_t count)
1293 {
1294         struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1295                                               ll_kset.kobj);
1296         unsigned long val;
1297         int rc;
1298
1299         rc = kstrtoul(buffer, 10, &val);
1300         if (rc)
1301                 return rc;
1302
1303         if (val <= 0)
1304                 return -ERANGE;
1305
1306         sbi->ll_heat_period_second = val;
1307
1308         return count;
1309 }
1310 LUSTRE_RW_ATTR(heat_period_second);
1311
/* Report "unstable" page accounting for the client cache: whether the
 * unstable check is enabled, and the count of pages written but not yet
 * committed stably by the server, in pages and MiB. */
static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
{
	struct super_block	*sb    = m->private;
	struct ll_sb_info	*sbi   = ll_s2sbi(sb);
	struct cl_client_cache	*cache = sbi->ll_cache;
	long pages;
	int mb;

	pages = atomic_long_read(&cache->ccc_unstable_nr);
	mb    = (pages * PAGE_SIZE) >> 20;

	seq_printf(m, "unstable_check:     %8d\n"
		      "unstable_pages: %12ld\n"
		      "unstable_mb:        %8d\n",
		   cache->ccc_unstable_check, pages, mb);
	return 0;
}

/* Parse "unstable_check: <bool>" (or a bare boolean) written to the
 * debugfs file and toggle ccc_unstable_check accordingly. */
static ssize_t ll_unstable_stats_seq_write(struct file *file,
					   const char __user *buffer,
					   size_t count, loff_t *unused)
{
	struct seq_file *seq = file->private_data;
	struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
	char kernbuf[128];
	bool val;
	int rc;

	if (count == 0)
		return 0;
	/* need room for the trailing NUL below */
	if (count >= sizeof(kernbuf))
		return -EINVAL;

	if (copy_from_user(kernbuf, buffer, count))
		return -EFAULT;
	kernbuf[count] = 0;

	/* lprocfs_find_named_value() locates the value after an optional
	 * "unstable_check:" label in the local copy and shrinks count to
	 * the remaining text; the resulting offset is re-applied to the
	 * __user pointer so kstrtobool_from_user() reads just the value */
	buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
		  kernbuf;
	rc = kstrtobool_from_user(buffer, count, &val);
	if (rc < 0)
		return rc;

	/* borrow lru lock to set the value */
	spin_lock(&sbi->ll_cache->ccc_lru_lock);
	sbi->ll_cache->ccc_unstable_check = val;
	spin_unlock(&sbi->ll_cache->ccc_lru_lock);

	return count;
}

LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
1364
1365 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1366 {
1367         struct super_block *sb = m->private;
1368         struct ll_sb_info *sbi = ll_s2sbi(sb);
1369         struct root_squash_info *squash = &sbi->ll_squash;
1370
1371         seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1372         return 0;
1373 }
1374
1375 static ssize_t ll_root_squash_seq_write(struct file *file,
1376                                         const char __user *buffer,
1377                                         size_t count, loff_t *off)
1378 {
1379         struct seq_file *m = file->private_data;
1380         struct super_block *sb = m->private;
1381         struct ll_sb_info *sbi = ll_s2sbi(sb);
1382         struct root_squash_info *squash = &sbi->ll_squash;
1383
1384         return lprocfs_wr_root_squash(buffer, count, squash, sbi->ll_fsname);
1385 }
1386
1387 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1388
/* Print the nosquash NID list, or "NONE" when it is empty.
 * cfs_print_nidlist() writes directly into the seq_file buffer, so
 * m->count must be advanced manually by the returned length. */
static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
{
	struct super_block *sb = m->private;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct root_squash_info *squash = &sbi->ll_squash;
	int len;

	/* rsi_lock guards the nosquash NID list against concurrent writers */
	spin_lock(&squash->rsi_lock);
	if (!list_empty(&squash->rsi_nosquash_nids)) {
		len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
					&squash->rsi_nosquash_nids);
		m->count += len;
		seq_putc(m, '\n');
	} else {
		seq_puts(m, "NONE\n");
	}
	spin_unlock(&squash->rsi_lock);

	return 0;
}

/* Replace the nosquash NID list from a userspace NID-list string, then
 * recompute the mount's root squash state. */
static ssize_t ll_nosquash_nids_seq_write(struct file *file,
					  const char __user *buffer,
					  size_t count, loff_t *off)
{
	struct seq_file *m = file->private_data;
	struct super_block *sb = m->private;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct root_squash_info *squash = &sbi->ll_squash;
	int rc;

	rc = lprocfs_wr_nosquash_nids(buffer, count, squash, sbi->ll_fsname);
	if (rc < 0)
		return rc;

	ll_compute_rootsquash_state(sbi);

	return rc;
}

LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1430
/* Dump the Persistent Client Cache (PCC) configuration for this mount. */
static int ll_pcc_seq_show(struct seq_file *m, void *v)
{
	struct super_block *sb = m->private;
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	return pcc_super_dump(&sbi->ll_pcc_super, m);
}

/* Handle a PCC command string written to the debugfs file; the server
 * must have advertised OBD_CONNECT2_PCC support at connect time.
 * Returns @count on success or a negated errno. */
static ssize_t ll_pcc_seq_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *off)
{
	struct seq_file *m = file->private_data;
	struct super_block *sb = m->private;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	int rc;
	char *kernbuf;

	if (count >= LPROCFS_WR_PCC_MAX_CMD)
		return -EINVAL;

	if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_PCC))
		return -EOPNOTSUPP;

	/* count + 1 so the command stays NUL-terminated after copying
	 * count bytes from userspace */
	OBD_ALLOC(kernbuf, count + 1);
	if (kernbuf == NULL)
		return -ENOMEM;

	if (copy_from_user(kernbuf, buffer, count))
		GOTO(out_free_kernbuff, rc = -EFAULT);

	rc = pcc_cmd_handle(kernbuf, count, &sbi->ll_pcc_super);
out_free_kernbuff:
	OBD_FREE(kernbuf, count + 1);
	return rc ? rc : count;
}
LDEBUGFS_SEQ_FOPS(ll_pcc);
1467
/* Per-mount debugfs files; registered for each superblock via
 * ldebugfs_add_vars() in ll_debugfs_register_super(). */
struct ldebugfs_vars lprocfs_llite_obd_vars[] = {
	{ .name =	"site",
	  .fops =	&ll_site_stats_fops			},
	{ .name =	"max_cached_mb",
	  .fops =	&ll_max_cached_mb_fops			},
	{ .name =	"statahead_stats",
	  .fops =	&ll_statahead_stats_fops		},
	{ .name =	"unstable_stats",
	  .fops =	&ll_unstable_stats_fops			},
	{ .name =	"sbi_flags",
	  .fops =	&ll_sbi_flags_fops			},
	{ .name =	"root_squash",
	  .fops =	&ll_root_squash_fops			},
	{ .name =	"nosquash_nids",
	  .fops =	&ll_nosquash_nids_fops			},
	{ .name =	"pcc",
	  .fops =	&ll_pcc_fops,				},
	{ NULL }
};
1487
#define MAX_STRING_SIZE 128

/* sysfs attributes published for every llite mount; attached through
 * sbi_ktype as the kset's default attributes. */
static struct attribute *llite_attrs[] = {
	&lustre_attr_blocksize.attr,
	&lustre_attr_stat_blocksize.attr,
	&lustre_attr_kbytestotal.attr,
	&lustre_attr_kbytesfree.attr,
	&lustre_attr_kbytesavail.attr,
	&lustre_attr_filestotal.attr,
	&lustre_attr_filesfree.attr,
	&lustre_attr_client_type.attr,
	&lustre_attr_fstype.attr,
	&lustre_attr_uuid.attr,
	&lustre_attr_checksums.attr,
	&lustre_attr_checksum_pages.attr,
	&lustre_attr_max_read_ahead_mb.attr,
	&lustre_attr_max_read_ahead_per_file_mb.attr,
	&lustre_attr_max_read_ahead_whole_mb.attr,
	&lustre_attr_max_read_ahead_async_active.attr,
	&lustre_attr_read_ahead_async_file_threshold_mb.attr,
	&lustre_attr_stats_track_pid.attr,
	&lustre_attr_stats_track_ppid.attr,
	&lustre_attr_stats_track_gid.attr,
	&lustre_attr_statahead_running_max.attr,
	&lustre_attr_statahead_max.attr,
	&lustre_attr_statahead_agl.attr,
	&lustre_attr_lazystatfs.attr,
	&lustre_attr_statfs_max_age.attr,
	&lustre_attr_max_easize.attr,
	&lustre_attr_default_easize.attr,
	&lustre_attr_xattr_cache.attr,
	&lustre_attr_fast_read.attr,
	&lustre_attr_tiny_write.attr,
	&lustre_attr_file_heat.attr,
	&lustre_attr_heat_decay_percentage.attr,
	&lustre_attr_heat_period_second.attr,
	NULL,
};
1526
/* kobject release callback: signal the waiter on ll_kobj_unregister
 * (see init_completion() in ll_debugfs_register_super()) that the last
 * reference on the sysfs kset is gone. */
static void sbi_kobj_release(struct kobject *kobj)
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kset.kobj);
	complete(&sbi->ll_kobj_unregister);
}

static struct kobj_type sbi_ktype = {
	.default_attrs	= llite_attrs,
	.sysfs_ops	= &lustre_sysfs_ops,
	.release	= sbi_kobj_release,
};
1539
/* Maps each llite stats counter to its lprocfs type and the name shown
 * in the per-mount "stats" debugfs file; must cover every opcode in
 * LPROC_LL_FILE_OPCODES (counters are initialized from this table in
 * ll_debugfs_register_super()). */
static const struct llite_file_opcode {
	__u32		opcode;
	__u32		type;
	const char	*opname;
} llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
	/* file operation */
	{ LPROC_LL_READ_BYTES,	LPROCFS_TYPE_BYTES_FULL, "read_bytes" },
	{ LPROC_LL_WRITE_BYTES, LPROCFS_TYPE_BYTES_FULL, "write_bytes" },
	{ LPROC_LL_READ,	LPROCFS_TYPE_LATENCY,	"read" },
	{ LPROC_LL_WRITE,	LPROCFS_TYPE_LATENCY,	"write" },
	{ LPROC_LL_IOCTL,	LPROCFS_TYPE_REQS,	"ioctl" },
	{ LPROC_LL_OPEN,	LPROCFS_TYPE_LATENCY,	"open" },
	{ LPROC_LL_RELEASE,	LPROCFS_TYPE_LATENCY,	"close" },
	{ LPROC_LL_MMAP,	LPROCFS_TYPE_LATENCY,	"mmap" },
	{ LPROC_LL_FAULT,	LPROCFS_TYPE_LATENCY,	"page_fault" },
	{ LPROC_LL_MKWRITE,	LPROCFS_TYPE_LATENCY,	"page_mkwrite" },
	{ LPROC_LL_LLSEEK,	LPROCFS_TYPE_LATENCY,	"seek" },
	{ LPROC_LL_FSYNC,	LPROCFS_TYPE_LATENCY,	"fsync" },
	{ LPROC_LL_READDIR,	LPROCFS_TYPE_LATENCY,	"readdir" },
	/* inode operation */
	{ LPROC_LL_SETATTR,	LPROCFS_TYPE_LATENCY,	"setattr" },
	{ LPROC_LL_TRUNC,	LPROCFS_TYPE_LATENCY,	"truncate" },
	{ LPROC_LL_FLOCK,	LPROCFS_TYPE_LATENCY,	"flock" },
	{ LPROC_LL_GETATTR,	LPROCFS_TYPE_LATENCY,	"getattr" },
	{ LPROC_LL_FALLOCATE,	LPROCFS_TYPE_LATENCY, "fallocate"},
	/* dir inode operation */
	{ LPROC_LL_CREATE,	LPROCFS_TYPE_LATENCY,	"create" },
	{ LPROC_LL_LINK,	LPROCFS_TYPE_LATENCY,	"link" },
	{ LPROC_LL_UNLINK,	LPROCFS_TYPE_LATENCY,	"unlink" },
	{ LPROC_LL_SYMLINK,	LPROCFS_TYPE_LATENCY,	"symlink" },
	{ LPROC_LL_MKDIR,	LPROCFS_TYPE_LATENCY,	"mkdir" },
	{ LPROC_LL_RMDIR,	LPROCFS_TYPE_LATENCY,	"rmdir" },
	{ LPROC_LL_MKNOD,	LPROCFS_TYPE_LATENCY,	"mknod" },
	{ LPROC_LL_RENAME,	LPROCFS_TYPE_LATENCY,	"rename" },
	/* special inode operation */
	{ LPROC_LL_STATFS,	LPROCFS_TYPE_LATENCY,	"statfs" },
	{ LPROC_LL_SETXATTR,	LPROCFS_TYPE_LATENCY,	"setxattr" },
	{ LPROC_LL_GETXATTR,	LPROCFS_TYPE_LATENCY,	"getxattr" },
	{ LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REQS,	"getxattr_hits" },
	{ LPROC_LL_LISTXATTR,	LPROCFS_TYPE_LATENCY,	"listxattr" },
	{ LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_LATENCY,	"removexattr" },
	{ LPROC_LL_INODE_PERM,	LPROCFS_TYPE_LATENCY,	"inode_permission" },
};
1583
1584 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count)
1585 {
1586         if (!sbi->ll_stats)
1587                 return;
1588
1589         if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1590                 lprocfs_counter_add(sbi->ll_stats, op, count);
1591         else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1592                  sbi->ll_stats_track_id == current->pid)
1593                 lprocfs_counter_add(sbi->ll_stats, op, count);
1594         else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1595                  sbi->ll_stats_track_id == current->parent->pid)
1596                 lprocfs_counter_add(sbi->ll_stats, op, count);
1597         else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1598                  sbi->ll_stats_track_id ==
1599                         from_kgid(&init_user_ns, current_gid()))
1600                 lprocfs_counter_add(sbi->ll_stats, op, count);
1601 }
1602 EXPORT_SYMBOL(ll_stats_ops_tally);
1603
/* Counter names for the per-mount "read_ahead_stats" debugfs file,
 * indexed by the RA_STAT_* enum values. */
static const char *ra_stat_string[] = {
	[RA_STAT_HIT] = "hits",
	[RA_STAT_MISS] = "misses",
	[RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
	[RA_STAT_MISS_IN_WINDOW] = "miss inside window",
	[RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
	[RA_STAT_FAILED_MATCH] = "failed lock match",
	[RA_STAT_DISCARDED] = "read but discarded",
	[RA_STAT_ZERO_LEN] = "zero length file",
	[RA_STAT_ZERO_WINDOW] = "zero size window",
	[RA_STAT_EOF] = "read-ahead to EOF",
	[RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
	[RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
	[RA_STAT_FAILED_REACH_END] = "failed to reach end",
	[RA_STAT_ASYNC] = "async readahead",
	[RA_STAT_FAILED_FAST_READ] = "failed to fast read",
};
1621
/**
 * Register per-mount debugfs files and the sysfs kset for one client mount.
 *
 * \param sb	superblock of the mounted client filesystem
 * \param name	unique name for this mount's debugfs dir and sysfs kset
 *
 * \retval 0 on success, negative errno on failure.
 *
 * On failure every resource acquired here is torn down again via the
 * goto-cleanup chain at the bottom; on success the matching teardown is
 * ll_debugfs_unregister_super().
 */
int ll_debugfs_register_super(struct super_block *sb, const char *name)
{
        struct lustre_sb_info *lsi = s2lsi(sb);
        struct ll_sb_info *sbi = ll_s2sbi(sb);
        int err, id;

        ENTRY;
        LASSERT(sbi);

        /* If module-level debugfs setup failed (llite_root unusable), skip
         * all the debugfs files but still register the sysfs kset below. */
        if (IS_ERR_OR_NULL(llite_root))
                goto out_ll_kset;

        /* debugfs_create_* return values are deliberately not checked:
         * debugfs failures are non-fatal by kernel convention. */
        sbi->ll_debugfs_entry = debugfs_create_dir(name, llite_root);
        ldebugfs_add_vars(sbi->ll_debugfs_entry, lprocfs_llite_obd_vars, sb);

        debugfs_create_file("dump_page_cache", 0444, sbi->ll_debugfs_entry, sbi,
                            &vvp_dump_pgcache_file_ops);

        debugfs_create_file("extents_stats", 0644, sbi->ll_debugfs_entry, sbi,
                                 &ll_rw_extents_stats_fops);

        debugfs_create_file("extents_stats_per_process", 0644,
                            sbi->ll_debugfs_entry, sbi,
                            &ll_rw_extents_stats_pp_fops);

        debugfs_create_file("offset_stats", 0644, sbi->ll_debugfs_entry, sbi,
                            &ll_rw_offset_stats_fops);

        /* File operations stats */
        sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
                                            LPROCFS_STATS_FLAG_NONE);
        if (sbi->ll_stats == NULL)
                GOTO(out_debugfs, err = -ENOMEM);

        /* do counter init: pick the unit label from the opcode's type bits */
        for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
                u32 type = llite_opcode_table[id].type;
                void *ptr = "unknown";

                if (type & LPROCFS_TYPE_REQS)
                        ptr = "reqs";
                else if (type & LPROCFS_TYPE_BYTES)
                        ptr = "bytes";
                else if (type & LPROCFS_TYPE_USEC)
                        ptr = "usec";
                lprocfs_counter_init(sbi->ll_stats,
                                     llite_opcode_table[id].opcode, type,
                                     llite_opcode_table[id].opname, ptr);
        }

        debugfs_create_file("stats", 0644, sbi->ll_debugfs_entry,
                            sbi->ll_stats, &ldebugfs_stats_seq_fops);

        /* Readahead stats, one counter per RA_STAT_* event, counted in pages */
        sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
                                               LPROCFS_STATS_FLAG_NONE);
        if (sbi->ll_ra_stats == NULL)
                GOTO(out_stats, err = -ENOMEM);

        for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
                lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
                                     ra_stat_string[id], "pages");

        debugfs_create_file("read_ahead_stats", 0644, sbi->ll_debugfs_entry,
                            sbi->ll_ra_stats, &ldebugfs_stats_seq_fops);

out_ll_kset:
        /* Yes we also register sysfs mount kset here as well */
        sbi->ll_kset.kobj.parent = llite_kobj;
        sbi->ll_kset.kobj.ktype = &sbi_ktype;
        init_completion(&sbi->ll_kobj_unregister);
        err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
        if (err)
                GOTO(out_ra_stats, err);

        err = kset_register(&sbi->ll_kset);
        if (err)
                GOTO(out_ra_stats, err);

        /* extra reference held in lsi so mount code can reach the kobject */
        lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);

        RETURN(0);
out_ra_stats:
        /* lprocfs_free_stats() tolerates a NULL *stats, so this chain is
         * safe even when entered via the out_ll_kset shortcut above
         * (NOTE(review): relies on that helper's NULL handling - confirm). */
        lprocfs_free_stats(&sbi->ll_ra_stats);
out_stats:
        lprocfs_free_stats(&sbi->ll_stats);
out_debugfs:
        debugfs_remove_recursive(sbi->ll_debugfs_entry);

        RETURN(err);
}
1712
/**
 * Tear down everything ll_debugfs_register_super() created for \a sb.
 *
 * The order below matters: debugfs first, then the sysfs links that hang
 * off the kset's kobject, then the kset itself, and only after the kobject
 * release has completed are the stats freed.
 */
void ll_debugfs_unregister_super(struct super_block *sb)
{
        struct lustre_sb_info *lsi = s2lsi(sb);
        struct ll_sb_info *sbi = ll_s2sbi(sb);

        debugfs_remove_recursive(sbi->ll_debugfs_entry);

        /* Drop the OSC/MDC type symlinks created when the targets were
         * connected; either obd may already be gone. */
        if (sbi->ll_dt_obd)
                sysfs_remove_link(&sbi->ll_kset.kobj,
                                  sbi->ll_dt_obd->obd_type->typ_name);

        if (sbi->ll_md_obd)
                sysfs_remove_link(&sbi->ll_kset.kobj,
                                  sbi->ll_md_obd->obd_type->typ_name);

        /* drop the extra reference taken for lsi_kobj at register time */
        kobject_put(lsi->lsi_kobj);

        /* wait until the kobject release has run before freeing the stats
         * that sysfs readers might still be touching */
        kset_unregister(&sbi->ll_kset);
        wait_for_completion(&sbi->ll_kobj_unregister);

        lprocfs_free_stats(&sbi->ll_ra_stats);
        lprocfs_free_stats(&sbi->ll_stats);
}
1736 #undef MAX_STRING_SIZE
1737
1738 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1739                                    struct seq_file *seq, int which)
1740 {
1741         unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1742         unsigned long start, end, r, w;
1743         char *unitp = "KMGTPEZY";
1744         int i, units = 10;
1745         struct per_process_info *pp_info = &io_extents->pp_extents[which];
1746
1747         read_cum = 0;
1748         write_cum = 0;
1749         start = 0;
1750
1751         for(i = 0; i < LL_HIST_MAX; i++) {
1752                 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1753                 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1754         }
1755
1756         for(i = 0; i < LL_HIST_MAX; i++) {
1757                 r = pp_info->pp_r_hist.oh_buckets[i];
1758                 w = pp_info->pp_w_hist.oh_buckets[i];
1759                 read_cum += r;
1760                 write_cum += w;
1761                 end = 1 << (i + LL_HIST_START - units);
1762                 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u  | "
1763                            "%14lu %4u %4u\n", start, *unitp, end, *unitp,
1764                            (i == LL_HIST_MAX - 1) ? '+' : ' ',
1765                            r, pct(r, read_tot), pct(read_cum, read_tot),
1766                            w, pct(w, write_tot), pct(write_cum, write_tot));
1767                 start = end;
1768                 if (start == (1 << 10)) {
1769                         start = 1;
1770                         units += 10;
1771                         unitp++;
1772                 }
1773                 if (read_cum == read_tot && write_cum == write_tot)
1774                         break;
1775         }
1776 }
1777
1778 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1779 {
1780         struct timespec64 now;
1781         struct ll_sb_info *sbi = seq->private;
1782         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1783         int k;
1784
1785         ktime_get_real_ts64(&now);
1786
1787         if (!sbi->ll_rw_stats_on) {
1788                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1789                 return 0;
1790         }
1791         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1792                    (s64)now.tv_sec, now.tv_nsec);
1793         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1794         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1795                    "extents", "calls", "%", "cum%",
1796                    "calls", "%", "cum%");
1797         spin_lock(&sbi->ll_pp_extent_lock);
1798         for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1799                 if (io_extents->pp_extents[k].pid != 0) {
1800                         seq_printf(seq, "\nPID: %d\n",
1801                                    io_extents->pp_extents[k].pid);
1802                         ll_display_extents_info(io_extents, seq, k);
1803                 }
1804         }
1805         spin_unlock(&sbi->ll_pp_extent_lock);
1806         return 0;
1807 }
1808
1809 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1810                                                 const char __user *buf,
1811                                                 size_t len,
1812                                                 loff_t *off)
1813 {
1814         struct seq_file *seq = file->private_data;
1815         struct ll_sb_info *sbi = seq->private;
1816         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1817         int i;
1818         __s64 value;
1819
1820         if (len == 0)
1821                 return -EINVAL;
1822
1823         value = ll_stats_pid_write(buf, len);
1824
1825         if (value == 0)
1826                 sbi->ll_rw_stats_on = 0;
1827         else
1828                 sbi->ll_rw_stats_on = 1;
1829
1830         spin_lock(&sbi->ll_pp_extent_lock);
1831         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1832                 io_extents->pp_extents[i].pid = 0;
1833                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1834                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1835         }
1836         spin_unlock(&sbi->ll_pp_extent_lock);
1837         return len;
1838 }
1839
1840 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
1841
1842 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1843 {
1844         struct timespec64 now;
1845         struct ll_sb_info *sbi = seq->private;
1846         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1847
1848         ktime_get_real_ts64(&now);
1849
1850         if (!sbi->ll_rw_stats_on) {
1851                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
1852                 return 0;
1853         }
1854         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
1855                    (s64)now.tv_sec, now.tv_nsec);
1856
1857         seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1858         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1859                    "extents", "calls", "%", "cum%",
1860                    "calls", "%", "cum%");
1861         spin_lock(&sbi->ll_lock);
1862         ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1863         spin_unlock(&sbi->ll_lock);
1864
1865         return 0;
1866 }
1867
1868 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1869                                              const char __user *buf,
1870                                              size_t len, loff_t *off)
1871 {
1872         struct seq_file *seq = file->private_data;
1873         struct ll_sb_info *sbi = seq->private;
1874         struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1875         int i;
1876         __s64 value;
1877
1878         if (len == 0)
1879                 return -EINVAL;
1880
1881         value = ll_stats_pid_write(buf, len);
1882
1883         if (value == 0)
1884                 sbi->ll_rw_stats_on = 0;
1885         else
1886                 sbi->ll_rw_stats_on = 1;
1887
1888         spin_lock(&sbi->ll_pp_extent_lock);
1889         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1890                 io_extents->pp_extents[i].pid = 0;
1891                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1892                 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1893         }
1894         spin_unlock(&sbi->ll_pp_extent_lock);
1895
1896         return len;
1897 }
1898
1899 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
1900
/**
 * Record one read or write for the extent-size and offset statistics.
 *
 * \param sbi	per-superblock info holding all the stats tables
 * \param pid	pid of the process doing the I/O
 * \param file	file the I/O is against (used to detect file switches)
 * \param pos	starting offset of this I/O
 * \param count	number of bytes
 * \param rw	0 for read, non-zero for write
 *
 * Updates two independent data sets under two different locks:
 * the extent-size histograms (ll_pp_extent_lock) and the per-process
 * offset tracking (ll_process_lock).  Process slots in both tables are
 * recycled round-robin, so long-running workloads overwrite old entries.
 */
void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                       struct ll_file_data *file, loff_t pos,
                       size_t count, int rw)
{
        int i, cur = -1;
        struct ll_rw_process_info *process;
        struct ll_rw_process_info *offset;
        int *off_count = &sbi->ll_rw_offset_entry_count;
        int *process_count = &sbi->ll_offset_process_count;
        struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;

        if(!sbi->ll_rw_stats_on)
                return;
        process = sbi->ll_rw_process_info;
        offset = sbi->ll_rw_offset_info;

        spin_lock(&sbi->ll_pp_extent_lock);
        /* Extent statistics: find this pid's histogram slot, if any */
        for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                if(io_extents->pp_extents[i].pid == pid) {
                        cur = i;
                        break;
                }
        }

        if (cur == -1) {
                /* new process: claim the next slot round-robin, evicting
                 * whatever pid previously used it */
                sbi->ll_extent_process_count =
                        (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
                cur = sbi->ll_extent_process_count;
                io_extents->pp_extents[cur].pid = pid;
                lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
                lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
        }

        /* find the histogram bucket for this I/O size; the top bucket
         * absorbs everything larger */
        for (i = 0; (count >= 1 << (LL_HIST_START + i)) &&
             (i < (LL_HIST_MAX - 1)); i++);
        /* bump both the per-process row and the aggregate row at index
         * LL_PROCESS_HIST_MAX */
        if (rw == 0) {
                io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
                io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
        } else {
                io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
                io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
        }
        spin_unlock(&sbi->ll_pp_extent_lock);

        spin_lock(&sbi->ll_process_lock);
        /* Offset statistics */
        for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                if (process[i].rw_pid == pid) {
                        if (process[i].rw_last_file != file) {
                                /* same pid switched files: restart tracking
                                 * from this position */
                                process[i].rw_range_start = pos;
                                process[i].rw_last_file_pos = pos + count;
                                process[i].rw_smallest_extent = count;
                                process[i].rw_largest_extent = count;
                                process[i].rw_offset = 0;
                                process[i].rw_last_file = file;
                                spin_unlock(&sbi->ll_process_lock);
                                return;
                        }
                        if (process[i].rw_last_file_pos != pos) {
                                /* discontiguous I/O: archive the finished
                                 * contiguous range into the offset table
                                 * (round-robin), then start a new range */
                                *off_count =
                                    (*off_count + 1) % LL_OFFSET_HIST_MAX;
                                offset[*off_count].rw_op = process[i].rw_op;
                                offset[*off_count].rw_pid = pid;
                                offset[*off_count].rw_range_start =
                                        process[i].rw_range_start;
                                offset[*off_count].rw_range_end =
                                        process[i].rw_last_file_pos;
                                offset[*off_count].rw_smallest_extent =
                                        process[i].rw_smallest_extent;
                                offset[*off_count].rw_largest_extent =
                                        process[i].rw_largest_extent;
                                offset[*off_count].rw_offset =
                                        process[i].rw_offset;
                                process[i].rw_op = rw;
                                process[i].rw_range_start = pos;
                                process[i].rw_smallest_extent = count;
                                process[i].rw_largest_extent = count;
                                /* seek distance from the end of the last
                                 * contiguous range */
                                process[i].rw_offset = pos -
                                        process[i].rw_last_file_pos;
                        }
                        if(process[i].rw_smallest_extent > count)
                                process[i].rw_smallest_extent = count;
                        if(process[i].rw_largest_extent < count)
                                process[i].rw_largest_extent = count;
                        process[i].rw_last_file_pos = pos + count;
                        spin_unlock(&sbi->ll_process_lock);
                        return;
                }
        }
        /* pid not tracked yet: claim the next process slot round-robin */
        *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
        process[*process_count].rw_pid = pid;
        process[*process_count].rw_op = rw;
        process[*process_count].rw_range_start = pos;
        process[*process_count].rw_last_file_pos = pos + count;
        process[*process_count].rw_smallest_extent = count;
        process[*process_count].rw_largest_extent = count;
        process[*process_count].rw_offset = 0;
        process[*process_count].rw_last_file = file;
        spin_unlock(&sbi->ll_process_lock);
}
2003
2004 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
2005 {
2006         struct timespec64 now;
2007         struct ll_sb_info *sbi = seq->private;
2008         struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
2009         struct ll_rw_process_info *process = sbi->ll_rw_process_info;
2010         int i;
2011
2012         ktime_get_real_ts64(&now);
2013
2014         if (!sbi->ll_rw_stats_on) {
2015                 seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
2016                 return 0;
2017         }
2018         spin_lock(&sbi->ll_process_lock);
2019
2020         seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.nsecs)\n",
2021                    (s64)now.tv_sec, now.tv_nsec);
2022         seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
2023                    "R/W", "PID", "RANGE START", "RANGE END",
2024                    "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
2025
2026         /* We stored the discontiguous offsets here; print them first */
2027         for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
2028                 if (offset[i].rw_pid != 0)
2029                         seq_printf(seq,
2030                                   "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
2031                                    offset[i].rw_op == READ ? 'R' : 'W',
2032                                    offset[i].rw_pid,
2033                                    offset[i].rw_range_start,
2034                                    offset[i].rw_range_end,
2035                                    (unsigned long)offset[i].rw_smallest_extent,
2036                                    (unsigned long)offset[i].rw_largest_extent,
2037                                    offset[i].rw_offset);
2038         }
2039
2040         /* Then print the current offsets for each process */
2041         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
2042                 if (process[i].rw_pid != 0)
2043                         seq_printf(seq,
2044                                   "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
2045                                    process[i].rw_op == READ ? 'R' : 'W',
2046                                    process[i].rw_pid,
2047                                    process[i].rw_range_start,
2048                                    process[i].rw_last_file_pos,
2049                                    (unsigned long)process[i].rw_smallest_extent,
2050                                    (unsigned long)process[i].rw_largest_extent,
2051                                    process[i].rw_offset);
2052         }
2053         spin_unlock(&sbi->ll_process_lock);
2054
2055         return 0;
2056 }
2057
2058 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
2059                                             const char __user *buf,
2060                                             size_t len, loff_t *off)
2061 {
2062         struct seq_file *seq = file->private_data;
2063         struct ll_sb_info *sbi = seq->private;
2064         struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
2065         struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
2066         __s64 value;
2067
2068         if (len == 0)
2069                 return -EINVAL;
2070
2071         value = ll_stats_pid_write(buf, len);
2072
2073         if (value == 0)
2074                 sbi->ll_rw_stats_on = 0;
2075         else
2076                 sbi->ll_rw_stats_on = 1;
2077
2078         spin_lock(&sbi->ll_process_lock);
2079         sbi->ll_offset_process_count = 0;
2080         sbi->ll_rw_offset_entry_count = 0;
2081         memset(process_info, 0, sizeof(struct ll_rw_process_info) *
2082                LL_PROCESS_HIST_MAX);
2083         memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
2084                LL_OFFSET_HIST_MAX);
2085         spin_unlock(&sbi->ll_process_lock);
2086
2087         return len;
2088 }
2089
2090 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);