4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 #define DEBUG_SUBSYSTEM S_LLITE
33 #include <linux/version.h>
34 #include <linux/user_namespace.h>
35 #include <linux/uidgid.h>
37 #include <uapi/linux/lustre/lustre_param.h>
38 #include <lprocfs_status.h>
39 #include <obd_support.h>
41 #include "llite_internal.h"
42 #include "vvp_internal.h"
44 static struct kobject *llite_kobj;
45 static struct dentry *llite_root;
47 static void llite_kobj_release(struct kobject *kobj)
49 if (!IS_ERR_OR_NULL(llite_root)) {
50 debugfs_remove(llite_root);
57 static struct kobj_type llite_kobj_ktype = {
58 .release = llite_kobj_release,
59 .sysfs_ops = &lustre_sysfs_ops,
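/*
 * Register the top-level client tunables: an "llite" kobject inside the
 * lustre kset (sysfs, typically /sys/fs/lustre/llite) and an "llite"
 * directory under the lustre debugfs root. The per-mount entries created
 * in ll_debugfs_register_super() below hang off these two anchors.
 */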
62 int llite_tunables_register(void)
66 llite_kobj = kzalloc(sizeof(*llite_kobj), GFP_KERNEL);
70 llite_kobj->kset = lustre_kset;
71 rc = kobject_init_and_add(llite_kobj, &llite_kobj_ktype,
72 &lustre_kset->kobj, "%s", "llite");
76 llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
80 kobject_put(llite_kobj);
86 void llite_tunables_unregister(void)
88 kobject_put(llite_kobj);
92 /* <debugfs>/lustre/llite mount point registration */
93 static const struct file_operations ll_rw_extents_stats_fops;
94 static const struct file_operations ll_rw_extents_stats_pp_fops;
95 static const struct file_operations ll_rw_offset_stats_fops;
98 * ll_stats_pid_write() - Determine if stats collection should be enabled
99 * @buf: Buffer containing the data written
100 * @len: Number of bytes in the buffer
 * Several debugfs stats files begin collecting data when a value is written,
 * and stop collecting when either '0' or 'disable' is written. This function
 * checks the written value to see if collection should be enabled or disabled.
106 * Return: If '0' or 'disable' is provided, 0 is returned. If the text
107 * equivalent of a number is written, that number is returned. Otherwise,
108 * 1 is returned. Non-zero return values indicate collection should be enabled.
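 *
 * For example (illustrative): writing "0" or "disable" returns 0 and stops
 * collection; writing "1234" returns 1234 (so a specific PID can be tracked);
 * writing any other non-numeric text returns 1, which simply enables
 * collection.
 */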
110 static s64 ll_stats_pid_write(const char __user *buf, size_t len)
112 unsigned long long value = 1;
116 rc = kstrtoull_from_user(buf, len, 0, &value);
117 if (rc < 0 && len < sizeof(kernbuf)) {
118 if (copy_from_user(kernbuf, buf, len))
122 if (kernbuf[len - 1] == '\n')
123 kernbuf[len - 1] = 0;
125 if (strncasecmp(kernbuf, "disable", 7) == 0)
132 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
135 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
137 struct obd_statfs osfs;
140 rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
144 return sprintf(buf, "%u\n", osfs.os_bsize);
146 LUSTRE_RO_ATTR(blocksize);
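/*
 * The LUSTRE_RO_ATTR()/LUSTRE_RW_ATTR() helpers generate the
 * "lustre_attr_<name>" attributes collected in llite_attrs[] below, so each
 * *_show()/*_store() pair in this file becomes a sysfs file of the same name
 * under the per-mount kobject.
 */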
148 static ssize_t stat_blocksize_show(struct kobject *kobj, struct attribute *attr,
151 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
154 return sprintf(buf, "%u\n", sbi->ll_stat_blksize);
157 static ssize_t stat_blocksize_store(struct kobject *kobj,
158 struct attribute *attr,
162 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
167 rc = kstrtouint(buffer, 10, &val);
	if (val != 0 && (val < PAGE_SIZE || (val & (val - 1)) != 0))
174 sbi->ll_stat_blksize = val;
178 LUSTRE_RW_ATTR(stat_blocksize);
180 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
183 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
185 struct obd_statfs osfs;
190 rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
194 blk_size = osfs.os_bsize >> 10;
195 result = osfs.os_blocks;
197 while (blk_size >>= 1)
200 return sprintf(buf, "%llu\n", result);
202 LUSTRE_RO_ATTR(kbytestotal);
204 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
207 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
209 struct obd_statfs osfs;
214 rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
218 blk_size = osfs.os_bsize >> 10;
219 result = osfs.os_bfree;
221 while (blk_size >>= 1)
224 return sprintf(buf, "%llu\n", result);
226 LUSTRE_RO_ATTR(kbytesfree);
228 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
231 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
233 struct obd_statfs osfs;
238 rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
242 blk_size = osfs.os_bsize >> 10;
243 result = osfs.os_bavail;
245 while (blk_size >>= 1)
248 return sprintf(buf, "%llu\n", result);
250 LUSTRE_RO_ATTR(kbytesavail);
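/*
 * The kbytes* conversions above work in powers of two: os_bsize is first
 * reduced to KiB (os_bsize >> 10), and the loop presumably doubles the block
 * count once per remaining factor of two (the loop body is elided here).
 * For example, with os_bsize = 4096 the loop runs twice, so
 * result = os_blocks * 4 KiB.
 */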
252 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
255 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
257 struct obd_statfs osfs;
260 rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
264 return sprintf(buf, "%llu\n", osfs.os_files);
266 LUSTRE_RO_ATTR(filestotal);
268 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
271 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
273 struct obd_statfs osfs;
276 rc = ll_statfs_internal(sbi, &osfs, OBD_STATFS_NODELAY);
280 return sprintf(buf, "%llu\n", osfs.os_ffree);
282 LUSTRE_RO_ATTR(filesfree);
284 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
287 return sprintf(buf, "local client\n");
289 LUSTRE_RO_ATTR(client_type);
291 LUSTRE_RW_ATTR(foreign_symlink_enable);
293 LUSTRE_RW_ATTR(foreign_symlink_prefix);
295 LUSTRE_RW_ATTR(foreign_symlink_upcall);
297 LUSTRE_WO_ATTR(foreign_symlink_upcall_info);
299 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
302 return sprintf(buf, "lustre\n");
304 LUSTRE_RO_ATTR(fstype);
306 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
309 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
312 return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
314 LUSTRE_RO_ATTR(uuid);
316 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
318 struct super_block *sb = m->private;
321 * See description of statistical counters in struct cl_site, and
324 return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
327 LDEBUGFS_SEQ_FOPS_RO(ll_site_stats);
329 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
330 struct attribute *attr, char *buf)
332 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
335 return scnprintf(buf, PAGE_SIZE, "%lu\n",
336 PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
339 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
340 struct attribute *attr,
341 const char *buffer, size_t count)
343 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
345 u64 ra_max_mb, pages_number;
348 rc = sysfs_memparse(buffer, count, &ra_max_mb, "MiB");
352 pages_number = round_up(ra_max_mb, 1024 * 1024) >> PAGE_SHIFT;
353 CDEBUG(D_INFO, "%s: set max_read_ahead_mb=%llu (%llu pages)\n",
354 sbi->ll_fsname, PAGES_TO_MiB(pages_number), pages_number);
355 if (pages_number > cfs_totalram_pages() / 2) {
357 CERROR("%s: cannot set max_read_ahead_mb=%llu > totalram/2=%luMB\n",
358 sbi->ll_fsname, PAGES_TO_MiB(pages_number),
359 PAGES_TO_MiB(cfs_totalram_pages() / 2));
363 spin_lock(&sbi->ll_lock);
364 sbi->ll_ra_info.ra_max_pages = pages_number;
365 spin_unlock(&sbi->ll_lock);
369 LUSTRE_RW_ATTR(max_read_ahead_mb);
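/*
 * Illustrative usage, assuming the standard lctl parameter naming and the
 * usual /sys/fs/lustre layout:
 *   lctl set_param llite.*.max_read_ahead_mb=256
 *   echo 256 > /sys/fs/lustre/llite/<fsname>-<id>/max_read_ahead_mb
 * Values are parsed with sysfs_memparse(), so suffixes such as "256M" or
 * "1G" should be accepted as well (MiB is the default unit).
 */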
371 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
372 struct attribute *attr,
375 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
378 return scnprintf(buf, PAGE_SIZE, "%lu\n",
379 PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
382 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
383 struct attribute *attr,
387 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
389 u64 ra_max_file_mb, pages_number;
392 rc = sysfs_memparse(buffer, count, &ra_max_file_mb, "MiB");
396 pages_number = round_up(ra_max_file_mb, 1024 * 1024) >> PAGE_SHIFT;
397 if (pages_number > sbi->ll_ra_info.ra_max_pages) {
398 CERROR("%s: cannot set max_read_ahead_per_file_mb=%llu > max_read_ahead_mb=%lu\n",
399 sbi->ll_fsname, PAGES_TO_MiB(pages_number),
400 PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
404 spin_lock(&sbi->ll_lock);
405 sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
406 spin_unlock(&sbi->ll_lock);
410 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
412 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
413 struct attribute *attr, char *buf)
415 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
418 return scnprintf(buf, PAGE_SIZE, "%lu\n",
419 PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages));
422 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
423 struct attribute *attr,
424 const char *buffer, size_t count)
426 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
428 u64 ra_max_whole_mb, pages_number;
431 rc = sysfs_memparse(buffer, count, &ra_max_whole_mb, "MiB");
435 pages_number = round_up(ra_max_whole_mb, 1024 * 1024) >> PAGE_SHIFT;
	/* Cap this at the current max readahead window size; the readahead
	 * algorithm does this anyway, so it is pointless to set it larger.
	 */
439 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
440 CERROR("%s: cannot set max_read_ahead_whole_mb=%llu > max_read_ahead_per_file_mb=%lu\n",
441 sbi->ll_fsname, PAGES_TO_MiB(pages_number),
442 PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
447 spin_lock(&sbi->ll_lock);
448 sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
449 spin_unlock(&sbi->ll_lock);
453 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
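/*
 * The store handlers above enforce an ordering on the read-ahead caps at
 * store time: max_read_ahead_whole_mb <= max_read_ahead_per_file_mb <=
 * max_read_ahead_mb, and max_read_ahead_mb itself may not exceed half of
 * total RAM.
 */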
455 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
457 struct super_block *sb = m->private;
458 struct ll_sb_info *sbi = ll_s2sbi(sb);
459 struct cl_client_cache *cache = sbi->ll_cache;
460 struct ll_ra_info *ra = &sbi->ll_ra_info;
464 mutex_lock(&cache->ccc_max_cache_mb_lock);
465 max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
466 unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
467 mutex_unlock(&cache->ccc_max_cache_mb_lock);
469 seq_printf(m, "users: %d\n"
470 "max_cached_mb: %ld\n"
473 "reclaim_count: %u\n"
474 "max_read_ahead_mb: %lu\n"
475 "used_read_ahead_mb: %d\n",
476 atomic_read(&cache->ccc_users),
478 max_cached_mb - unused_mb,
480 cache->ccc_lru_shrinkers,
481 PAGES_TO_MiB(ra->ra_max_pages),
482 PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
486 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
487 const char __user *buffer,
488 size_t count, loff_t *off)
490 struct seq_file *m = file->private_data;
491 struct super_block *sb = m->private;
492 struct ll_sb_info *sbi = ll_s2sbi(sb);
493 struct cl_client_cache *cache = sbi->ll_cache;
500 char kernbuf[128], *ptr;
503 if (count >= sizeof(kernbuf))
506 if (copy_from_user(kernbuf, buffer, count))
508 kernbuf[count] = '\0';
510 ptr = lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count);
511 rc = sysfs_memparse(ptr, count, &pages_number, "MiB");
515 pages_number >>= PAGE_SHIFT;
517 if (pages_number < 0 || pages_number > cfs_totalram_pages()) {
518 CERROR("%s: can't set max cache more than %lu MB\n",
520 PAGES_TO_MiB(cfs_totalram_pages()));
523 /* Allow enough cache so clients can make well-formed RPCs */
524 pages_number = max_t(long, pages_number, PTLRPC_MAX_BRW_PAGES);
526 mutex_lock(&cache->ccc_max_cache_mb_lock);
527 diff = pages_number - cache->ccc_lru_max;
529 /* easy - add more LRU slots. */
531 atomic_long_add(diff, &cache->ccc_lru_left);
535 env = cl_env_get(&refcheck);
537 GOTO(out_unlock, rc = PTR_ERR(env));
543 /* reduce LRU budget from free slots. */
545 long lru_left_old, lru_left_new, lru_left_ret;
547 lru_left_old = atomic_long_read(&cache->ccc_lru_left);
548 if (lru_left_old == 0)
551 lru_left_new = lru_left_old > diff ?
552 lru_left_old - diff : 0;
554 atomic_long_cmpxchg(&cache->ccc_lru_left,
557 if (likely(lru_left_old == lru_left_ret)) {
558 diff -= lru_left_old - lru_left_new;
559 nrpages += lru_left_old - lru_left_new;
567 if (sbi->ll_dt_exp == NULL) { /* being initialized */
572 /* Request extra free slots to avoid them all being used
573 * by other processes before this can continue shrinking.
575 tmp = diff + min_t(long, diff, MiB_TO_PAGES(1024));
576 /* difficult - have to ask OSCs to drop LRU slots. */
577 rc = obd_set_info_async(env, sbi->ll_dt_exp,
578 sizeof(KEY_CACHE_LRU_SHRINK),
579 KEY_CACHE_LRU_SHRINK,
580 sizeof(tmp), &tmp, NULL);
584 cl_env_put(env, &refcheck);
588 cache->ccc_lru_max = pages_number;
591 atomic_long_add(nrpages, &cache->ccc_lru_left);
594 mutex_unlock(&cache->ccc_max_cache_mb_lock);
597 LDEBUGFS_SEQ_FOPS(ll_max_cached_mb);
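/*
 * Summary of the max_cached_mb write path above: growing the limit simply
 * adds LRU slots to ccc_lru_left; shrinking first reclaims from the free
 * slots and, if that is not enough, asks the OSCs to drop LRU pages via
 * obd_set_info_async(KEY_CACHE_LRU_SHRINK). The write parser looks for an
 * optional "max_cached_mb:" prefix (lprocfs_find_named_value()), so a bare
 * size with an optional K/M/G suffix should also be accepted (MiB default).
 */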
599 static ssize_t checksums_show(struct kobject *kobj, struct attribute *attr,
602 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
605 return scnprintf(buf, PAGE_SIZE, "%u\n",
606 test_bit(LL_SBI_CHECKSUM, sbi->ll_flags));
609 static ssize_t checksums_store(struct kobject *kobj, struct attribute *attr,
610 const char *buffer, size_t count)
612 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
622 rc = kstrtobool(buffer, &val);
626 set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
628 clear_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
631 rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
632 KEY_CHECKSUM, sizeof(tmp), &tmp, NULL);
634 CWARN("Failed to set OSC checksum flags: %d\n", rc);
638 LUSTRE_RW_ATTR(checksums);
640 LUSTRE_ATTR(checksum_pages, 0644, checksums_show, checksums_store);
642 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
643 enum stats_track_type type)
645 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
648 if (sbi->ll_stats_track_type == type)
649 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
650 else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
651 return sprintf(buf, "0 (all)\n");
653 return sprintf(buf, "untracked\n");
656 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
657 size_t count, enum stats_track_type type)
659 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
664 rc = kstrtoul(buffer, 10, &pid);
668 sbi->ll_stats_track_id = pid;
670 sbi->ll_stats_track_type = STATS_TRACK_ALL;
672 sbi->ll_stats_track_type = type;
673 lprocfs_clear_stats(sbi->ll_stats);
677 static ssize_t stats_track_pid_show(struct kobject *kobj,
678 struct attribute *attr,
681 return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
684 static ssize_t stats_track_pid_store(struct kobject *kobj,
685 struct attribute *attr,
689 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
691 LUSTRE_RW_ATTR(stats_track_pid);
693 static ssize_t stats_track_ppid_show(struct kobject *kobj,
694 struct attribute *attr,
697 return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
700 static ssize_t stats_track_ppid_store(struct kobject *kobj,
701 struct attribute *attr,
705 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
707 LUSTRE_RW_ATTR(stats_track_ppid);
709 static ssize_t stats_track_gid_show(struct kobject *kobj,
710 struct attribute *attr,
713 return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
716 static ssize_t stats_track_gid_store(struct kobject *kobj,
717 struct attribute *attr,
721 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
723 LUSTRE_RW_ATTR(stats_track_gid);
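/*
 * The three stats_track_* attributes above share ll_wr_track_id(): writing a
 * non-zero id limits the "stats" counters to processes whose PID, parent PID
 * or GID matches it, writing 0 switches back to tracking everything, and each
 * write clears the existing counters. The filtering itself is applied in
 * ll_stats_ops_tally() further down.
 */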
725 static ssize_t statahead_running_max_show(struct kobject *kobj,
726 struct attribute *attr,
729 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
732 return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_sa_running_max);
735 static ssize_t statahead_running_max_store(struct kobject *kobj,
736 struct attribute *attr,
740 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
745 rc = kstrtoul(buffer, 0, &val);
749 if (val <= LL_SA_RUNNING_MAX) {
750 sbi->ll_sa_running_max = val;
	CERROR("Bad statahead_running_max value %lu. Valid values are in the range [0, %d]\n",
	       val, LL_SA_RUNNING_MAX);
759 LUSTRE_RW_ATTR(statahead_running_max);
761 static ssize_t statahead_max_show(struct kobject *kobj,
762 struct attribute *attr,
765 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
768 return sprintf(buf, "%u\n", sbi->ll_sa_max);
771 static ssize_t statahead_max_store(struct kobject *kobj,
772 struct attribute *attr,
776 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
781 rc = kstrtoul(buffer, 0, &val);
785 if (val <= LL_SA_RPC_MAX)
786 sbi->ll_sa_max = val;
788 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
793 LUSTRE_RW_ATTR(statahead_max);
795 static ssize_t statahead_agl_show(struct kobject *kobj,
796 struct attribute *attr,
799 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
802 return scnprintf(buf, PAGE_SIZE, "%u\n",
803 test_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags));
806 static ssize_t statahead_agl_store(struct kobject *kobj,
807 struct attribute *attr,
811 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
816 rc = kstrtobool(buffer, &val);
821 set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
823 clear_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
827 LUSTRE_RW_ATTR(statahead_agl);
829 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
831 struct super_block *sb = m->private;
832 struct ll_sb_info *sbi = ll_s2sbi(sb);
834 seq_printf(m, "statahead total: %u\n"
835 "statahead wrong: %u\n"
837 atomic_read(&sbi->ll_sa_total),
838 atomic_read(&sbi->ll_sa_wrong),
839 atomic_read(&sbi->ll_agl_total));
843 LDEBUGFS_SEQ_FOPS_RO(ll_statahead_stats);
845 static ssize_t lazystatfs_show(struct kobject *kobj,
846 struct attribute *attr,
849 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
852 return scnprintf(buf, PAGE_SIZE, "%u\n",
853 test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags));
856 static ssize_t lazystatfs_store(struct kobject *kobj,
857 struct attribute *attr,
861 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
866 rc = kstrtobool(buffer, &val);
871 set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
873 clear_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
877 LUSTRE_RW_ATTR(lazystatfs);
879 static ssize_t statfs_max_age_show(struct kobject *kobj, struct attribute *attr,
882 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
885 return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
888 static ssize_t statfs_max_age_store(struct kobject *kobj,
889 struct attribute *attr, const char *buffer,
892 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
897 rc = kstrtouint(buffer, 10, &val);
900 if (val > OBD_STATFS_CACHE_MAX_AGE)
903 sbi->ll_statfs_max_age = val;
907 LUSTRE_RW_ATTR(statfs_max_age);
909 static ssize_t max_easize_show(struct kobject *kobj,
910 struct attribute *attr,
913 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
918 rc = ll_get_max_mdsize(sbi, &ealen);
922 /* Limit xattr size returned to userspace based on kernel maximum */
923 return scnprintf(buf, PAGE_SIZE, "%u\n",
924 ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
926 LUSTRE_RO_ATTR(max_easize);
 * Get default_easize.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] kobj	per-mount llite sysfs kobject
 * \param[in] attr	unused for single entry
 * \param[out] buf	buffer to return the value in text form
 *
 * \retval positive	bytes written to \a buf on success
 * \retval negative	negated errno on failure
939 static ssize_t default_easize_show(struct kobject *kobj,
940 struct attribute *attr,
943 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
948 rc = ll_get_default_mdsize(sbi, &ealen);
952 /* Limit xattr size returned to userspace based on kernel maximum */
953 return scnprintf(buf, PAGE_SIZE, "%u\n",
954 ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
 * Set default_easize.
 *
 * Range checking on the passed value is handled by
 * ll_set_default_mdsize().
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] kobj	per-mount llite sysfs kobject
 * \param[in] attr	unused for single entry
 * \param[in] buffer	string passed from user space
 * \param[in] count	\a buffer length
 *
 * \retval positive	\a count on success
 * \retval negative	negated errno on failure
973 static ssize_t default_easize_store(struct kobject *kobj,
974 struct attribute *attr,
978 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
986 rc = kstrtouint(buffer, 10, &val);
990 rc = ll_set_default_mdsize(sbi, val);
996 LUSTRE_RW_ATTR(default_easize);
998 LDEBUGFS_SEQ_FOPS_RO(ll_sbi_flags);
1000 static ssize_t xattr_cache_show(struct kobject *kobj,
1001 struct attribute *attr,
1004 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1007 return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
1010 static ssize_t xattr_cache_store(struct kobject *kobj,
1011 struct attribute *attr,
1015 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1020 rc = kstrtobool(buffer, &val);
1024 if (val && !test_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags))
1027 sbi->ll_xattr_cache_enabled = val;
1028 sbi->ll_xattr_cache_set = 1;
1032 LUSTRE_RW_ATTR(xattr_cache);
1034 static ssize_t tiny_write_show(struct kobject *kobj,
1035 struct attribute *attr,
1038 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1041 return scnprintf(buf, PAGE_SIZE, "%u\n",
1042 test_bit(LL_SBI_TINY_WRITE, sbi->ll_flags));
1045 static ssize_t tiny_write_store(struct kobject *kobj,
1046 struct attribute *attr,
1050 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1055 rc = kstrtobool(buffer, &val);
1059 spin_lock(&sbi->ll_lock);
1061 set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
1063 clear_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
1064 spin_unlock(&sbi->ll_lock);
1068 LUSTRE_RW_ATTR(tiny_write);
1070 static ssize_t parallel_dio_show(struct kobject *kobj,
1071 struct attribute *attr,
1074 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1077 return snprintf(buf, PAGE_SIZE, "%u\n",
1078 test_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags));
1081 static ssize_t parallel_dio_store(struct kobject *kobj,
1082 struct attribute *attr,
1086 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1091 rc = kstrtobool(buffer, &val);
1095 spin_lock(&sbi->ll_lock);
1097 set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
1099 clear_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
1100 spin_unlock(&sbi->ll_lock);
1104 LUSTRE_RW_ATTR(parallel_dio);
1106 static ssize_t max_read_ahead_async_active_show(struct kobject *kobj,
1107 struct attribute *attr,
1110 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1113 return scnprintf(buf, PAGE_SIZE, "%u\n",
1114 sbi->ll_ra_info.ra_async_max_active);
1117 static ssize_t max_read_ahead_async_active_store(struct kobject *kobj,
1118 struct attribute *attr,
1122 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1127 rc = kstrtouint(buffer, 10, &val);
	 * It doesn't make any sense to make it exceed what the
	 * workqueue could actually support. This can easily
	 * oversubscribe the cores, but Lustre internally
	 * throttles to avoid those impacts.
1137 if (val > WQ_UNBOUND_MAX_ACTIVE) {
1138 CERROR("%s: cannot set max_read_ahead_async_active=%u larger than %u\n",
1139 sbi->ll_fsname, val, WQ_UNBOUND_MAX_ACTIVE);
1143 spin_lock(&sbi->ll_lock);
1144 sbi->ll_ra_info.ra_async_max_active = val;
1145 spin_unlock(&sbi->ll_lock);
1149 LUSTRE_RW_ATTR(max_read_ahead_async_active);
1151 static ssize_t read_ahead_async_file_threshold_mb_show(struct kobject *kobj,
1152 struct attribute *attr,
1155 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1158 return scnprintf(buf, PAGE_SIZE, "%lu\n", PAGES_TO_MiB(
1159 sbi->ll_ra_info.ra_async_pages_per_file_threshold));
1163 read_ahead_async_file_threshold_mb_store(struct kobject *kobj,
1164 struct attribute *attr,
1165 const char *buffer, size_t count)
1167 unsigned long pages_number;
1168 unsigned long max_ra_per_file;
1169 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1173 rc = kstrtoul(buffer, 10, &pages_number);
1177 pages_number = MiB_TO_PAGES(pages_number);
1178 max_ra_per_file = sbi->ll_ra_info.ra_max_pages_per_file;
	if (pages_number > max_ra_per_file) {
		CERROR("%s: can't set read_ahead_async_file_threshold_mb=%lu > max_read_ahead_per_file_mb=%lu\n",
		       sbi->ll_fsname, PAGES_TO_MiB(pages_number),
		       PAGES_TO_MiB(max_ra_per_file));
1186 sbi->ll_ra_info.ra_async_pages_per_file_threshold = pages_number;
1190 LUSTRE_RW_ATTR(read_ahead_async_file_threshold_mb);
1192 static ssize_t read_ahead_range_kb_show(struct kobject *kobj,
					struct attribute *attr, char *buf)
1195 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1198 return snprintf(buf, PAGE_SIZE, "%lu\n",
1199 sbi->ll_ra_info.ra_range_pages << (PAGE_SHIFT - 10));
1203 read_ahead_range_kb_store(struct kobject *kobj,
1204 struct attribute *attr,
1205 const char *buffer, size_t count)
1207 unsigned long pages_number;
1208 unsigned long max_ra_per_file;
1210 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1214 rc = sysfs_memparse(buffer, count, &val, "KiB");
1218 pages_number = val >> PAGE_SHIFT;
1219 /* Disable mmap range read */
1220 if (pages_number == 0)
1223 max_ra_per_file = sbi->ll_ra_info.ra_max_pages_per_file;
1224 if (pages_number > max_ra_per_file ||
1225 pages_number < RA_MIN_MMAP_RANGE_PAGES)
1229 spin_lock(&sbi->ll_lock);
1230 sbi->ll_ra_info.ra_range_pages = pages_number;
1231 spin_unlock(&sbi->ll_lock);
1235 LUSTRE_RW_ATTR(read_ahead_range_kb);
1237 static ssize_t fast_read_show(struct kobject *kobj,
1238 struct attribute *attr,
1241 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1244 return scnprintf(buf, PAGE_SIZE, "%u\n",
1245 test_bit(LL_SBI_FAST_READ, sbi->ll_flags));
1248 static ssize_t fast_read_store(struct kobject *kobj,
1249 struct attribute *attr,
1253 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1258 rc = kstrtobool(buffer, &val);
1262 spin_lock(&sbi->ll_lock);
1264 set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
1266 clear_bit(LL_SBI_FAST_READ, sbi->ll_flags);
1267 spin_unlock(&sbi->ll_lock);
1271 LUSTRE_RW_ATTR(fast_read);
1273 static ssize_t file_heat_show(struct kobject *kobj,
1274 struct attribute *attr,
1277 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1280 return scnprintf(buf, PAGE_SIZE, "%u\n",
1281 test_bit(LL_SBI_FILE_HEAT, sbi->ll_flags));
1284 static ssize_t file_heat_store(struct kobject *kobj,
1285 struct attribute *attr,
1289 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1294 rc = kstrtobool(buffer, &val);
1298 spin_lock(&sbi->ll_lock);
1300 set_bit(LL_SBI_FILE_HEAT, sbi->ll_flags);
1302 clear_bit(LL_SBI_FILE_HEAT, sbi->ll_flags);
1303 spin_unlock(&sbi->ll_lock);
1307 LUSTRE_RW_ATTR(file_heat);
1309 static ssize_t heat_decay_percentage_show(struct kobject *kobj,
1310 struct attribute *attr,
1313 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1316 return scnprintf(buf, PAGE_SIZE, "%u\n",
1317 (sbi->ll_heat_decay_weight * 100 + 128) / 256);
1320 static ssize_t heat_decay_percentage_store(struct kobject *kobj,
1321 struct attribute *attr,
1325 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1330 rc = kstrtoul(buffer, 10, &val);
	if (val > 100)
1337 sbi->ll_heat_decay_weight = (val * 256 + 50) / 100;
1341 LUSTRE_RW_ATTR(heat_decay_percentage);
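/*
 * heat_decay_percentage is stored internally as a weight out of 256:
 * _store() maps a percentage to (val * 256 + 50) / 100 and _show() maps it
 * back with (weight * 100 + 128) / 256, both rounding to nearest. For
 * example, writing 30 stores a weight of 77, which reads back as 30.
 */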
1343 static ssize_t heat_period_second_show(struct kobject *kobj,
1344 struct attribute *attr,
1347 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1350 return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
1353 static ssize_t heat_period_second_store(struct kobject *kobj,
1354 struct attribute *attr,
1358 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1363 rc = kstrtoul(buffer, 10, &val);
1370 sbi->ll_heat_period_second = val;
1374 LUSTRE_RW_ATTR(heat_period_second);
1376 static ssize_t opencache_threshold_count_show(struct kobject *kobj,
1377 struct attribute *attr,
1380 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1383 if (sbi->ll_oc_thrsh_count)
1384 return snprintf(buf, PAGE_SIZE, "%u\n",
1385 sbi->ll_oc_thrsh_count);
1387 return snprintf(buf, PAGE_SIZE, "off\n");
1390 static ssize_t opencache_threshold_count_store(struct kobject *kobj,
1391 struct attribute *attr,
1395 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1400 rc = kstrtouint(buffer, 10, &val);
1403 /* also accept "off" to disable and "on" to always cache */
1404 rc = kstrtobool(buffer, &enable);
1409 sbi->ll_oc_thrsh_count = val;
1413 LUSTRE_RW_ATTR(opencache_threshold_count);
1415 static ssize_t opencache_threshold_ms_show(struct kobject *kobj,
1416 struct attribute *attr,
1419 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1422 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_oc_thrsh_ms);
1425 static ssize_t opencache_threshold_ms_store(struct kobject *kobj,
1426 struct attribute *attr,
1430 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1435 rc = kstrtouint(buffer, 10, &val);
1439 sbi->ll_oc_thrsh_ms = val;
1443 LUSTRE_RW_ATTR(opencache_threshold_ms);
1445 static ssize_t opencache_max_ms_show(struct kobject *kobj,
1446 struct attribute *attr,
1449 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1452 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_oc_max_ms);
1455 static ssize_t opencache_max_ms_store(struct kobject *kobj,
1456 struct attribute *attr,
1460 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1465 rc = kstrtouint(buffer, 10, &val);
1469 sbi->ll_oc_max_ms = val;
1473 LUSTRE_RW_ATTR(opencache_max_ms);
1475 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
1477 struct super_block *sb = m->private;
1478 struct ll_sb_info *sbi = ll_s2sbi(sb);
1479 struct cl_client_cache *cache = sbi->ll_cache;
1483 pages = atomic_long_read(&cache->ccc_unstable_nr);
1484 mb = (pages * PAGE_SIZE) >> 20;
1486 seq_printf(m, "unstable_check: %8d\n"
1487 "unstable_pages: %12ld\n"
1488 "unstable_mb: %8d\n",
1489 cache->ccc_unstable_check, pages, mb);
1493 static ssize_t ll_unstable_stats_seq_write(struct file *file,
1494 const char __user *buffer,
1495 size_t count, loff_t *unused)
1497 struct seq_file *seq = file->private_data;
1498 struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)seq->private);
1505 if (count >= sizeof(kernbuf))
1508 if (copy_from_user(kernbuf, buffer, count))
	buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
		  kernbuf;
1514 rc = kstrtobool_from_user(buffer, count, &val);
1518 /* borrow lru lock to set the value */
1519 spin_lock(&sbi->ll_cache->ccc_lru_lock);
1520 sbi->ll_cache->ccc_unstable_check = val;
1521 spin_unlock(&sbi->ll_cache->ccc_lru_lock);
1526 LDEBUGFS_SEQ_FOPS(ll_unstable_stats);
1528 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
1530 struct super_block *sb = m->private;
1531 struct ll_sb_info *sbi = ll_s2sbi(sb);
1532 struct root_squash_info *squash = &sbi->ll_squash;
1534 seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
1538 static ssize_t ll_root_squash_seq_write(struct file *file,
1539 const char __user *buffer,
1540 size_t count, loff_t *off)
1542 struct seq_file *m = file->private_data;
1543 struct super_block *sb = m->private;
1544 struct ll_sb_info *sbi = ll_s2sbi(sb);
1545 struct root_squash_info *squash = &sbi->ll_squash;
1547 return lprocfs_wr_root_squash(buffer, count, squash, sbi->ll_fsname);
1550 LDEBUGFS_SEQ_FOPS(ll_root_squash);
1552 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
1554 struct super_block *sb = m->private;
1555 struct ll_sb_info *sbi = ll_s2sbi(sb);
1556 struct root_squash_info *squash = &sbi->ll_squash;
1559 spin_lock(&squash->rsi_lock);
1560 if (!list_empty(&squash->rsi_nosquash_nids)) {
1561 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
1562 &squash->rsi_nosquash_nids);
1566 seq_puts(m, "NONE\n");
1568 spin_unlock(&squash->rsi_lock);
1573 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
1574 const char __user *buffer,
1575 size_t count, loff_t *off)
1577 struct seq_file *m = file->private_data;
1578 struct super_block *sb = m->private;
1579 struct ll_sb_info *sbi = ll_s2sbi(sb);
1580 struct root_squash_info *squash = &sbi->ll_squash;
1583 rc = lprocfs_wr_nosquash_nids(buffer, count, squash, sbi->ll_fsname);
1587 ll_compute_rootsquash_state(sbi);
1592 LDEBUGFS_SEQ_FOPS(ll_nosquash_nids);
1594 static int ll_pcc_seq_show(struct seq_file *m, void *v)
1596 struct super_block *sb = m->private;
1597 struct ll_sb_info *sbi = ll_s2sbi(sb);
1599 return pcc_super_dump(&sbi->ll_pcc_super, m);
1602 static ssize_t ll_pcc_seq_write(struct file *file, const char __user *buffer,
1603 size_t count, loff_t *off)
1605 struct seq_file *m = file->private_data;
1606 struct super_block *sb = m->private;
1607 struct ll_sb_info *sbi = ll_s2sbi(sb);
1611 if (count >= LPROCFS_WR_PCC_MAX_CMD)
1614 if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_PCC))
1617 OBD_ALLOC(kernbuf, count + 1);
1618 if (kernbuf == NULL)
1621 if (copy_from_user(kernbuf, buffer, count))
1622 GOTO(out_free_kernbuff, rc = -EFAULT);
1624 rc = pcc_cmd_handle(kernbuf, count, &sbi->ll_pcc_super);
1626 OBD_FREE(kernbuf, count + 1);
1627 return rc ? rc : count;
1629 LDEBUGFS_SEQ_FOPS(ll_pcc);
1631 struct ldebugfs_vars lprocfs_llite_obd_vars[] = {
1633 .fops = &ll_site_stats_fops },
1634 { .name = "max_cached_mb",
1635 .fops = &ll_max_cached_mb_fops },
1636 { .name = "statahead_stats",
1637 .fops = &ll_statahead_stats_fops },
1638 { .name = "unstable_stats",
1639 .fops = &ll_unstable_stats_fops },
1640 { .name = "sbi_flags",
1641 .fops = &ll_sbi_flags_fops },
1642 { .name = "root_squash",
1643 .fops = &ll_root_squash_fops },
1644 { .name = "nosquash_nids",
1645 .fops = &ll_nosquash_nids_fops },
1647 .fops = &ll_pcc_fops, },
1651 #define MAX_STRING_SIZE 128
1653 static struct attribute *llite_attrs[] = {
1654 &lustre_attr_blocksize.attr,
1655 &lustre_attr_stat_blocksize.attr,
1656 &lustre_attr_kbytestotal.attr,
1657 &lustre_attr_kbytesfree.attr,
1658 &lustre_attr_kbytesavail.attr,
1659 &lustre_attr_filestotal.attr,
1660 &lustre_attr_filesfree.attr,
1661 &lustre_attr_client_type.attr,
1662 &lustre_attr_foreign_symlink_enable.attr,
1663 &lustre_attr_foreign_symlink_prefix.attr,
1664 &lustre_attr_foreign_symlink_upcall.attr,
1665 &lustre_attr_foreign_symlink_upcall_info.attr,
1666 &lustre_attr_fstype.attr,
1667 &lustre_attr_uuid.attr,
1668 &lustre_attr_checksums.attr,
1669 &lustre_attr_checksum_pages.attr,
1670 &lustre_attr_max_read_ahead_mb.attr,
1671 &lustre_attr_max_read_ahead_per_file_mb.attr,
1672 &lustre_attr_max_read_ahead_whole_mb.attr,
1673 &lustre_attr_max_read_ahead_async_active.attr,
1674 &lustre_attr_read_ahead_async_file_threshold_mb.attr,
1675 &lustre_attr_read_ahead_range_kb.attr,
1676 &lustre_attr_stats_track_pid.attr,
1677 &lustre_attr_stats_track_ppid.attr,
1678 &lustre_attr_stats_track_gid.attr,
1679 &lustre_attr_statahead_running_max.attr,
1680 &lustre_attr_statahead_max.attr,
1681 &lustre_attr_statahead_agl.attr,
1682 &lustre_attr_lazystatfs.attr,
1683 &lustre_attr_statfs_max_age.attr,
1684 &lustre_attr_max_easize.attr,
1685 &lustre_attr_default_easize.attr,
1686 &lustre_attr_xattr_cache.attr,
1687 &lustre_attr_fast_read.attr,
1688 &lustre_attr_tiny_write.attr,
1689 &lustre_attr_parallel_dio.attr,
1690 &lustre_attr_file_heat.attr,
1691 &lustre_attr_heat_decay_percentage.attr,
1692 &lustre_attr_heat_period_second.attr,
1693 &lustre_attr_opencache_threshold_count.attr,
1694 &lustre_attr_opencache_threshold_ms.attr,
1695 &lustre_attr_opencache_max_ms.attr,
1699 static void sbi_kobj_release(struct kobject *kobj)
1701 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1703 complete(&sbi->ll_kobj_unregister);
1706 static struct kobj_type sbi_ktype = {
1707 .default_attrs = llite_attrs,
1708 .sysfs_ops = &lustre_sysfs_ops,
1709 .release = sbi_kobj_release,
1712 static const struct llite_file_opcode {
1716 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1717 /* file operation */
1718 { LPROC_LL_READ_BYTES, LPROCFS_TYPE_BYTES_FULL, "read_bytes" },
1719 { LPROC_LL_WRITE_BYTES, LPROCFS_TYPE_BYTES_FULL, "write_bytes" },
1720 { LPROC_LL_READ, LPROCFS_TYPE_LATENCY, "read" },
1721 { LPROC_LL_WRITE, LPROCFS_TYPE_LATENCY, "write" },
1722 { LPROC_LL_IOCTL, LPROCFS_TYPE_REQS, "ioctl" },
1723 { LPROC_LL_OPEN, LPROCFS_TYPE_LATENCY, "open" },
1724 { LPROC_LL_RELEASE, LPROCFS_TYPE_LATENCY, "close" },
1725 { LPROC_LL_MMAP, LPROCFS_TYPE_LATENCY, "mmap" },
1726 { LPROC_LL_FAULT, LPROCFS_TYPE_LATENCY, "page_fault" },
1727 { LPROC_LL_MKWRITE, LPROCFS_TYPE_LATENCY, "page_mkwrite" },
1728 { LPROC_LL_LLSEEK, LPROCFS_TYPE_LATENCY, "seek" },
1729 { LPROC_LL_FSYNC, LPROCFS_TYPE_LATENCY, "fsync" },
1730 { LPROC_LL_READDIR, LPROCFS_TYPE_LATENCY, "readdir" },
1731 { LPROC_LL_INODE_OCOUNT,LPROCFS_TYPE_REQS |
1732 LPROCFS_CNTR_AVGMINMAX |
1733 LPROCFS_CNTR_STDDEV, "opencount" },
1734 { LPROC_LL_INODE_OPCLTM,LPROCFS_TYPE_LATENCY, "openclosetime" },
1735 /* inode operation */
1736 { LPROC_LL_SETATTR, LPROCFS_TYPE_LATENCY, "setattr" },
1737 { LPROC_LL_TRUNC, LPROCFS_TYPE_LATENCY, "truncate" },
1738 { LPROC_LL_FLOCK, LPROCFS_TYPE_LATENCY, "flock" },
1739 { LPROC_LL_GETATTR, LPROCFS_TYPE_LATENCY, "getattr" },
1740 { LPROC_LL_FALLOCATE, LPROCFS_TYPE_LATENCY, "fallocate"},
1741 /* dir inode operation */
1742 { LPROC_LL_CREATE, LPROCFS_TYPE_LATENCY, "create" },
1743 { LPROC_LL_LINK, LPROCFS_TYPE_LATENCY, "link" },
1744 { LPROC_LL_UNLINK, LPROCFS_TYPE_LATENCY, "unlink" },
1745 { LPROC_LL_SYMLINK, LPROCFS_TYPE_LATENCY, "symlink" },
1746 { LPROC_LL_MKDIR, LPROCFS_TYPE_LATENCY, "mkdir" },
1747 { LPROC_LL_RMDIR, LPROCFS_TYPE_LATENCY, "rmdir" },
1748 { LPROC_LL_MKNOD, LPROCFS_TYPE_LATENCY, "mknod" },
1749 { LPROC_LL_RENAME, LPROCFS_TYPE_LATENCY, "rename" },
1750 /* special inode operation */
1751 { LPROC_LL_STATFS, LPROCFS_TYPE_LATENCY, "statfs" },
1752 { LPROC_LL_SETXATTR, LPROCFS_TYPE_LATENCY, "setxattr" },
1753 { LPROC_LL_GETXATTR, LPROCFS_TYPE_LATENCY, "getxattr" },
1754 { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REQS, "getxattr_hits" },
1755 { LPROC_LL_LISTXATTR, LPROCFS_TYPE_LATENCY, "listxattr" },
1756 { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_LATENCY, "removexattr" },
1757 { LPROC_LL_INODE_PERM, LPROCFS_TYPE_LATENCY, "inode_permission" },
1760 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count)
1765 if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1766 lprocfs_counter_add(sbi->ll_stats, op, count);
1767 else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1768 sbi->ll_stats_track_id == current->pid)
1769 lprocfs_counter_add(sbi->ll_stats, op, count);
1770 else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1771 sbi->ll_stats_track_id == current->real_parent->pid)
1772 lprocfs_counter_add(sbi->ll_stats, op, count);
1773 else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1774 sbi->ll_stats_track_id ==
1775 from_kgid(&init_user_ns, current_gid()))
1776 lprocfs_counter_add(sbi->ll_stats, op, count);
1778 EXPORT_SYMBOL(ll_stats_ops_tally);
1780 static const char *const ra_stat_string[] = {
1781 [RA_STAT_HIT] = "hits",
1782 [RA_STAT_MISS] = "misses",
1783 [RA_STAT_DISTANT_READPAGE] = "readpage_not_consecutive",
1784 [RA_STAT_MISS_IN_WINDOW] = "miss_inside_window",
1785 [RA_STAT_FAILED_GRAB_PAGE] = "failed_grab_cache_page",
1786 [RA_STAT_FAILED_MATCH] = "failed_lock_match",
1787 [RA_STAT_DISCARDED] = "read_but_discarded",
1788 [RA_STAT_ZERO_LEN] = "zero_length_file",
1789 [RA_STAT_ZERO_WINDOW] = "zero_size_window",
1790 [RA_STAT_EOF] = "readahead_to_eof",
1791 [RA_STAT_MAX_IN_FLIGHT] = "hit_max_readahead_issue",
1792 [RA_STAT_WRONG_GRAB_PAGE] = "wrong_page_from_grab_cache_page",
1793 [RA_STAT_FAILED_REACH_END] = "failed_to_reach_end",
1794 [RA_STAT_ASYNC] = "async_readahead",
1795 [RA_STAT_FAILED_FAST_READ] = "failed_to_fast_read",
1796 [RA_STAT_MMAP_RANGE_READ] = "mmap_range_read",
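/*
 * Each entry above becomes a row of the per-mount "read_ahead_stats" debugfs
 * file created in ll_debugfs_register_super() below; the counters are
 * initialized with a "pages" unit, so every value is a page count.
 */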
1799 int ll_debugfs_register_super(struct super_block *sb, const char *name)
1801 struct lustre_sb_info *lsi = s2lsi(sb);
1802 struct ll_sb_info *sbi = ll_s2sbi(sb);
1808 if (IS_ERR_OR_NULL(llite_root))
1811 sbi->ll_debugfs_entry = debugfs_create_dir(name, llite_root);
1812 ldebugfs_add_vars(sbi->ll_debugfs_entry, lprocfs_llite_obd_vars, sb);
1814 debugfs_create_file("dump_page_cache", 0444, sbi->ll_debugfs_entry, sbi,
1815 &vvp_dump_pgcache_file_ops);
1817 debugfs_create_file("extents_stats", 0644, sbi->ll_debugfs_entry, sbi,
1818 &ll_rw_extents_stats_fops);
1820 debugfs_create_file("extents_stats_per_process", 0644,
1821 sbi->ll_debugfs_entry, sbi,
1822 &ll_rw_extents_stats_pp_fops);
1824 debugfs_create_file("offset_stats", 0644, sbi->ll_debugfs_entry, sbi,
1825 &ll_rw_offset_stats_fops);
1827 /* File operations stats */
1828 sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1829 LPROCFS_STATS_FLAG_NONE);
1830 if (sbi->ll_stats == NULL)
1831 GOTO(out_debugfs, err = -ENOMEM);
1833 /* do counter init */
1834 for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1835 u32 type = llite_opcode_table[id].type;
1836 void *ptr = "unknown";
1838 if (type & LPROCFS_TYPE_REQS)
1840 else if (type & LPROCFS_TYPE_BYTES)
1842 else if (type & LPROCFS_TYPE_USEC)
1844 lprocfs_counter_init(sbi->ll_stats,
1845 llite_opcode_table[id].opcode, type,
1846 llite_opcode_table[id].opname, ptr);
1849 debugfs_create_file("stats", 0644, sbi->ll_debugfs_entry,
1850 sbi->ll_stats, &ldebugfs_stats_seq_fops);
1852 sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1853 LPROCFS_STATS_FLAG_NONE);
1854 if (sbi->ll_ra_stats == NULL)
1855 GOTO(out_stats, err = -ENOMEM);
1857 for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1858 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1859 ra_stat_string[id], "pages");
1861 debugfs_create_file("read_ahead_stats", 0644, sbi->ll_debugfs_entry,
1862 sbi->ll_ra_stats, &ldebugfs_stats_seq_fops);
	/* Register the per-mount sysfs kset here as well */
1866 sbi->ll_kset.kobj.parent = llite_kobj;
1867 sbi->ll_kset.kobj.ktype = &sbi_ktype;
1868 init_completion(&sbi->ll_kobj_unregister);
1869 err = kobject_set_name(&sbi->ll_kset.kobj, "%s", name);
1871 GOTO(out_ra_stats, err);
1873 err = kset_register(&sbi->ll_kset);
1875 GOTO(out_ra_stats, err);
1877 lsi->lsi_kobj = kobject_get(&sbi->ll_kset.kobj);
1881 lprocfs_free_stats(&sbi->ll_ra_stats);
1883 lprocfs_free_stats(&sbi->ll_stats);
1885 debugfs_remove_recursive(sbi->ll_debugfs_entry);
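/*
 * Tear-down counterpart of ll_debugfs_register_super(): remove the debugfs
 * tree, drop the sysfs links to the data/metadata obd types, release the
 * kobject reference held by lsi_kobj, unregister the kset and wait for its
 * release, then free the stats counters.
 */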
1890 void ll_debugfs_unregister_super(struct super_block *sb)
1892 struct lustre_sb_info *lsi = s2lsi(sb);
1893 struct ll_sb_info *sbi = ll_s2sbi(sb);
1895 debugfs_remove_recursive(sbi->ll_debugfs_entry);
1898 sysfs_remove_link(&sbi->ll_kset.kobj,
1899 sbi->ll_dt_obd->obd_type->typ_name);
1902 sysfs_remove_link(&sbi->ll_kset.kobj,
1903 sbi->ll_md_obd->obd_type->typ_name);
1905 kobject_put(lsi->lsi_kobj);
1907 kset_unregister(&sbi->ll_kset);
1908 wait_for_completion(&sbi->ll_kobj_unregister);
1910 lprocfs_free_stats(&sbi->ll_ra_stats);
1911 lprocfs_free_stats(&sbi->ll_stats);
1913 #undef MAX_STRING_SIZE
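/*
 * ll_display_extents_info() prints one histogram row per power-of-two I/O
 * size bucket, starting at 1 << LL_HIST_START bytes, with per-bucket counts
 * and cumulative percentages for reads and writes side by side; the unit
 * letter advances through "KMGTPEZY" as the sizes grow.
 */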
1915 static void ll_display_extents_info(struct ll_rw_extents_info *rw_extents,
1916 struct seq_file *seq, int which)
1918 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1919 unsigned long start, end, r, w;
1920 char *unitp = "KMGTPEZY";
1922 struct per_process_info *pp_info;
1924 pp_info = &rw_extents->pp_extents[which];
1929 for (i = 0; i < LL_HIST_MAX; i++) {
1930 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1931 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1934 for (i = 0; i < LL_HIST_MAX; i++) {
1935 r = pp_info->pp_r_hist.oh_buckets[i];
1936 w = pp_info->pp_w_hist.oh_buckets[i];
1939 end = 1 << (i + LL_HIST_START - units);
1940 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u | "
1941 "%14lu %4u %4u\n", start, *unitp, end, *unitp,
1942 (i == LL_HIST_MAX - 1) ? '+' : ' ',
1943 r, pct(r, read_tot), pct(read_cum, read_tot),
1944 w, pct(w, write_tot), pct(write_cum, write_tot));
1946 if (start == (1 << 10)) {
1951 if (read_cum == read_tot && write_cum == write_tot)
1956 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1958 struct ll_sb_info *sbi = seq->private;
1959 struct ll_rw_extents_info *rw_extents = sbi->ll_rw_extents_info;
1962 if (!sbi->ll_rw_stats_on || !rw_extents) {
		seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
1967 spin_lock(&sbi->ll_pp_extent_lock);
1968 lprocfs_stats_header(seq, ktime_get(), rw_extents->pp_init, 25, ":", 1);
1969 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1970 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1971 "extents", "calls", "%", "cum%", "calls", "%", "cum%");
1973 for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1974 if (rw_extents->pp_extents[k].pid != 0) {
1975 seq_printf(seq, "\nPID: %d\n",
1976 rw_extents->pp_extents[k].pid);
1977 ll_display_extents_info(rw_extents, seq, k);
1980 spin_unlock(&sbi->ll_pp_extent_lock);
1984 static int alloc_rw_stats_info(struct ll_sb_info *sbi)
1986 struct ll_rw_extents_info *rw_extents;
1987 struct ll_rw_process_info *offset;
1988 struct ll_rw_process_info *process;
1991 OBD_ALLOC(rw_extents, sizeof(*rw_extents));
1995 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1996 spin_lock_init(&rw_extents->pp_extents[i].pp_r_hist.oh_lock);
1997 spin_lock_init(&rw_extents->pp_extents[i].pp_w_hist.oh_lock);
2000 spin_lock(&sbi->ll_pp_extent_lock);
2001 if (!sbi->ll_rw_extents_info)
2002 sbi->ll_rw_extents_info = rw_extents;
2003 spin_unlock(&sbi->ll_pp_extent_lock);
2004 /* another writer allocated the struct before we got the lock */
2005 if (sbi->ll_rw_extents_info != rw_extents)
2006 OBD_FREE(rw_extents, sizeof(*rw_extents));
2008 OBD_ALLOC(process, sizeof(*process) * LL_PROCESS_HIST_MAX);
2010 GOTO(out, rc = -ENOMEM);
2011 OBD_ALLOC(offset, sizeof(*offset) * LL_OFFSET_HIST_MAX);
2013 GOTO(out_free, rc = -ENOMEM);
2015 spin_lock(&sbi->ll_process_lock);
2016 if (!sbi->ll_rw_process_info)
2017 sbi->ll_rw_process_info = process;
2018 if (!sbi->ll_rw_offset_info)
2019 sbi->ll_rw_offset_info = offset;
2020 spin_unlock(&sbi->ll_process_lock);
2022 /* another writer allocated the structs before we got the lock */
2023 if (sbi->ll_rw_offset_info != offset)
2024 OBD_FREE(offset, sizeof(*offset) * LL_OFFSET_HIST_MAX);
2025 if (sbi->ll_rw_process_info != process) {
2027 OBD_FREE(process, sizeof(*process) * LL_PROCESS_HIST_MAX);
2034 void ll_free_rw_stats_info(struct ll_sb_info *sbi)
2036 if (sbi->ll_rw_extents_info) {
2037 OBD_FREE(sbi->ll_rw_extents_info,
2038 sizeof(*sbi->ll_rw_extents_info));
2039 sbi->ll_rw_extents_info = NULL;
2041 if (sbi->ll_rw_offset_info) {
2042 OBD_FREE(sbi->ll_rw_offset_info,
2043 sizeof(*sbi->ll_rw_offset_info) * LL_OFFSET_HIST_MAX);
2044 sbi->ll_rw_offset_info = NULL;
2046 if (sbi->ll_rw_process_info) {
2047 OBD_FREE(sbi->ll_rw_process_info,
2048 sizeof(*sbi->ll_rw_process_info) * LL_PROCESS_HIST_MAX);
2049 sbi->ll_rw_process_info = NULL;
2053 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
2054 const char __user *buf,
2055 size_t len, loff_t *off)
2057 struct seq_file *seq = file->private_data;
2058 struct ll_sb_info *sbi = seq->private;
2059 struct ll_rw_extents_info *rw_extents;
2066 value = ll_stats_pid_write(buf, len);
2069 sbi->ll_rw_stats_on = 0;
2071 if (!sbi->ll_rw_extents_info) {
2072 int rc = alloc_rw_stats_info(sbi);
2077 sbi->ll_rw_stats_on = 1;
2081 spin_lock(&sbi->ll_pp_extent_lock);
2082 rw_extents = sbi->ll_rw_extents_info;
2084 rw_extents->pp_init = ktime_get();
2085 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
2086 rw_extents->pp_extents[i].pid = 0;
2087 lprocfs_oh_clear(&rw_extents->pp_extents[i].pp_r_hist);
2088 lprocfs_oh_clear(&rw_extents->pp_extents[i].pp_w_hist);
2091 spin_unlock(&sbi->ll_pp_extent_lock);
2096 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats_pp);
2098 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
2100 struct ll_sb_info *sbi = seq->private;
2101 struct ll_rw_extents_info *rw_extents = sbi->ll_rw_extents_info;
2103 if (!sbi->ll_rw_stats_on || !rw_extents) {
		seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
2108 spin_lock(&sbi->ll_lock);
2109 lprocfs_stats_header(seq, ktime_get(), rw_extents->pp_init, 25, ":", 1);
2111 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
2112 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
2113 "extents", "calls", "%", "cum%",
2114 "calls", "%", "cum%");
2116 ll_display_extents_info(rw_extents, seq, LL_PROCESS_HIST_MAX);
2117 spin_unlock(&sbi->ll_lock);
2122 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
2123 const char __user *buf,
2124 size_t len, loff_t *off)
2126 struct seq_file *seq = file->private_data;
2127 struct ll_sb_info *sbi = seq->private;
2128 struct ll_rw_extents_info *rw_extents;
2135 value = ll_stats_pid_write(buf, len);
2138 sbi->ll_rw_stats_on = 0;
2140 if (!sbi->ll_rw_extents_info) {
2141 int rc = alloc_rw_stats_info(sbi);
2146 sbi->ll_rw_stats_on = 1;
2149 spin_lock(&sbi->ll_pp_extent_lock);
2150 rw_extents = sbi->ll_rw_extents_info;
2152 rw_extents->pp_init = ktime_get();
2153 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
2154 rw_extents->pp_extents[i].pid = 0;
2155 lprocfs_oh_clear(&rw_extents->pp_extents[i].pp_r_hist);
2156 lprocfs_oh_clear(&rw_extents->pp_extents[i].pp_w_hist);
2159 spin_unlock(&sbi->ll_pp_extent_lock);
2164 LDEBUGFS_SEQ_FOPS(ll_rw_extents_stats);
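/*
 * ll_rw_stats_tally() below feeds both stats files: it buckets each I/O size
 * into the per-PID and aggregate extent histograms (log2 buckets, as
 * displayed by ll_display_extents_info()), and records discontiguous offsets
 * per process for the offset_stats file. Entries are recycled round-robin
 * once the LL_PROCESS_HIST_MAX / LL_OFFSET_HIST_MAX slots are used up.
 */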
2166 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
2167 struct ll_file_data *file, loff_t pos,
2168 size_t count, int rw)
2171 struct ll_rw_process_info *process;
2172 struct ll_rw_process_info *offset;
2173 int *off_count = &sbi->ll_rw_offset_entry_count;
2174 int *process_count = &sbi->ll_offset_process_count;
2175 struct ll_rw_extents_info *rw_extents;
2177 if (!sbi->ll_rw_stats_on)
2180 spin_lock(&sbi->ll_pp_extent_lock);
2181 rw_extents = sbi->ll_rw_extents_info;
2183 spin_unlock(&sbi->ll_pp_extent_lock);
2187 /* Extent statistics */
2188 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
2189 if (rw_extents->pp_extents[i].pid == pid) {
2197 sbi->ll_extent_process_count =
2198 (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
2199 cur = sbi->ll_extent_process_count;
2200 rw_extents->pp_extents[cur].pid = pid;
2201 lprocfs_oh_clear(&rw_extents->pp_extents[cur].pp_r_hist);
2202 lprocfs_oh_clear(&rw_extents->pp_extents[cur].pp_w_hist);
	for (i = 0; (count >= 1 << (LL_HIST_START + i)) &&
	     (i < (LL_HIST_MAX - 1)); i++)
		;
2208 rw_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
2209 rw_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
2211 rw_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
2212 rw_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
2214 spin_unlock(&sbi->ll_pp_extent_lock);
2216 spin_lock(&sbi->ll_process_lock);
2217 process = sbi->ll_rw_process_info;
2218 offset = sbi->ll_rw_offset_info;
2219 if (!process || !offset)
2222 /* Offset statistics */
2223 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
2224 if (process[i].rw_pid == pid) {
2225 if (process[i].rw_last_file != file) {
2226 process[i].rw_range_start = pos;
2227 process[i].rw_last_file_pos = pos + count;
2228 process[i].rw_smallest_extent = count;
2229 process[i].rw_largest_extent = count;
2230 process[i].rw_offset = 0;
2231 process[i].rw_last_file = file;
2234 if (process[i].rw_last_file_pos != pos) {
2236 (*off_count + 1) % LL_OFFSET_HIST_MAX;
2237 offset[*off_count].rw_op = process[i].rw_op;
2238 offset[*off_count].rw_pid = pid;
2239 offset[*off_count].rw_range_start =
2240 process[i].rw_range_start;
2241 offset[*off_count].rw_range_end =
2242 process[i].rw_last_file_pos;
2243 offset[*off_count].rw_smallest_extent =
2244 process[i].rw_smallest_extent;
2245 offset[*off_count].rw_largest_extent =
2246 process[i].rw_largest_extent;
2247 offset[*off_count].rw_offset =
2248 process[i].rw_offset;
2249 process[i].rw_op = rw;
2250 process[i].rw_range_start = pos;
2251 process[i].rw_smallest_extent = count;
2252 process[i].rw_largest_extent = count;
2253 process[i].rw_offset = pos -
2254 process[i].rw_last_file_pos;
2256 if (process[i].rw_smallest_extent > count)
2257 process[i].rw_smallest_extent = count;
2258 if (process[i].rw_largest_extent < count)
2259 process[i].rw_largest_extent = count;
2260 process[i].rw_last_file_pos = pos + count;
2264 *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
2265 process[*process_count].rw_pid = pid;
2266 process[*process_count].rw_op = rw;
2267 process[*process_count].rw_range_start = pos;
2268 process[*process_count].rw_last_file_pos = pos + count;
2269 process[*process_count].rw_smallest_extent = count;
2270 process[*process_count].rw_largest_extent = count;
2271 process[*process_count].rw_offset = 0;
2272 process[*process_count].rw_last_file = file;
2275 spin_unlock(&sbi->ll_process_lock);
2278 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
2280 struct ll_sb_info *sbi = seq->private;
2281 struct ll_rw_process_info *offset;
2282 struct ll_rw_process_info *process;
2285 if (!sbi->ll_rw_stats_on) {
		seq_puts(seq, "disabled\nwrite anything to this file to activate, then '0' or 'disable' to deactivate\n");
2290 spin_lock(&sbi->ll_process_lock);
2291 lprocfs_stats_header(seq, ktime_get(), sbi->ll_process_stats_init, 25,
2293 seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
2294 "R/W", "PID", "RANGE START", "RANGE END",
2295 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
2297 /* We stored the discontiguous offsets here; print them first */
2298 offset = sbi->ll_rw_offset_info;
2299 for (i = 0; offset && i < LL_OFFSET_HIST_MAX; i++) {
2300 if (offset[i].rw_pid != 0)
2302 "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
2303 offset[i].rw_op == READ ? 'R' : 'W',
2305 offset[i].rw_range_start,
2306 offset[i].rw_range_end,
2307 (unsigned long)offset[i].rw_smallest_extent,
2308 (unsigned long)offset[i].rw_largest_extent,
2309 offset[i].rw_offset);
2312 /* Then print the current offsets for each process */
2313 process = sbi->ll_rw_process_info;
2314 for (i = 0; process && i < LL_PROCESS_HIST_MAX; i++) {
2315 if (process[i].rw_pid != 0)
2317 "%3c %10d %14llu %14llu %17lu %17lu %14lld\n",
2318 process[i].rw_op == READ ? 'R' : 'W',
2320 process[i].rw_range_start,
2321 process[i].rw_last_file_pos,
2322 (unsigned long)process[i].rw_smallest_extent,
2323 (unsigned long)process[i].rw_largest_extent,
2324 process[i].rw_offset);
2326 spin_unlock(&sbi->ll_process_lock);
2331 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
2332 const char __user *buf,
2333 size_t len, loff_t *off)
2335 struct seq_file *seq = file->private_data;
2336 struct ll_sb_info *sbi = seq->private;
2342 value = ll_stats_pid_write(buf, len);
2345 sbi->ll_rw_stats_on = 0;
2347 if (!sbi->ll_rw_process_info || !sbi->ll_rw_offset_info) {
2348 int rc = alloc_rw_stats_info(sbi);
2353 sbi->ll_rw_stats_on = 1;
2356 spin_lock(&sbi->ll_process_lock);
2357 sbi->ll_offset_process_count = 0;
2358 sbi->ll_rw_offset_entry_count = 0;
2359 sbi->ll_process_stats_init = ktime_get();
2360 if (sbi->ll_rw_process_info)
2361 memset(sbi->ll_rw_process_info, 0,
2362 sizeof(struct ll_rw_process_info) * LL_PROCESS_HIST_MAX);
2363 if (sbi->ll_rw_offset_info)
2364 memset(sbi->ll_rw_offset_info, 0,
2365 sizeof(struct ll_rw_process_info) * LL_OFFSET_HIST_MAX);
2366 spin_unlock(&sbi->ll_process_lock);
2371 LDEBUGFS_SEQ_FOPS(ll_rw_offset_stats);