}
LUSTRE_RO_ATTR(client_type);
+LUSTRE_RW_ATTR(foreign_symlink_enable);
+
+LUSTRE_RW_ATTR(foreign_symlink_prefix);
+
+LUSTRE_RW_ATTR(foreign_symlink_upcall);
+
+LUSTRE_WO_ATTR(foreign_symlink_upcall_info);
+
static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
long max_cached_mb;
long unused_mb;
max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
mutex_unlock(&cache->ccc_max_cache_mb_lock);
+
seq_printf(m, "users: %d\n"
"max_cached_mb: %ld\n"
"used_mb: %ld\n"
"unused_mb: %ld\n"
- "reclaim_count: %u\n",
+ "reclaim_count: %u\n"
+ "max_read_ahead_mb: %lu\n"
+ "used_read_ahead_mb: %d\n",
atomic_read(&cache->ccc_users),
max_cached_mb,
max_cached_mb - unused_mb,
unused_mb,
- cache->ccc_lru_shrinkers);
+ cache->ccc_lru_shrinkers,
+ PAGES_TO_MiB(ra->ra_max_pages),
+ PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
return 0;
}
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_sa_running_max);
}
static ssize_t statahead_running_max_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
}
static ssize_t statfs_max_age_store(struct kobject *kobj,
return rc;
/* Limit xattr size returned to userspace based on kernel maximum */
- return snprintf(buf, PAGE_SIZE, "%u\n",
- ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
}
LUSTRE_RO_ATTR(max_easize);
return rc;
/* Limit xattr size returned to userspace based on kernel maximum */
- return snprintf(buf, PAGE_SIZE, "%u\n",
- ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
}
/**
static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
{
- const char *str[] = LL_SBI_FLAGS;
+ const char *const str[] = LL_SBI_FLAGS;
struct super_block *sb = m->private;
int flags = ll_s2sbi(sb)->ll_flags;
int i = 0;
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- sbi->ll_ra_info.ra_async_max_active);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ sbi->ll_ra_info.ra_async_max_active);
}
static ssize_t max_read_ahead_async_active_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- PAGES_TO_MiB(sbi->ll_ra_info.ra_async_pages_per_file_threshold));
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", PAGES_TO_MiB(
+ sbi->ll_ra_info.ra_async_pages_per_file_threshold));
}
static ssize_t
}
LUSTRE_RW_ATTR(read_ahead_async_file_threshold_mb);
+static ssize_t read_ahead_range_kb_show(struct kobject *kobj,
+					struct attribute *attr, char *buf)
+{
+	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+					      ll_kset.kobj);
+
+	/* ra_range_pages is kept in pages; report it in KiB.  Use
+	 * scnprintf (not snprintf) for consistency with the other sysfs
+	 * show handlers in this patch: it returns the bytes actually
+	 * written, never more than the buffer size.
+	 */
+	return scnprintf(buf, PAGE_SIZE, "%lu\n",
+			 sbi->ll_ra_info.ra_range_pages << (PAGE_SHIFT - 10));
+}
+
+static ssize_t
+read_ahead_range_kb_store(struct kobject *kobj,
+			  struct attribute *attr,
+			  const char *buffer, size_t count)
+{
+	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+					      ll_kset.kobj);
+	unsigned long new_pages;
+	u64 bytes;
+	int rc;
+
+	/* User input is parsed as a size with a default unit of KiB. */
+	rc = sysfs_memparse(buffer, count, &bytes, "KiB");
+	if (rc < 0)
+		return rc;
+
+	new_pages = bytes >> PAGE_SHIFT;
+	/* Zero disables mmap range read-ahead and bypasses the range
+	 * check; any other value must fit the supported window.
+	 */
+	if (new_pages != 0 &&
+	    (new_pages > sbi->ll_ra_info.ra_max_pages_per_file ||
+	     new_pages < RA_MIN_MMAP_RANGE_PAGES))
+		return -ERANGE;
+
+	spin_lock(&sbi->ll_lock);
+	sbi->ll_ra_info.ra_range_pages = new_pages;
+	spin_unlock(&sbi->ll_lock);
+
+	return count;
+}
+LUSTRE_RW_ATTR(read_ahead_range_kb);
+
static ssize_t fast_read_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
}
static ssize_t file_heat_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- (sbi->ll_heat_decay_weight * 100 + 128) / 256);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (sbi->ll_heat_decay_weight * 100 + 128) / 256);
}
static ssize_t heat_decay_percentage_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
}
static ssize_t heat_period_second_store(struct kobject *kobj,
&lustre_attr_filestotal.attr,
&lustre_attr_filesfree.attr,
&lustre_attr_client_type.attr,
+ &lustre_attr_foreign_symlink_enable.attr,
+ &lustre_attr_foreign_symlink_prefix.attr,
+ &lustre_attr_foreign_symlink_upcall.attr,
+ &lustre_attr_foreign_symlink_upcall_info.attr,
&lustre_attr_fstype.attr,
&lustre_attr_uuid.attr,
&lustre_attr_checksums.attr,
&lustre_attr_max_read_ahead_whole_mb.attr,
&lustre_attr_max_read_ahead_async_active.attr,
&lustre_attr_read_ahead_async_file_threshold_mb.attr,
+ &lustre_attr_read_ahead_range_kb.attr,
&lustre_attr_stats_track_pid.attr,
&lustre_attr_stats_track_ppid.attr,
&lustre_attr_stats_track_gid.attr,
}
EXPORT_SYMBOL(ll_stats_ops_tally);
-static const char *ra_stat_string[] = {
+static const char *const ra_stat_string[] = {
[RA_STAT_HIT] = "hits",
[RA_STAT_MISS] = "misses",
[RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
[RA_STAT_FAILED_REACH_END] = "failed to reach end",
[RA_STAT_ASYNC] = "async readahead",
[RA_STAT_FAILED_FAST_READ] = "failed to fast read",
+ [RA_STAT_MMAP_RANGE_READ] = "mmap range read",
};
int ll_debugfs_register_super(struct super_block *sb, const char *name)