*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LLITE
goto free_kobj;
llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
- if (IS_ERR_OR_NULL(llite_root)) {
- rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
- llite_root = NULL;
+ return 0;
+
free_kobj:
- kobject_put(llite_kobj);
- llite_kobj = NULL;
- }
+ kobject_put(llite_kobj);
+ llite_kobj = NULL;
return rc;
}
}
LUSTRE_RO_ATTR(client_type);
+LUSTRE_RW_ATTR(foreign_symlink_enable);
+
+LUSTRE_RW_ATTR(foreign_symlink_prefix);
+
+LUSTRE_RW_ATTR(foreign_symlink_upcall);
+
+LUSTRE_WO_ATTR(foreign_symlink_upcall_info);
+
static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- unsigned long ra_max_mb;
-
- spin_lock(&sbi->ll_lock);
- ra_max_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages);
- spin_unlock(&sbi->ll_lock);
- return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_mb);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
}
static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- unsigned long ra_max_file_mb;
- spin_lock(&sbi->ll_lock);
- ra_max_file_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file);
- spin_unlock(&sbi->ll_lock);
-
- return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_file_mb);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
}
static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- unsigned long ra_max_whole_mb;
-
- spin_lock(&sbi->ll_lock);
- ra_max_whole_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages);
- spin_unlock(&sbi->ll_lock);
- return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_whole_mb);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages));
}
static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
long max_cached_mb;
long unused_mb;
max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
mutex_unlock(&cache->ccc_max_cache_mb_lock);
+
seq_printf(m, "users: %d\n"
"max_cached_mb: %ld\n"
"used_mb: %ld\n"
"unused_mb: %ld\n"
- "reclaim_count: %u\n",
+ "reclaim_count: %u\n"
+ "max_read_ahead_mb: %lu\n"
+ "used_read_ahead_mb: %d\n",
atomic_read(&cache->ccc_users),
max_cached_mb,
max_cached_mb - unused_mb,
unused_mb,
- cache->ccc_lru_shrinkers);
+ cache->ccc_lru_shrinkers,
+ PAGES_TO_MiB(ra->ra_max_pages),
+ PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
return 0;
}
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, 16, "%u\n", sbi->ll_sa_running_max);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_sa_running_max);
}
static ssize_t statahead_running_max_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_statfs_max_age);
}
static ssize_t statfs_max_age_store(struct kobject *kobj,
return rc;
/* Limit xattr size returned to userspace based on kernel maximum */
- return snprintf(buf, PAGE_SIZE, "%u\n",
- ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
}
LUSTRE_RO_ATTR(max_easize);
return rc;
/* Limit xattr size returned to userspace based on kernel maximum */
- return snprintf(buf, PAGE_SIZE, "%u\n",
- ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ ealen > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : ealen);
}
/**
static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
{
- const char *str[] = LL_SBI_FLAGS;
+ const char *const str[] = LL_SBI_FLAGS;
struct super_block *sb = m->private;
int flags = ll_s2sbi(sb)->ll_flags;
int i = 0;
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- sbi->ll_ra_info.ra_async_max_active);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ sbi->ll_ra_info.ra_async_max_active);
}
static ssize_t max_read_ahead_async_active_store(struct kobject *kobj,
if (rc)
return rc;
- if (val < 1 || val > WQ_UNBOUND_MAX_ACTIVE) {
- CERROR("%s: cannot set max_read_ahead_async_active=%u %s than %u\n",
- sbi->ll_fsname, val,
- val < 1 ? "smaller" : "larger",
- val < 1 ? 1 : WQ_UNBOUND_MAX_ACTIVE);
+	/*
+	 * It doesn't make any sense to make it exceed what the
+	 * workqueue could actually support. This can easily
+	 * oversubscribe the cores but Lustre internally
+	 * throttles to avoid those impacts.
+	 */
+ if (val > WQ_UNBOUND_MAX_ACTIVE) {
+ CERROR("%s: cannot set max_read_ahead_async_active=%u larger than %u\n",
+ sbi->ll_fsname, val, WQ_UNBOUND_MAX_ACTIVE);
return -ERANGE;
}
spin_lock(&sbi->ll_lock);
sbi->ll_ra_info.ra_async_max_active = val;
spin_unlock(&sbi->ll_lock);
- workqueue_set_max_active(sbi->ll_ra_info.ll_readahead_wq, val);
return count;
}
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- PAGES_TO_MiB(sbi->ll_ra_info.ra_async_pages_per_file_threshold));
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", PAGES_TO_MiB(
+ sbi->ll_ra_info.ra_async_pages_per_file_threshold));
}
static ssize_t
}
LUSTRE_RW_ATTR(read_ahead_async_file_threshold_mb);
+static ssize_t read_ahead_range_kb_show(struct kobject *kobj,
+					struct attribute *attr, char *buf)
+{
+	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+					      ll_kset.kobj);
+	/* ra_range_pages is in pages; << (PAGE_SHIFT - 10) converts to KiB */
+	return scnprintf(buf, PAGE_SIZE, "%lu\n",
+			 sbi->ll_ra_info.ra_range_pages << (PAGE_SHIFT - 10));
+}
+
+/*
+ * Set the mmap range read-ahead window size, given in KiB.
+ * Writing 0 disables mmap range read; otherwise the value must fall
+ * within [RA_MIN_MMAP_RANGE_PAGES, ra_max_pages_per_file] once
+ * converted to pages, else -ERANGE is returned.
+ */
+static ssize_t
+read_ahead_range_kb_store(struct kobject *kobj,
+			       struct attribute *attr,
+			       const char *buffer, size_t count)
+{
+	unsigned long pages_number;
+	unsigned long max_ra_per_file;
+	u64 val;
+	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+					      ll_kset.kobj);
+	int rc;
+
+	/* parse user input; "KiB" is the default unit when none is given */
+	rc = sysfs_memparse(buffer, count, &val, "KiB");
+	if (rc < 0)
+		return rc;
+
+	/* NOTE(review): non-zero values smaller than one page truncate to 0
+	 * here and silently disable range read instead of hitting the
+	 * -ERANGE check below — confirm this is intended. */
+	pages_number = val >> PAGE_SHIFT;
+	/* Disable mmap range read */
+	if (pages_number == 0)
+		goto out;
+
+	max_ra_per_file = sbi->ll_ra_info.ra_max_pages_per_file;
+	if (pages_number > max_ra_per_file ||
+	    pages_number < RA_MIN_MMAP_RANGE_PAGES)
+		return -ERANGE;
+
+out:
+	/* ll_lock serializes updates with readers of ra_range_pages */
+	spin_lock(&sbi->ll_lock);
+	sbi->ll_ra_info.ra_range_pages = pages_number;
+	spin_unlock(&sbi->ll_lock);
+
+	return count;
+}
+LUSTRE_RW_ATTR(read_ahead_range_kb);
+
static ssize_t fast_read_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ !!(sbi->ll_flags & LL_SBI_FILE_HEAT));
}
static ssize_t file_heat_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- (sbi->ll_heat_decay_weight * 100 + 128) / 256);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (sbi->ll_heat_decay_weight * 100 + 128) / 256);
}
static ssize_t heat_decay_percentage_store(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sbi->ll_heat_period_second);
}
static ssize_t heat_period_second_store(struct kobject *kobj,
OBD_FREE(kernbuf, count + 1);
return rc ? rc : count;
}
-LPROC_SEQ_FOPS(ll_pcc);
+LDEBUGFS_SEQ_FOPS(ll_pcc);
-struct lprocfs_vars lprocfs_llite_obd_vars[] = {
+struct ldebugfs_vars lprocfs_llite_obd_vars[] = {
{ .name = "site",
.fops = &ll_site_stats_fops },
{ .name = "max_cached_mb",
&lustre_attr_filestotal.attr,
&lustre_attr_filesfree.attr,
&lustre_attr_client_type.attr,
+ &lustre_attr_foreign_symlink_enable.attr,
+ &lustre_attr_foreign_symlink_prefix.attr,
+ &lustre_attr_foreign_symlink_upcall.attr,
+ &lustre_attr_foreign_symlink_upcall_info.attr,
&lustre_attr_fstype.attr,
&lustre_attr_uuid.attr,
&lustre_attr_checksums.attr,
&lustre_attr_max_read_ahead_whole_mb.attr,
&lustre_attr_max_read_ahead_async_active.attr,
&lustre_attr_read_ahead_async_file_threshold_mb.attr,
+ &lustre_attr_read_ahead_range_kb.attr,
&lustre_attr_stats_track_pid.attr,
&lustre_attr_stats_track_ppid.attr,
&lustre_attr_stats_track_gid.attr,
.release = sbi_kobj_release,
};
-#define LPROCFS_TYPE_LATENCY \
- (LPROCFS_TYPE_USEC | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV)
static const struct llite_file_opcode {
__u32 opcode;
__u32 type;
const char *opname;
} llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
/* file operation */
- { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "read_bytes" },
- { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "write_bytes" },
+ { LPROC_LL_READ_BYTES, LPROCFS_TYPE_BYTES_FULL, "read_bytes" },
+ { LPROC_LL_WRITE_BYTES, LPROCFS_TYPE_BYTES_FULL, "write_bytes" },
{ LPROC_LL_READ, LPROCFS_TYPE_LATENCY, "read" },
{ LPROC_LL_WRITE, LPROCFS_TYPE_LATENCY, "write" },
{ LPROC_LL_IOCTL, LPROCFS_TYPE_REQS, "ioctl" },
{ LPROC_LL_TRUNC, LPROCFS_TYPE_LATENCY, "truncate" },
{ LPROC_LL_FLOCK, LPROCFS_TYPE_LATENCY, "flock" },
{ LPROC_LL_GETATTR, LPROCFS_TYPE_LATENCY, "getattr" },
+ { LPROC_LL_FALLOCATE, LPROCFS_TYPE_LATENCY, "fallocate"},
/* dir inode operation */
{ LPROC_LL_CREATE, LPROCFS_TYPE_LATENCY, "create" },
{ LPROC_LL_LINK, LPROCFS_TYPE_LATENCY, "link" },
}
EXPORT_SYMBOL(ll_stats_ops_tally);
-static const char *ra_stat_string[] = {
- [RA_STAT_HIT] = "hits",
- [RA_STAT_MISS] = "misses",
- [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
- [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
- [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
- [RA_STAT_FAILED_MATCH] = "failed lock match",
- [RA_STAT_DISCARDED] = "read but discarded",
- [RA_STAT_ZERO_LEN] = "zero length file",
- [RA_STAT_ZERO_WINDOW] = "zero size window",
- [RA_STAT_EOF] = "read-ahead to EOF",
- [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
- [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
- [RA_STAT_FAILED_REACH_END] = "failed to reach end",
- [RA_STAT_ASYNC] = "async readahead",
- [RA_STAT_FAILED_FAST_READ] = "failed to fast read",
+static const char *const ra_stat_string[] = {
+ [RA_STAT_HIT] = "hits",
+ [RA_STAT_MISS] = "misses",
+ [RA_STAT_DISTANT_READPAGE] = "readpage_not_consecutive",
+ [RA_STAT_MISS_IN_WINDOW] = "miss_inside_window",
+ [RA_STAT_FAILED_GRAB_PAGE] = "failed_grab_cache_page",
+ [RA_STAT_FAILED_MATCH] = "failed_lock_match",
+ [RA_STAT_DISCARDED] = "read_but_discarded",
+ [RA_STAT_ZERO_LEN] = "zero_length_file",
+ [RA_STAT_ZERO_WINDOW] = "zero_size_window",
+ [RA_STAT_EOF] = "readahead_to_eof",
+ [RA_STAT_MAX_IN_FLIGHT] = "hit_max_readahead_issue",
+ [RA_STAT_WRONG_GRAB_PAGE] = "wrong_page_from_grab_cache_page",
+ [RA_STAT_FAILED_REACH_END] = "failed_to_reach_end",
+ [RA_STAT_ASYNC] = "async_readahead",
+ [RA_STAT_FAILED_FAST_READ] = "failed_to_fast_read",
+ [RA_STAT_MMAP_RANGE_READ] = "mmap_range_read",
};
int ll_debugfs_register_super(struct super_block *sb, const char *name)
{
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
- int err, id, rc;
+ int err, id;
ENTRY;
LASSERT(sbi);
if (IS_ERR_OR_NULL(llite_root))
goto out_ll_kset;
- sbi->ll_debugfs_entry = ldebugfs_register(name, llite_root,
- lprocfs_llite_obd_vars, sb);
- if (IS_ERR_OR_NULL(sbi->ll_debugfs_entry)) {
- err = sbi->ll_debugfs_entry ? PTR_ERR(sbi->ll_debugfs_entry) :
- -ENOMEM;
- sbi->ll_debugfs_entry = NULL;
- RETURN(err);
- }
+ sbi->ll_debugfs_entry = debugfs_create_dir(name, llite_root);
+ ldebugfs_add_vars(sbi->ll_debugfs_entry, lprocfs_llite_obd_vars, sb);
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache",0444,
- &vvp_dump_pgcache_file_ops, sbi);
- if (rc)
- CWARN("Error adding the dump_page_cache file\n");
+ debugfs_create_file("dump_page_cache", 0444, sbi->ll_debugfs_entry, sbi,
+ &vvp_dump_pgcache_file_ops);
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
- &ll_rw_extents_stats_fops, sbi);
- if (rc)
- CWARN("Error adding the extent_stats file\n");
+ debugfs_create_file("extents_stats", 0644, sbi->ll_debugfs_entry, sbi,
+ &ll_rw_extents_stats_fops);
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
- "extents_stats_per_process", 0644,
- &ll_rw_extents_stats_pp_fops, sbi);
- if (rc)
- CWARN("Error adding the extents_stats_per_process file\n");
+ debugfs_create_file("extents_stats_per_process", 0644,
+ sbi->ll_debugfs_entry, sbi,
+ &ll_rw_extents_stats_pp_fops);
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
- &ll_rw_offset_stats_fops, sbi);
- if (rc)
- CWARN("Error adding the offset_stats file\n");
+ debugfs_create_file("offset_stats", 0644, sbi->ll_debugfs_entry, sbi,
+ &ll_rw_offset_stats_fops);
/* File operations stats */
sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
/* do counter init */
for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
u32 type = llite_opcode_table[id].type;
- void *ptr = NULL;
+ void *ptr = "unknown";
if (type & LPROCFS_TYPE_REQS)
ptr = "reqs";
else if (type & LPROCFS_TYPE_BYTES)
ptr = "bytes";
- else if (type & LPROCFS_TYPE_PAGES)
- ptr = "pages";
else if (type & LPROCFS_TYPE_USEC)
ptr = "usec";
lprocfs_counter_init(sbi->ll_stats,
- llite_opcode_table[id].opcode,
- (type & LPROCFS_CNTR_AVGMINMAX),
+ llite_opcode_table[id].opcode, type,
llite_opcode_table[id].opname, ptr);
}
- err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
- sbi->ll_stats);
- if (err)
- GOTO(out_stats, err);
+ debugfs_create_file("stats", 0644, sbi->ll_debugfs_entry,
+ sbi->ll_stats, &ldebugfs_stats_seq_fops);
sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
LPROCFS_STATS_FLAG_NONE);
lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
ra_stat_string[id], "pages");
- err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
- sbi->ll_ra_stats);
- if (err)
- GOTO(out_ra_stats, err);
+ debugfs_create_file("read_ahead_stats", 0644, sbi->ll_debugfs_entry,
+ sbi->ll_ra_stats, &ldebugfs_stats_seq_fops);
out_ll_kset:
/* Yes we also register sysfs mount kset here as well */
write_tot += pp_info->pp_w_hist.oh_buckets[i];
}
- for(i = 0; i < LL_HIST_MAX; i++) {
- r = pp_info->pp_r_hist.oh_buckets[i];
- w = pp_info->pp_w_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- end = BIT(i + LL_HIST_START - units);
+ for(i = 0; i < LL_HIST_MAX; i++) {
+ r = pp_info->pp_r_hist.oh_buckets[i];
+ w = pp_info->pp_w_hist.oh_buckets[i];
+ read_cum += r;
+ write_cum += w;
+ end = 1 << (i + LL_HIST_START - units);
seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4u %4u | "
"%14lu %4u %4u\n", start, *unitp, end, *unitp,
- (i == LL_HIST_MAX - 1) ? '+' : ' ',
- r, pct(r, read_tot), pct(read_cum, read_tot),
- w, pct(w, write_tot), pct(write_cum, write_tot));
- start = end;
- if (start == BIT(10)) {
- start = 1;
- units += 10;
- unitp++;
- }
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
+ (i == LL_HIST_MAX - 1) ? '+' : ' ',
+ r, pct(r, read_tot), pct(read_cum, read_tot),
+ w, pct(w, write_tot), pct(write_cum, write_tot));
+ start = end;
+ if (start == (1 << 10)) {
+ start = 1;
+ units += 10;
+ unitp++;
+ }
+ if (read_cum == read_tot && write_cum == write_tot)
+ break;
+ }
}
static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
}
- for (i = 0; (count >= BIT(LL_HIST_START + i)) &&
+ for (i = 0; (count >= 1 << (LL_HIST_START + i)) &&
(i < (LL_HIST_MAX - 1)); i++);
if (rw == 0) {
io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;