};
struct brw_stats {
- struct obd_histogram hist[BRW_LAST];
+ ktime_t bs_init;
+ struct obd_histogram bs_hist[BRW_LAST];
};
enum {
};
struct rename_stats {
- struct obd_histogram hist[RENAME_LAST];
+ ktime_t rs_init;
+ struct obd_histogram rs_hist[RENAME_LAST];
};
/* An lprocfs counter can be configured using the enum bit masks below.
/* 1 + the biggest cpu # whose ls_percpu slot has been allocated */
unsigned short ls_biggest_alloc_num;
enum lprocfs_stats_flags ls_flags;
+ ktime_t ls_init;
/* Lock used when there are no percpu stats areas; For percpu stats,
* it is used to protect ls_biggest_alloc_num change */
spinlock_t ls_lock;
struct cfs_hash *ojs_hash; /* hash of jobids */
struct list_head ojs_list; /* list of job_stat structs */
rwlock_t ojs_lock; /* protect ojs_list/js_list */
- unsigned int ojs_cleanup_interval;/* seconds before expiry */
- time64_t ojs_last_cleanup; /* previous cleanup time */
+ ktime_t ojs_cleanup_interval;/* 1/2 expiry seconds */
+ ktime_t ojs_cleanup_last;/* previous cleanup time */
cntr_init_callback ojs_cntr_init_fn;/* lprocfs_stats initializer */
unsigned short ojs_cntr_num; /* number of stats in struct */
bool ojs_cleaning; /* currently expiring stats */
extern int lprocfs_obd_seq_create(struct obd_device *obd, const char *name,
mode_t mode, const struct proc_ops *seq_fops,
void *data);
+extern void lprocfs_stats_header(struct seq_file *seq, ktime_t now,
+ ktime_t ts_init, int width, const char *colon,
+ bool show_units);
/* Generic callbacks */
extern int lprocfs_uuid_seq_show(struct seq_file *m, void *data);
/* Write stats is actually protected by client_obd's lock. */
struct osc_stats {
+ ktime_t os_init;
uint64_t os_lockless_writes; /* by bytes */
uint64_t os_lockless_reads; /* by bytes */
} od_stats;
u32 cl_max_pages_per_rpc;
u32 cl_max_rpcs_in_flight;
u32 cl_max_short_io_bytes;
+ ktime_t cl_stats_init;
struct obd_histogram cl_read_rpc_hist;
struct obd_histogram cl_write_rpc_hist;
struct obd_histogram cl_read_page_hist;
__u16 cl_close_rpcs_in_flight;
wait_queue_head_t cl_mod_rpcs_waitq;
unsigned long *cl_mod_tag_bitmap;
+ ktime_t cl_mod_rpcs_init;
struct obd_histogram cl_mod_rpcs_hist;
- /* mgc datastruct */
+ /* mgc datastruct */
struct mutex cl_mgc_mutex;
struct local_oid_storage *cl_mgc_los;
struct dt_object *cl_mgc_configs_dir;
#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
#define LL_PROCESS_HIST_MAX 10
struct per_process_info {
- pid_t pid;
- struct obd_histogram pp_r_hist;
- struct obd_histogram pp_w_hist;
+ pid_t pid;
+ struct obd_histogram pp_r_hist;
+ struct obd_histogram pp_w_hist;
};
/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
struct ll_rw_extents_info {
- struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
+ ktime_t pp_init;
+ struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
};
#define LL_OFFSET_HIST_MAX 100
struct lu_site *ll_site;
struct cl_device *ll_cl;
+
/* Statistics */
struct ll_rw_extents_info ll_rw_extents_info;
- int ll_extent_process_count;
+ int ll_extent_process_count;
+ unsigned int ll_offset_process_count;
+ ktime_t ll_process_stats_init;
struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
- unsigned int ll_offset_process_count;
struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
- unsigned int ll_rw_offset_entry_count;
- int ll_stats_track_id;
+ unsigned int ll_rw_offset_entry_count;
+ int ll_stats_track_id;
enum stats_track_type ll_stats_track_type;
- int ll_rw_stats_on;
+ int ll_rw_stats_on;
/* metadata stat-ahead */
unsigned int ll_sa_running_max;/* max concurrent
static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
{
- struct timespec64 now;
struct ll_sb_info *sbi = seq->private;
struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
int k;
- ktime_get_real_ts64(&now);
-
if (!sbi->ll_rw_stats_on) {
seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
return 0;
}
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
- seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
- seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
- "extents", "calls", "%", "cum%",
- "calls", "%", "cum%");
+	lprocfs_stats_header(seq, ktime_get(), io_extents->pp_init, 25,
+			     ":", true);
+ seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
+ seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
+ "extents", "calls", "%", "cum%",
+ "calls", "%", "cum%");
spin_lock(&sbi->ll_pp_extent_lock);
for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
if (io_extents->pp_extents[k].pid != 0) {
sbi->ll_rw_stats_on = 1;
spin_lock(&sbi->ll_pp_extent_lock);
+ io_extents->pp_init = ktime_get();
for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
io_extents->pp_extents[i].pid = 0;
lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timespec64 now;
struct ll_sb_info *sbi = seq->private;
struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- ktime_get_real_ts64(&now);
-
if (!sbi->ll_rw_stats_on) {
seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
return 0;
}
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+
+	lprocfs_stats_header(seq, ktime_get(), io_extents->pp_init, 25,
+			     ":", true);
seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
sbi->ll_rw_stats_on = 1;
spin_lock(&sbi->ll_pp_extent_lock);
+ io_extents->pp_init = ktime_get();
for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
io_extents->pp_extents[i].pid = 0;
lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timespec64 now;
struct ll_sb_info *sbi = seq->private;
struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
struct ll_rw_process_info *process = sbi->ll_rw_process_info;
int i;
- ktime_get_real_ts64(&now);
-
if (!sbi->ll_rw_stats_on) {
seq_puts(seq, "disabled\n write anything to this file to activate, then '0' or 'disable' to deactivate\n");
return 0;
}
+
spin_lock(&sbi->ll_process_lock);
+ lprocfs_stats_header(seq, ktime_get(), sbi->ll_process_stats_init, 25,
+ ":", true);
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
"R/W", "PID", "RANGE START", "RANGE END",
"SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
spin_lock(&sbi->ll_process_lock);
sbi->ll_offset_process_count = 0;
sbi->ll_rw_offset_entry_count = 0;
+ sbi->ll_process_stats_init = ktime_get();
memset(process_info, 0, sizeof(struct ll_rw_process_info) *
LL_PROCESS_HIST_MAX);
memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
static int mdc_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timespec64 now;
struct obd_device *obd = seq->private;
struct osc_stats *stats = &obd2osc_dev(obd)->od_stats;
- ktime_get_real_ts64(&now);
-
- seq_printf(seq, "snapshot_time: %lld.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+ lprocfs_stats_header(seq, ktime_get(), stats->os_init, 25, ":", true);
seq_printf(seq, "lockless_write_bytes\t\t%llu\n",
stats->os_lockless_writes);
seq_printf(seq, "lockless_read_bytes\t\t%llu\n",
struct osc_stats *stats = &obd2osc_dev(obd)->od_stats;
memset(stats, 0, sizeof(*stats));
+ stats->os_init = ktime_get();
+
return len;
}
LPROC_SEQ_FOPS(mdc_stats);
/**
* The rename stats output would be YAML formats, like
* rename_stats:
- * - snapshot_time: 1234567890.123456
+ * - snapshot_time: 1234567890.123456789
+ * - start_time: 1234567880.987654321
+ * - elapsed_time: 9.135802468
* - same_dir:
* 4kB: { samples: 1230, pct: 33, cum_pct: 45 }
* 8kB: { samples: 1242, pct: 33, cum_pct: 78 }
**/
static void display_rename_stats(struct seq_file *seq, char *name,
- struct obd_histogram *hist)
-{
- unsigned long tot, t, cum = 0;
- int i;
-
- tot = lprocfs_oh_sum(hist);
- if (tot > 0)
- seq_printf(seq, "- %-15s\n", name);
- /* dir size start from 4K, start i from 10(2^10) here */
- for (i = 0; i < OBD_HIST_MAX; i++) {
- t = hist->oh_buckets[i];
- cum += t;
- if (cum == 0)
- continue;
-
- if (i < 10)
- seq_printf(seq, "%6s%d%s", " ", 1<< i, "bytes:");
- else if (i < 20)
- seq_printf(seq, "%6s%d%s", " ", 1<<(i-10), "KB:");
- else
- seq_printf(seq, "%6s%d%s", " ", 1<<(i-20), "MB:");
+ struct obd_histogram *rs_hist)
+{
+ unsigned long tot, t, cum = 0;
+ int i;
+
+ tot = lprocfs_oh_sum(rs_hist);
+ if (tot > 0)
+ seq_printf(seq, "- %s\n", name);
+
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ t = rs_hist->oh_buckets[i];
+ cum += t;
+ if (cum == 0)
+ continue;
+
+ if (i < 10)
+ seq_printf(seq, "%6s%d%s", " ", 1 << i, "bytes:");
+ else if (i < 20)
+ seq_printf(seq, "%6s%d%s", " ", 1 << (i - 10), "KB:");
+ else
+ seq_printf(seq, "%6s%d%s", " ", 1 << (i - 20), "MB:");
seq_printf(seq, " { sample: %3lu, pct: %3u, cum_pct: %3u }\n",
t, pct(t, tot), pct(cum, tot));
- if (cum == tot)
- break;
- }
+ if (cum == tot)
+ break;
+ }
}
static void rename_stats_show(struct seq_file *seq,
- struct rename_stats *rename_stats)
+ struct rename_stats *rename_stats)
{
- struct timespec64 now;
-
/* this sampling races with updates */
- ktime_get_real_ts64(&now);
- seq_printf(seq, "rename_stats:\n");
- seq_printf(seq, "- %-15s %llu.%9lu\n", "snapshot_time:",
- (s64)now.tv_sec, now.tv_nsec);
+ seq_puts(seq, "rename_stats:\n- ");
+ lprocfs_stats_header(seq, ktime_get(), rename_stats->rs_init, 15, ":",
+ false);
- display_rename_stats(seq, "same_dir",
- &rename_stats->hist[RENAME_SAMEDIR_SIZE]);
- display_rename_stats(seq, "crossdir_src",
- &rename_stats->hist[RENAME_CROSSDIR_SRC_SIZE]);
- display_rename_stats(seq, "crossdir_tgt",
- &rename_stats->hist[RENAME_CROSSDIR_TGT_SIZE]);
+ display_rename_stats(seq, "same_dir",
+ &rename_stats->rs_hist[RENAME_SAMEDIR_SIZE]);
+ display_rename_stats(seq, "crossdir_src",
+ &rename_stats->rs_hist[RENAME_CROSSDIR_SRC_SIZE]);
+ display_rename_stats(seq, "crossdir_tgt",
+ &rename_stats->rs_hist[RENAME_CROSSDIR_TGT_SIZE]);
}
static int mdt_rename_stats_seq_show(struct seq_file *seq, void *v)
{
- struct mdt_device *mdt = seq->private;
+ struct mdt_device *mdt = seq->private;
- rename_stats_show(seq, &mdt->mdt_rename_stats);
+ rename_stats_show(seq, &mdt->mdt_rename_stats);
- return 0;
+ return 0;
}
static ssize_t
mdt_rename_stats_seq_write(struct file *file, const char __user *buf,
size_t len, loff_t *off)
{
- struct seq_file *seq = file->private_data;
- struct mdt_device *mdt = seq->private;
- int i;
+ struct seq_file *seq = file->private_data;
+ struct mdt_device *mdt = seq->private;
+ int i;
- for (i = 0; i < RENAME_LAST; i++)
- lprocfs_oh_clear(&mdt->mdt_rename_stats.hist[i]);
+ for (i = 0; i < RENAME_LAST; i++)
+ lprocfs_oh_clear(&mdt->mdt_rename_stats.rs_hist[i]);
+ mdt->mdt_rename_stats.rs_init = ktime_get();
- return len;
+ return len;
}
LPROC_SEQ_FOPS(mdt_rename_stats);
int i;
for (i = 0; i < RENAME_LAST; i++)
- spin_lock_init(&mdt->mdt_rename_stats.hist[i].oh_lock);
+ spin_lock_init(&mdt->mdt_rename_stats.rs_hist[i].oh_lock);
return lprocfs_obd_seq_create(mdt2obd_dev(mdt), "rename_stats", 0644,
&mdt_rename_stats_fops, mdt);
struct mdt_object *src,
struct mdt_object *tgt, long count)
{
- struct md_attr *ma = &info->mti_attr;
- struct rename_stats *rstats = &mdt->mdt_rename_stats;
- int rc;
+ struct md_attr *ma = &info->mti_attr;
+ struct rename_stats *rstats = &mdt->mdt_rename_stats;
+ int rc;
- ma->ma_need = MA_INODE;
- ma->ma_valid = 0;
- rc = mo_attr_get(info->mti_env, mdt_object_child(src), ma);
- if (rc) {
- CERROR("%s: "DFID" attr_get, rc = %d\n",
+ ma->ma_need = MA_INODE;
+ ma->ma_valid = 0;
+ rc = mo_attr_get(info->mti_env, mdt_object_child(src), ma);
+ if (rc) {
+ CERROR("%s: "DFID" attr_get, rc = %d\n",
mdt_obd_name(mdt), PFID(mdt_object_fid(src)), rc);
- return;
- }
+ return;
+ }
- if (src == tgt) {
+ if (src == tgt) {
mdt_counter_incr(req, LPROC_MDT_SAMEDIR_RENAME, count);
- lprocfs_oh_tally_log2(&rstats->hist[RENAME_SAMEDIR_SIZE],
- (unsigned int)ma->ma_attr.la_size);
- return;
- }
+ lprocfs_oh_tally_log2(&rstats->rs_hist[RENAME_SAMEDIR_SIZE],
+ (unsigned int)ma->ma_attr.la_size);
+ return;
+ }
mdt_counter_incr(req, LPROC_MDT_CROSSDIR_RENAME, count);
- lprocfs_oh_tally_log2(&rstats->hist[RENAME_CROSSDIR_SRC_SIZE],
- (unsigned int)ma->ma_attr.la_size);
-
- ma->ma_need = MA_INODE;
- ma->ma_valid = 0;
- rc = mo_attr_get(info->mti_env, mdt_object_child(tgt), ma);
- if (rc) {
- CERROR("%s: "DFID" attr_get, rc = %d\n",
+ lprocfs_oh_tally_log2(&rstats->rs_hist[RENAME_CROSSDIR_SRC_SIZE],
+ (unsigned int)ma->ma_attr.la_size);
+
+ ma->ma_need = MA_INODE;
+ ma->ma_valid = 0;
+ rc = mo_attr_get(info->mti_env, mdt_object_child(tgt), ma);
+ if (rc) {
+ CERROR("%s: "DFID" attr_get, rc = %d\n",
mdt_obd_name(mdt), PFID(mdt_object_fid(tgt)), rc);
- return;
- }
+ return;
+ }
- lprocfs_oh_tally_log2(&rstats->hist[RENAME_CROSSDIR_TGT_SIZE],
- (unsigned int)ma->ma_attr.la_size);
+ lprocfs_oh_tally_log2(&rstats->rs_hist[RENAME_CROSSDIR_TGT_SIZE],
+ (unsigned int)ma->ma_attr.la_size);
}
static ssize_t identity_expire_show(struct kobject *kobj,
struct seq_file *seq)
{
unsigned long mod_tot = 0, mod_cum;
- struct timespec64 now;
int i;
- ktime_get_real_ts64(&now);
-
spin_lock(&cli->cl_mod_rpcs_lock);
-
- seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+ lprocfs_stats_header(seq, ktime_get(), cli->cl_mod_rpcs_init, 25,
+ ":", true);
seq_printf(seq, "modify_RPCs_in_flight: %hu\n",
cli->cl_mod_rpcs_in_flight);
mod_cum = 0;
for (i = 0; i < OBD_HIST_MAX; i++) {
unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i];
+
mod_cum += mod;
seq_printf(seq, "%d:\t\t%10lu %3u %3u\n",
i, mod, pct(mod, mod_tot),
struct list_head js_list; /* on ojs_list, with ojs_lock */
atomic_t js_refcount; /* num users of this struct */
char js_jobid[LUSTRE_JOBID_SIZE]; /* job name + NUL*/
- time64_t js_timestamp; /* seconds of most recent stat*/
+ ktime_t js_time_init; /* time of initial stat*/
+ ktime_t js_time_latest; /* time of most recent stat*/
struct lprocfs_stats *js_stats; /* per-job statistics */
struct obd_job_stats *js_jobstats; /* for accessing ojs_lock */
};
struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *data)
{
- time64_t oldest_time = *((time64_t *)data);
+ ktime_t oldest_time = *((ktime_t *)data);
struct job_stat *job;
job = hlist_entry(hnode, struct job_stat, js_hash);
- if (job->js_timestamp < oldest_time)
+ if (ktime_before(job->js_time_latest, oldest_time))
cfs_hash_bd_del_locked(hs, bd, hnode);
return 0;
*
* Since this function may be called frequently, do not scan all of the
* jobstats on each call, only twice per cleanup interval. That means stats
- * may be around on average cleanup_interval / 4 longer than necessary,
- * but that is not considered harmful.
+ * may be on average around cleanup_interval / 4 older than the cleanup
+ * interval, but that is not considered harmful.
*
- * If \a before is negative then this will force clean up all jobstats due
- * to the expiry time being in the future (e.g. at shutdown).
+ * The value stored in ojs_cleanup_interval is how often to perform a cleanup
+ * scan, and 1/2 of the maximum age of the individual statistics. This is
+ * done rather than dividing the interval by two each time, because it is
+ * much easier to do the division when the value is initially set (in seconds)
+ * rather than after it has been converted to ktime_t, and maybe a bit faster.
+ *
+ * If \a clear is true then this will force clean up all jobstats
+ * (e.g. at shutdown).
*
* If there is already another thread doing jobstats cleanup, don't try to
* do this again in the current thread unless this is a force cleanup.
*
* \param[in] stats stucture tracking all job stats for this device
- * \param[in] before expire jobstats updated more than this many seconds ago
+ * \param[in] clear clear all job stats if true
*/
-static void lprocfs_job_cleanup(struct obd_job_stats *stats, int before)
+static void lprocfs_job_cleanup(struct obd_job_stats *stats, bool clear)
{
- time64_t now = ktime_get_real_seconds();
- time64_t oldest;
-
- if (likely(before >= 0)) {
- unsigned int cleanup_interval = stats->ojs_cleanup_interval;
+ ktime_t cleanup_interval = stats->ojs_cleanup_interval;
+ ktime_t now = ktime_get();
+ ktime_t oldest;
- if (cleanup_interval == 0 || before == 0)
+ if (likely(!clear)) {
+ /* ojs_cleanup_interval of zero means never clean up stats */
+ if (ktime_to_ns(cleanup_interval) == 0)
return;
- if (now < stats->ojs_last_cleanup + cleanup_interval / 2)
+ if (ktime_before(now, ktime_add(stats->ojs_cleanup_last,
+ cleanup_interval)))
return;
if (stats->ojs_cleaning)
}
write_lock(&stats->ojs_lock);
- if (before >= 0 && stats->ojs_cleaning) {
+ if (!clear && stats->ojs_cleaning) {
write_unlock(&stats->ojs_lock);
return;
}
* Holding ojs_lock isn't necessary for safety of the hash iteration,
* since locking of the hash is handled internally, but there isn't
* any benefit to having multiple threads doing cleanup at one time.
+ *
+ * Subtract twice the cleanup_interval, since it is 1/2 the maximum age.
*/
- oldest = now - before;
+ oldest = ktime_sub(now, ktime_add(cleanup_interval, cleanup_interval));
cfs_hash_for_each_safe(stats->ojs_hash, job_cleanup_iter_callback,
&oldest);
write_lock(&stats->ojs_lock);
stats->ojs_cleaning = false;
- stats->ojs_last_cleanup = ktime_get_real_seconds();
+ stats->ojs_cleanup_last = ktime_get();
write_unlock(&stats->ojs_lock);
}
jobs->ojs_cntr_init_fn(job->js_stats, 0);
memcpy(job->js_jobid, jobid, sizeof(job->js_jobid));
- job->js_timestamp = ktime_get_real_seconds();
+ job->js_time_init = ktime_get();
+ job->js_time_latest = job->js_time_init;
job->js_jobstats = jobs;
INIT_HLIST_NODE(&job->js_hash);
INIT_LIST_HEAD(&job->js_list);
if (job)
goto found;
- lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
+ lprocfs_job_cleanup(stats, false);
job = job_alloc(jobid, stats);
if (job == NULL)
found:
LASSERT(stats == job->js_jobstats);
- job->js_timestamp = ktime_get_real_seconds();
+ job->js_time_latest = ktime_get();
lprocfs_counter_add(job->js_stats, event, amount);
job_putref(job);
if (stats->ojs_hash == NULL)
return;
- lprocfs_job_cleanup(stats, -99);
+ lprocfs_job_cleanup(stats, true);
cfs_hash_putref(stats->ojs_hash);
stats->ojs_hash = NULL;
LASSERT(list_empty(&stats->ojs_list));
*
* job_stats:
* - job_id: dd.4854
- * snapshot_time: 1322494486
+ * snapshot_time: 1322494486.123456789
+ * start_time: 1322494476.012345678
+ * elapsed_time: 10.111111111
* open: { samples: 1, unit: reqs }
* close: { samples: 1, unit: reqs }
* mknod: { samples: 0, unit: reqs }
*
* job_stats:
* - job_id dd.4854
- * snapshot_time: 1322494602
+ * snapshot_time: 1322494602.123456789
+ * start_time: 1322494592.987654321
+ * elapsed_time: 9.135802468
* read: { samples: 0, unit: bytes, min: 0, max: 0, sum: 0 }
* write: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 }
* setattr: { samples: 0, unit: reqs }
static int lprocfs_jobstats_seq_show(struct seq_file *p, void *v)
{
- struct job_stat *job = v;
- struct lprocfs_stats *s;
- struct lprocfs_counter ret;
- struct lprocfs_counter_header *cntr_header;
- int i;
+ struct job_stat *job = v;
+ struct lprocfs_stats *s;
+ struct lprocfs_counter ret;
+ struct lprocfs_counter_header *cntr_header;
+ int i;
if (v == SEQ_START_TOKEN) {
seq_printf(p, "job_stats:\n");
}
seq_putc(p, '\n');
- seq_printf(p, " %-16s %lld\n", "snapshot_time:", job->js_timestamp);
+ lprocfs_stats_header(p, job->js_time_latest, job->js_time_init, 16,
+ ":", true);
s = job->js_stats;
for (i = 0; i < s->ls_num; i++) {
jobid[len - 1] = 0;
if (strcmp(jobid, "clear") == 0) {
- lprocfs_job_cleanup(stats, -99);
+ lprocfs_job_cleanup(stats, true);
return len;
}
struct seq_file *seq = file->private_data;
struct obd_job_stats *stats = seq->private;
- lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
+ lprocfs_job_cleanup(stats, false);
return lprocfs_seq_release(inode, file);
}
rwlock_init(&stats->ojs_lock);
stats->ojs_cntr_num = cntr_num;
stats->ojs_cntr_init_fn = init_fn;
- stats->ojs_cleanup_interval = 600; /* 10 mins by default */
- stats->ojs_last_cleanup = ktime_get_real_seconds();
+ /* Store 1/2 the actual interval, since we use that the most, and
+ * it is easier to work with.
+ */
+ stats->ojs_cleanup_interval = ktime_set(600 / 2, 0); /* default 10 min*/
+ stats->ojs_cleanup_last = ktime_get();
entry = lprocfs_add_simple(obd->obd_proc_entry, "job_stats", stats,
&lprocfs_jobstats_seq_fops);
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct obd_job_stats *stats;
+ struct timespec64 ts;
stats = &obd->u.obt.obt_jobstats;
- return scnprintf(buf, PAGE_SIZE, "%d\n", stats->ojs_cleanup_interval);
+ ts = ktime_to_timespec64(stats->ojs_cleanup_interval);
+
+ return scnprintf(buf, PAGE_SIZE, "%lld\n", (long long)ts.tv_sec * 2);
}
EXPORT_SYMBOL(job_cleanup_interval_show);
if (rc)
return rc;
- stats->ojs_cleanup_interval = val;
- lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
+ stats->ojs_cleanup_interval = ktime_set(val / 2, 0);
+ lprocfs_job_cleanup(stats, false);
+
return count;
}
EXPORT_SYMBOL(job_cleanup_interval_store);
return lprocfs_stats_seq_start(p, pos);
}
+void lprocfs_stats_header(struct seq_file *seq, ktime_t now, ktime_t ts_init,
+ int width, const char *colon, bool show_units)
+{
+ const char *units = show_units ? " secs.nsecs" : "";
+ struct timespec64 ts;
+
+ ts = ktime_to_timespec64(now);
+	seq_printf(seq, "%-*s%s %lld.%09lu%s\n", width,
+		   "snapshot_time", colon, (s64)ts.tv_sec, ts.tv_nsec, units);
+	ts = ktime_to_timespec64(ts_init);
+	seq_printf(seq, "%-*s%s %lld.%09lu%s\n", width,
+		   "start_time", colon, (s64)ts.tv_sec, ts.tv_nsec, units);
+	ts = ktime_to_timespec64(ktime_sub(now, ts_init));
+	seq_printf(seq, "%-*s%s %lld.%09lu%s\n", width,
+		   "elapsed_time", colon, (s64)ts.tv_sec, ts.tv_nsec, units);
+}
+EXPORT_SYMBOL(lprocfs_stats_header);
+
/* seq file export of one lprocfs counter */
static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
{
struct lprocfs_counter ctr;
int idx = *(loff_t *)v;
- if (idx == 0) {
- struct timespec64 now;
-
- ktime_get_real_ts64(&now);
- seq_printf(p, "%-25s %llu.%09lu secs.nsecs\n",
- "snapshot_time", (s64)now.tv_sec, now.tv_nsec);
- }
+ if (idx == 0)
+		lprocfs_stats_header(p, ktime_get(), stats->ls_init, 25,
+				     "", true);
hdr = &stats->ls_cnt_header[idx];
lprocfs_stats_collect(stats, idx, &ctr);
static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timespec64 now;
struct obd_device *obd = seq->private;
struct client_obd *cli = &obd->u.cli;
unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
int i;
- ktime_get_real_ts64(&now);
-
spin_lock(&cli->cl_loi_list_lock);
- seq_printf(seq, "snapshot_time: %lld.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+	lprocfs_stats_header(seq, ktime_get(), cli->cl_stats_init, 25,
+			     ":", true);
seq_printf(seq, "read RPCs in flight: %d\n",
cli->cl_r_in_flight);
seq_printf(seq, "write RPCs in flight: %d\n",
static ssize_t osc_rpc_stats_seq_write(struct file *file,
const char __user *buf,
- size_t len, loff_t *off)
+ size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct obd_device *obd = seq->private;
lprocfs_oh_clear(&cli->cl_write_page_hist);
lprocfs_oh_clear(&cli->cl_read_offset_hist);
lprocfs_oh_clear(&cli->cl_write_offset_hist);
+ cli->cl_stats_init = ktime_get();
return len;
}
static int osc_stats_seq_show(struct seq_file *seq, void *v)
{
- struct timespec64 now;
struct obd_device *obd = seq->private;
struct osc_stats *stats = &obd2osc_dev(obd)->od_stats;
- ktime_get_real_ts64(&now);
-
- seq_printf(seq, "snapshot_time: %lld.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+ lprocfs_stats_header(seq, ktime_get(), stats->os_init, 25, ":", true);
seq_printf(seq, "lockless_write_bytes\t\t%llu\n",
stats->os_lockless_writes);
seq_printf(seq, "lockless_read_bytes\t\t%llu\n",
struct osc_stats *stats = &obd2osc_dev(obd)->od_stats;
memset(stats, 0, sizeof(*stats));
+ stats->os_init = ktime_get();
+
return len;
}
iobuf->dr_elapsed_valid = 0;
LASSERT(iobuf->dr_dev == d);
LASSERT(iobuf->dr_frags > 0);
- lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS+rw],
+ lprocfs_oh_tally(&d->od_brw_stats.bs_hist[BRW_R_DIO_FRAGS + rw],
iobuf->dr_frags);
- lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
+ lprocfs_oh_tally_log2(&d->od_brw_stats.bs_hist[BRW_R_IO_TIME+rw],
ktime_to_ms(iobuf->dr_elapsed));
}
}
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
- struct osd_device *osd = iobuf->dr_dev;
- struct obd_histogram *h = osd->od_brw_stats.hist;
+ struct osd_device *osd = iobuf->dr_dev;
+ struct obd_histogram *h = osd->od_brw_stats.bs_hist;
iobuf->dr_frags++;
atomic_inc(&iobuf->dr_numreqs);
void osd_brw_stats_update(struct osd_device *osd, struct osd_iobuf *iobuf)
{
- struct brw_stats *s = &osd->od_brw_stats;
+ struct brw_stats *bs = &osd->od_brw_stats;
sector_t *last_block = NULL;
- struct page **pages = iobuf->dr_pages;
- struct page *last_page = NULL;
- unsigned long discont_pages = 0;
- unsigned long discont_blocks = 0;
+ struct page **pages = iobuf->dr_pages;
+ struct page *last_page = NULL;
+ unsigned long discont_pages = 0;
+ unsigned long discont_blocks = 0;
sector_t *blocks = iobuf->dr_blocks;
- int i, nr_pages = iobuf->dr_npages;
- int blocks_per_page;
- int rw = iobuf->dr_rw;
+ int i, nr_pages = iobuf->dr_npages;
+ int blocks_per_page;
+ int rw = iobuf->dr_rw;
- if (unlikely(nr_pages == 0))
- return;
+ if (unlikely(nr_pages == 0))
+ return;
blocks_per_page = PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits;
- lprocfs_oh_tally_log2(&s->hist[BRW_R_PAGES+rw], nr_pages);
-
- while (nr_pages-- > 0) {
- if (last_page && (*pages)->index != (last_page->index + 1))
- discont_pages++;
- last_page = *pages;
- pages++;
- for (i = 0; i < blocks_per_page; i++) {
- if (last_block && *blocks != (*last_block + 1))
- discont_blocks++;
- last_block = blocks++;
- }
- }
+ lprocfs_oh_tally_log2(&bs->bs_hist[BRW_R_PAGES + rw], nr_pages);
+
+ while (nr_pages-- > 0) {
+ if (last_page && (*pages)->index != (last_page->index + 1))
+ discont_pages++;
+ last_page = *pages;
+ pages++;
+ for (i = 0; i < blocks_per_page; i++) {
+ if (last_block && *blocks != (*last_block + 1))
+ discont_blocks++;
+ last_block = blocks++;
+ }
+ }
- lprocfs_oh_tally(&s->hist[BRW_R_DISCONT_PAGES+rw], discont_pages);
- lprocfs_oh_tally(&s->hist[BRW_R_DISCONT_BLOCKS+rw], discont_blocks);
+ lprocfs_oh_tally(&bs->bs_hist[BRW_R_DISCONT_PAGES+rw], discont_pages);
+ lprocfs_oh_tally(&bs->bs_hist[BRW_R_DISCONT_BLOCKS+rw], discont_blocks);
}
static void display_brw_stats(struct seq_file *seq, char *name, char *units,
static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
{
- struct timespec64 now;
-
/* this sampling races with updates */
- ktime_get_real_ts64(&now);
+	lprocfs_stats_header(seq, ktime_get(), brw_stats->bs_init, 25,
+			     ":", true);
- seq_printf(seq, "snapshot_time: %lld.%09ld (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+ display_brw_stats(seq, "pages per bulk r/w", "rpcs",
+ &brw_stats->bs_hist[BRW_R_PAGES],
+ &brw_stats->bs_hist[BRW_W_PAGES], 1);
- display_brw_stats(seq, "pages per bulk r/w", "rpcs",
- &brw_stats->hist[BRW_R_PAGES],
- &brw_stats->hist[BRW_W_PAGES], 1);
+ display_brw_stats(seq, "discontiguous pages", "rpcs",
+ &brw_stats->bs_hist[BRW_R_DISCONT_PAGES],
+ &brw_stats->bs_hist[BRW_W_DISCONT_PAGES], 0);
- display_brw_stats(seq, "discontiguous pages", "rpcs",
- &brw_stats->hist[BRW_R_DISCONT_PAGES],
- &brw_stats->hist[BRW_W_DISCONT_PAGES], 0);
+ display_brw_stats(seq, "discontiguous blocks", "rpcs",
+ &brw_stats->bs_hist[BRW_R_DISCONT_BLOCKS],
+ &brw_stats->bs_hist[BRW_W_DISCONT_BLOCKS], 0);
- display_brw_stats(seq, "discontiguous blocks", "rpcs",
- &brw_stats->hist[BRW_R_DISCONT_BLOCKS],
- &brw_stats->hist[BRW_W_DISCONT_BLOCKS], 0);
+ display_brw_stats(seq, "disk fragmented I/Os", "ios",
+ &brw_stats->bs_hist[BRW_R_DIO_FRAGS],
+ &brw_stats->bs_hist[BRW_W_DIO_FRAGS], 0);
- display_brw_stats(seq, "disk fragmented I/Os", "ios",
- &brw_stats->hist[BRW_R_DIO_FRAGS],
- &brw_stats->hist[BRW_W_DIO_FRAGS], 0);
-
- display_brw_stats(seq, "disk I/Os in flight", "ios",
- &brw_stats->hist[BRW_R_RPC_HIST],
- &brw_stats->hist[BRW_W_RPC_HIST], 0);
+ display_brw_stats(seq, "disk I/Os in flight", "ios",
+ &brw_stats->bs_hist[BRW_R_RPC_HIST],
+ &brw_stats->bs_hist[BRW_W_RPC_HIST], 0);
display_brw_stats(seq, "I/O time (1/1000s)", "ios",
- &brw_stats->hist[BRW_R_IO_TIME],
- &brw_stats->hist[BRW_W_IO_TIME], 1);
+ &brw_stats->bs_hist[BRW_R_IO_TIME],
+ &brw_stats->bs_hist[BRW_W_IO_TIME], 1);
- display_brw_stats(seq, "disk I/O size", "ios",
- &brw_stats->hist[BRW_R_DISK_IOSIZE],
- &brw_stats->hist[BRW_W_DISK_IOSIZE], 1);
+ display_brw_stats(seq, "disk I/O size", "ios",
+ &brw_stats->bs_hist[BRW_R_DISK_IOSIZE],
+ &brw_stats->bs_hist[BRW_W_DISK_IOSIZE], 1);
}
static int osd_brw_stats_seq_show(struct seq_file *seq, void *v)
size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
- struct osd_device *osd = seq->private;
- int i;
+ struct osd_device *osd = seq->private;
+ int i;
- for (i = 0; i < BRW_LAST; i++)
- lprocfs_oh_clear(&osd->od_brw_stats.hist[i]);
+ for (i = 0; i < BRW_LAST; i++)
+ lprocfs_oh_clear(&osd->od_brw_stats.bs_hist[i]);
+ osd->od_brw_stats.bs_init = ktime_get();
- return len;
+ return len;
}
LPROC_SEQ_FOPS(osd_brw_stats);
static int osd_stats_init(struct osd_device *osd)
{
- int i, result;
- ENTRY;
+ int i, result;
+
+ ENTRY;
+ osd->od_brw_stats.bs_init = ktime_get();
- for (i = 0; i < BRW_LAST; i++)
- spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);
+ for (i = 0; i < BRW_LAST; i++)
+ spin_lock_init(&osd->od_brw_stats.bs_hist[i].oh_lock);
osd->od_stats = lprocfs_alloc_stats(LPROC_OSD_LAST, 0);
if (osd->od_stats != NULL) {
static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
{
- struct obd_histogram *h = osd->od_brw_stats.hist;
+ struct obd_histogram *h = osd->od_brw_stats.bs_hist;
if (rw == READ) {
atomic_inc(&osd->od_r_in_flight);
lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
atomic_read(&osd->od_r_in_flight));
lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
-
} else {
atomic_inc(&osd->od_w_in_flight);
lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
atomic_read(&osd->od_w_in_flight));
lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
-
}
}
static void record_end_io(struct osd_device *osd, int rw,
unsigned long elapsed, int disksize, int npages)
{
- struct obd_histogram *h = osd->od_brw_stats.hist;
+ struct obd_histogram *h = osd->od_brw_stats.bs_hist;
if (rw == READ)
atomic_dec(&osd->od_r_in_flight);
static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
{
- struct timespec64 now;
-
/* this sampling races with updates */
- ktime_get_real_ts64(&now);
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
- (s64)now.tv_sec, now.tv_nsec);
+	lprocfs_stats_header(seq, ktime_get(), brw_stats->bs_init, 25,
+			     ":", true);
display_brw_stats(seq, "pages per bulk r/w", "rpcs",
- &brw_stats->hist[BRW_R_PAGES],
- &brw_stats->hist[BRW_W_PAGES], 1);
+ &brw_stats->bs_hist[BRW_R_PAGES],
+ &brw_stats->bs_hist[BRW_W_PAGES], 1);
+
display_brw_stats(seq, "discontiguous pages", "rpcs",
- &brw_stats->hist[BRW_R_DISCONT_PAGES],
- &brw_stats->hist[BRW_W_DISCONT_PAGES], 0);
+ &brw_stats->bs_hist[BRW_R_DISCONT_PAGES],
+ &brw_stats->bs_hist[BRW_W_DISCONT_PAGES], 0);
#if 0
display_brw_stats(seq, "discontiguous blocks", "rpcs",
- &brw_stats->hist[BRW_R_DISCONT_BLOCKS],
- &brw_stats->hist[BRW_W_DISCONT_BLOCKS], 0);
+ &brw_stats->bs_hist[BRW_R_DISCONT_BLOCKS],
+ &brw_stats->bs_hist[BRW_W_DISCONT_BLOCKS], 0);
display_brw_stats(seq, "disk fragmented I/Os", "ios",
- &brw_stats->hist[BRW_R_DIO_FRAGS],
- &brw_stats->hist[BRW_W_DIO_FRAGS], 0);
+ &brw_stats->bs_hist[BRW_R_DIO_FRAGS],
+ &brw_stats->bs_hist[BRW_W_DIO_FRAGS], 0);
#endif
display_brw_stats(seq, "disk I/Os in flight", "ios",
- &brw_stats->hist[BRW_R_RPC_HIST],
- &brw_stats->hist[BRW_W_RPC_HIST], 0);
+ &brw_stats->bs_hist[BRW_R_RPC_HIST],
+ &brw_stats->bs_hist[BRW_W_RPC_HIST], 0);
display_brw_stats(seq, "I/O time (1/1000s)", "ios",
- &brw_stats->hist[BRW_R_IO_TIME],
- &brw_stats->hist[BRW_W_IO_TIME], 1);
+ &brw_stats->bs_hist[BRW_R_IO_TIME],
+ &brw_stats->bs_hist[BRW_W_IO_TIME], 1);
display_brw_stats(seq, "disk I/O size", "ios",
- &brw_stats->hist[BRW_R_DISK_IOSIZE],
- &brw_stats->hist[BRW_W_DISK_IOSIZE], 1);
+ &brw_stats->bs_hist[BRW_R_DISK_IOSIZE],
+ &brw_stats->bs_hist[BRW_W_DISK_IOSIZE], 1);
}
static int osd_brw_stats_seq_show(struct seq_file *seq, void *v)
int i;
for (i = 0; i < BRW_LAST; i++)
- lprocfs_oh_clear(&osd->od_brw_stats.hist[i]);
+ lprocfs_oh_clear(&osd->od_brw_stats.bs_hist[i]);
return len;
}
ENTRY;
for (i = 0; i < BRW_LAST; i++)
- spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);
+ spin_lock_init(&osd->od_brw_stats.bs_hist[i].oh_lock);
osd->od_stats = lprocfs_alloc_stats(LPROC_OSD_LAST, 0);
if (osd->od_stats != NULL) {
if (defined($prevcount)) {
$last{$name} = $prevcount;
}
- if ($name =~ /^$timestamp$/) {
+ if ($name =~ /^$timestamp/) {
$cur{$name} = $iodata[1];
} elsif ($name =~ /^$read_bytes$/) {
if (defined($cur{"read_ops"})) {
$prevcount = $cumulhash->{$name};
if (defined($prevcount)) {
$diff = $cumulcount - $prevcount;
- if ($name eq "snapshot_time") {
+ if ($name =~ /^snapshot_time/) {
$tdiff = int($diff);
&print_headings($cumulcount);
$| = 1;